You are viewing a plain text version of this content. The canonical link for it is here.
Posted to jira@kafka.apache.org by "翟玉勇 (JIRA)" <ji...@apache.org> on 2018/10/07 07:21:00 UTC

[jira] [Commented] (KAFKA-7442) forceUnmap mmap on linux when index resize

    [ https://issues.apache.org/jira/browse/KAFKA-7442?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16640983#comment-16640983 ] 

翟玉勇 commented on KAFKA-7442:
----------------------------

[~huxi_2b] 
mmap = raf.getChannel().map(FileChannel.MapMode.READ_WRITE, 0, roundedNewSize)
A new MappedByteBuffer object is allocated on each resize; cleaning up the old MappedByteBuffer object relies on a mixed GC or full GC. KAFKA-4614, by contrast, is about deleting the index file — it does not allocate a new MappedByteBuffer object.

> forceUnmap mmap on linux when index resize
> ------------------------------------------
>
>                 Key: KAFKA-7442
>                 URL: https://issues.apache.org/jira/browse/KAFKA-7442
>             Project: Kafka
>          Issue Type: Bug
>          Components: log
>    Affects Versions: 0.10.1.1
>            Reporter: 翟玉勇
>            Priority: Major
>
> When resizing an OffsetIndex or TimeIndex, we should force-unmap the mmap on the Linux platform, rather than waiting for a mixed GC or full GC to unmap the old MappedByteBuffer objects.
> ## Before full GC
> {code}
> {"request":{"mbean":"java.nio:name=mapped,type=BufferPool","type":"read"},"value":{"TotalCapacity":2434496968,"MemoryUsed":2434496968,"Count":5392,"Name":"mapped","ObjectName":{"objectName":"java.nio:name=mapped,type=BufferPool"}},"timestamp":1537945759,"status":200}
> S0     S1     E      O      M     CCS    YGC     YGCT    FGC    FGCT     GCT   
> 0.00 100.00  28.88   4.93  97.64  94.72     24    0.176     0    0.000    0.176
> 0.00 100.00  31.37   4.93  97.64  94.72     24    0.176     0    0.000    0.176
> {code}
> {code}
> jmap -histo:live kafka_pid
> {code}
>  
> ## After full GC
> {code}
> S0     S1     E      O      M     CCS    YGC     YGCT    FGC    FGCT     GCT   
> 0.00   0.00  23.22   5.03  97.92  94.93     24    0.176     1    0.617    0.793
> 0.00   0.00  25.70   5.03  97.92  94.93     24    0.176     1    0.617    0.793
> 0.00   0.00  27.86   5.03  97.92  94.93     24    0.176     1    0.617    0.793
> {"request":{"mbean":"java.nio:name=mapped,type=BufferPool","type":"read"},"value":{"TotalCapacity":1868266036,"MemoryUsed":1868266036,"Count":5338,"Name":"mapped","ObjectName":{"objectName":"java.nio:name=mapped,type=BufferPool"}},"timestamp":1537945860,"status":200}
> {code}
> {code}
> def resize(newSize: Int) {
>     inLock(lock) {
>       val raf = new RandomAccessFile(_file, "rw")
>       val roundedNewSize = roundDownToExactMultiple(newSize, entrySize)
>       val position = mmap.position
>       /* Windows won't let us modify the file length while the file is mmapped :-( */
>       if(Os.isWindows)
>         forceUnmap(mmap)
>       try {
>         raf.setLength(roundedNewSize)
>         mmap = raf.getChannel().map(FileChannel.MapMode.READ_WRITE, 0, roundedNewSize)
>         _maxEntries = mmap.limit / entrySize
>         mmap.position(position)
>       } finally {
>         CoreUtils.swallow(raf.close())
>       }
>     }
>   }
> {code}
> {code}
> [2018-09-21 13:12:24,078] INFO Rolled new log segment for 'topic-265' in 2 ms. (kafka.log.Log)
> [2018-09-21 13:13:16,436] FATAL [ReplicaFetcherThread-12-15], Disk error while replicating data for topic-264 (kafka.server.ReplicaFetcherThread)
> kafka.common.KafkaStorageException: I/O exception in append to log 'topic-264'
>         at kafka.log.Log.append(Log.scala:349)
>         at kafka.server.ReplicaFetcherThread.processPartitionData(ReplicaFetcherThread.scala:130)
>         at kafka.server.ReplicaFetcherThread.processPartitionData(ReplicaFetcherThread.scala:42)
>         at kafka.server.AbstractFetcherThread$$anonfun$processFetchRequest$2$$anonfun$apply$mcV$sp$1$$anonfun$apply$2.apply(AbstractFetcherThread.scala:153)
>         at kafka.server.AbstractFetcherThread$$anonfun$processFetchRequest$2$$anonfun$apply$mcV$sp$1$$anonfun$apply$2.apply(AbstractFetcherThread.scala:141)
>         at scala.Option.foreach(Option.scala:257)
>         at kafka.server.AbstractFetcherThread$$anonfun$processFetchRequest$2$$anonfun$apply$mcV$sp$1.apply(AbstractFetcherThread.scala:141)
>         at kafka.server.AbstractFetcherThread$$anonfun$processFetchRequest$2$$anonfun$apply$mcV$sp$1.apply(AbstractFetcherThread.scala:138)
>         at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
>         at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
>         at kafka.server.AbstractFetcherThread$$anonfun$processFetchRequest$2.apply$mcV$sp(AbstractFetcherThread.scala:138)
>         at kafka.server.AbstractFetcherThread$$anonfun$processFetchRequest$2.apply(AbstractFetcherThread.scala:138)
>         at kafka.server.AbstractFetcherThread$$anonfun$processFetchRequest$2.apply(AbstractFetcherThread.scala:138)
>         at kafka.utils.CoreUtils$.inLock(CoreUtils.scala:234)
>         at kafka.server.AbstractFetcherThread.processFetchRequest(AbstractFetcherThread.scala:136)
>         at kafka.server.AbstractFetcherThread.doWork(AbstractFetcherThread.scala:103)
>         at kafka.utils.ShutdownableThread.run(ShutdownableThread.scala:63)
> Caused by: java.io.IOException: Map failed
>         at sun.nio.ch.FileChannelImpl.map(FileChannelImpl.java:940)
>         at kafka.log.AbstractIndex.<init>(AbstractIndex.scala:61)
>         at kafka.log.OffsetIndex.<init>(OffsetIndex.scala:52)
>         at kafka.log.LogSegment.<init>(LogSegment.scala:67)
>         at kafka.log.Log.roll(Log.scala:778)
>         at kafka.log.Log.maybeRoll(Log.scala:744)
>         at kafka.log.Log.append(Log.scala:405)
>         ... 16 more
> Caused by: java.lang.OutOfMemoryError: Map failed
>         at sun.nio.ch.FileChannelImpl.map0(Native Method)
>         at sun.nio.ch.FileChannelImpl.map(FileChannelImpl.java:937)
>         ... 22 more
> {code}



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)