[ https://issues.apache.org/jira/browse/KAFKA-8549?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17379665#comment-17379665 ]
DEBENDRA DHINDA commented on KAFKA-8549: ---------------------------------------- Is it resolved? > Kafka Windows start up fail due to cannot be performed on a file with a > user-mapped section open > ------------------------------------------------------------------------------------------------ > > Key: KAFKA-8549 > URL: https://issues.apache.org/jira/browse/KAFKA-8549 > Project: Kafka > Issue Type: Bug > Components: core > Affects Versions: 2.2.1 > Reporter: prehistoricpenguin > Priority: Major > Labels: crash, windows > > We are running Kafka server on Windows; we got this exception during Kafka > server start up: > {code:java} > 2019-06-11 14:50:48,537] ERROR Error while creating log for > this_is_a_topic_name in dir C:\Program Files (x86)\dummy_path\tmp\kafka-logs > (kafka.server.LogDirFailureChannel) > java.io.IOException: The requested operation cannot be performed on a file > with a user-mapped section open > at java.io.RandomAccessFile.setLength(Native Method) > at kafka.log.AbstractIndex.$anonfun$resize$1(AbstractIndex.scala:188) > at scala.runtime.java8.JFunction0$mcZ$sp.apply(JFunction0$mcZ$sp.java:23) > at kafka.utils.CoreUtils$.inLock(CoreUtils.scala:251) > at kafka.log.AbstractIndex.resize(AbstractIndex.scala:175) > at kafka.log.AbstractIndex.$anonfun$trimToValidSize$1(AbstractIndex.scala:238) > at scala.runtime.java8.JFunction0$mcZ$sp.apply(JFunction0$mcZ$sp.java:23) > at kafka.utils.CoreUtils$.inLock(CoreUtils.scala:251) > at kafka.log.AbstractIndex.trimToValidSize(AbstractIndex.scala:238) > at kafka.log.LogSegment.recover(LogSegment.scala:377) > at kafka.log.Log.recoverSegment(Log.scala:500) > at kafka.log.Log.$anonfun$loadSegmentFiles$3(Log.scala:482) > at > scala.collection.TraversableLike$WithFilter.$anonfun$foreach$1(TraversableLike.scala:792) > at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36) > at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33) > at 
scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198) > at > scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:791) > at kafka.log.Log.loadSegmentFiles(Log.scala:454) > at kafka.log.Log.$anonfun$loadSegments$1(Log.scala:565) > at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23) > at kafka.log.Log.retryOnOffsetOverflow(Log.scala:2034) > at kafka.log.Log.loadSegments(Log.scala:559) > at kafka.log.Log.<init>(Log.scala:292) > at kafka.log.Log$.apply(Log.scala:2168) > at kafka.log.LogManager.$anonfun$getOrCreateLog$1(LogManager.scala:716) > at scala.Option.getOrElse(Option.scala:138) > at kafka.log.LogManager.getOrCreateLog(LogManager.scala:674) > at kafka.cluster.Partition.$anonfun$getOrCreateReplica$1(Partition.scala:202) > at kafka.utils.Pool$$anon$1.apply(Pool.scala:61) > at > java.util.concurrent.ConcurrentHashMap.computeIfAbsent(ConcurrentHashMap.java:1660) > at kafka.utils.Pool.getAndMaybePut(Pool.scala:60) > at kafka.cluster.Partition.getOrCreateReplica(Partition.scala:198) > at kafka.cluster.Partition.$anonfun$makeLeader$3(Partition.scala:376) > at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:237) > at scala.collection.Iterator.foreach(Iterator.scala:941) > at scala.collection.Iterator.foreach$(Iterator.scala:941) > at scala.collection.AbstractIterator.foreach(Iterator.scala:1429) > at scala.collection.IterableLike.foreach(IterableLike.scala:74) > at scala.collection.IterableLike.foreach$(IterableLike.scala:73) > at scala.collection.AbstractIterable.foreach(Iterable.scala:56) > at scala.collection.TraversableLike.map(TraversableLike.scala:237) > at scala.collection.TraversableLike.map$(TraversableLike.scala:230) > at scala.collection.AbstractTraversable.map(Traversable.scala:108) > at kafka.cluster.Partition.$anonfun$makeLeader$1(Partition.scala:376) > at kafka.utils.CoreUtils$.inLock(CoreUtils.scala:251) > at kafka.utils.CoreUtils$.inWriteLock(CoreUtils.scala:259) > at 
kafka.cluster.Partition.makeLeader(Partition.scala:370) > at > kafka.server.ReplicaManager.$anonfun$makeLeaders$5(ReplicaManager.scala:1188) > at scala.collection.mutable.HashMap.$anonfun$foreach$1(HashMap.scala:149) > at scala.collection.mutable.HashTable.foreachEntry(HashTable.scala:237) > at scala.collection.mutable.HashTable.foreachEntry$(HashTable.scala:230) > at scala.collection.mutable.HashMap.foreachEntry(HashMap.scala:44) > at scala.collection.mutable.HashMap.foreach(HashMap.scala:149) > at kafka.server.ReplicaManager.makeLeaders(ReplicaManager.scala:1186) > at > kafka.server.ReplicaManager.becomeLeaderOrFollower(ReplicaManager.scala:1098) > at kafka.server.KafkaApis.handleLeaderAndIsrRequest(KafkaApis.scala:195) > at kafka.server.KafkaApis.handle(KafkaApis.scala:112) > at kafka.server.KafkaRequestHandler.run(KafkaRequestHandler.scala:69) > at java.lang.Thread.run(Thread.java:748) > [2019-06-11 14:50:48,542] INFO [ReplicaManager broker=0] Stopping serving > replicas in dir C:\Program Files (x86)\dummy_path\tmp\kafka-logs > (kafka.server.ReplicaManager) > [2019-06-11 14:50:48,543] ERROR [ReplicaManager broker=0] Error while making > broker the leader for partition Topic: this_is_a_topic_name; Partition: 0; > Leader: None; AllReplicas: ; InSyncReplicas: in dir None > (kafka.server.ReplicaManager) > {code} > After our investigation, we found that we have two similar topics; their names > are the same if we ignore case. If we delete one of the topics, then the > problem is solved. -- This message was sent by Atlassian Jira (v8.3.4#803005)