Posted to jira@kafka.apache.org by GitBox <gi...@apache.org> on 2021/02/02 01:33:36 UTC

[GitHub] [kafka] hachikuji commented on a change in pull request #10018: Upstream MetadataImage and related classes

hachikuji commented on a change in pull request #10018:
URL: https://github.com/apache/kafka/pull/10018#discussion_r568231718



##########
File path: core/src/main/scala/kafka/server/metadata/MetadataBrokers.scala
##########
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+import java.util.concurrent.ThreadLocalRandom
+
+import kafka.cluster.BrokerEndPoint
+import kafka.common.BrokerEndPointNotAvailableException
+import org.apache.kafka.common.Node
+import org.apache.kafka.common.metadata.RegisterBrokerRecord
+import org.apache.kafka.common.network.ListenerName
+import org.slf4j.Logger
+
+import scala.jdk.CollectionConverters._
+
+object MetadataBroker {
+  def apply(record: RegisterBrokerRecord): MetadataBroker = {
+    new MetadataBroker(record.brokerId(), record.rack(),
+      record.endPoints().asScala.map {
+        case e => e.name() ->
+          new Node(record.brokerId(), e.host(), e.port(), record.rack())
+      }.toMap,
+      true)
+  }
+}
+
+case class MetadataBroker(id: Int,
+                          rack: String,
+                          endpoints: collection.Map[String, Node],
+                          fenced: Boolean) {
+  def brokerEndPoint(listenerName: ListenerName): BrokerEndPoint = {
+    endpoints.get(listenerName.value()) match {
+      case None => throw new BrokerEndPointNotAvailableException(
+        s"End point with listener name ${listenerName.value} not found for broker $id")
+      case Some(node) => new BrokerEndPoint(node.id(), node.host(), node.port())
+    }
+  }
+}
+
+class MetadataBrokersBuilder(log: Logger, prevBrokers: MetadataBrokers) {
+  private var newBrokerMap = prevBrokers.cloneBrokerMap()
+
+  def add(broker: MetadataBroker): Unit = {
+    newBrokerMap.put(broker.id, broker)
+  }
+
+  def changeFencing(id: Int, fenced: Boolean): Unit = {
+    val broker = newBrokerMap.get(id)
+    if (broker == null) {
+      throw new RuntimeException(s"Unknown broker id ${id}")
+    }
+    val newBroker = new MetadataBroker(broker.id, broker.rack, broker.endpoints, fenced)
+    newBrokerMap.put(id, newBroker)
+  }
+
+  def remove(id: Int): Unit = {
+    newBrokerMap.remove(id)
+  }
+
+  def get(brokerId: Int): Option[MetadataBroker] = Option(newBrokerMap.get(brokerId))
+
+  def build(): MetadataBrokers = {
+    val result = MetadataBrokers(log, newBrokerMap)
+    newBrokerMap = Collections.unmodifiableMap(newBrokerMap)
+    result
+  }
+}
+
+object MetadataBrokers {
+  def apply(log: Logger,
+            brokerMap: util.Map[Integer, MetadataBroker]): MetadataBrokers = {
+    var listenersIdenticalAcrossBrokers = true
+    var prevListeners: collection.Set[String] = null
+    val _aliveBrokers = new util.ArrayList[MetadataBroker](brokerMap.size())

Review comment:
       nit: any reason this has the underscore prefix?

##########
File path: core/src/main/scala/kafka/server/metadata/MetadataBrokers.scala
##########
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+import java.util.concurrent.ThreadLocalRandom
+
+import kafka.cluster.BrokerEndPoint
+import kafka.common.BrokerEndPointNotAvailableException
+import org.apache.kafka.common.Node
+import org.apache.kafka.common.metadata.RegisterBrokerRecord
+import org.apache.kafka.common.network.ListenerName
+import org.slf4j.Logger
+
+import scala.jdk.CollectionConverters._
+
+object MetadataBroker {
+  def apply(record: RegisterBrokerRecord): MetadataBroker = {
+    new MetadataBroker(record.brokerId(), record.rack(),
+      record.endPoints().asScala.map {
+        case e => e.name() ->
+          new Node(record.brokerId(), e.host(), e.port(), record.rack())
+      }.toMap,
+      true)
+  }
+}
+
+case class MetadataBroker(id: Int,
+                          rack: String,
+                          endpoints: collection.Map[String, Node],
+                          fenced: Boolean) {
+  def brokerEndPoint(listenerName: ListenerName): BrokerEndPoint = {
+    endpoints.get(listenerName.value()) match {
+      case None => throw new BrokerEndPointNotAvailableException(
+        s"End point with listener name ${listenerName.value} not found for broker $id")
+      case Some(node) => new BrokerEndPoint(node.id(), node.host(), node.port())
+    }
+  }
+}
+
+class MetadataBrokersBuilder(log: Logger, prevBrokers: MetadataBrokers) {
+  private var newBrokerMap = prevBrokers.cloneBrokerMap()
+
+  def add(broker: MetadataBroker): Unit = {
+    newBrokerMap.put(broker.id, broker)
+  }
+
+  def changeFencing(id: Int, fenced: Boolean): Unit = {
+    val broker = newBrokerMap.get(id)
+    if (broker == null) {
+      throw new RuntimeException(s"Unknown broker id ${id}")
+    }
+    val newBroker = new MetadataBroker(broker.id, broker.rack, broker.endpoints, fenced)
+    newBrokerMap.put(id, newBroker)
+  }
+
+  def remove(id: Int): Unit = {
+    newBrokerMap.remove(id)
+  }
+
+  def get(brokerId: Int): Option[MetadataBroker] = Option(newBrokerMap.get(brokerId))
+
+  def build(): MetadataBrokers = {
+    val result = MetadataBrokers(log, newBrokerMap)
+    newBrokerMap = Collections.unmodifiableMap(newBrokerMap)
+    result
+  }
+}
+
+object MetadataBrokers {
+  def apply(log: Logger,
+            brokerMap: util.Map[Integer, MetadataBroker]): MetadataBrokers = {
+    var listenersIdenticalAcrossBrokers = true
+    var prevListeners: collection.Set[String] = null
+    val _aliveBrokers = new util.ArrayList[MetadataBroker](brokerMap.size())
+    brokerMap.values().iterator().asScala.foreach {
+      case broker => if (!broker.fenced) {

Review comment:
       nit: we don't need `case` here. Also, we usually put this on the previous line:
   ```scala
   ...foreach { broker =>
   ```
   There are a few of these in the PR.
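   For example, the loop in `MetadataBrokers.apply` could read like this (a sketch of the same logic, just restyled):
   ```scala
   brokerMap.values().iterator().asScala.foreach { broker =>
     if (!broker.fenced) {
       if (prevListeners == null) {
         prevListeners = broker.endpoints.keySet
       } else if (!prevListeners.equals(broker.endpoints.keySet)) {
         listenersIdenticalAcrossBrokers = false
       }
       _aliveBrokers.add(broker)
     }
   }
   ```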

##########
File path: core/src/main/scala/kafka/server/metadata/MetadataPartitions.scala
##########
@@ -0,0 +1,282 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+import java.util.function.BiConsumer
+
+import org.apache.kafka.common.message.LeaderAndIsrRequestData
+import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState
+import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState
+import org.apache.kafka.common.metadata.{IsrChangeRecord, PartitionRecord}
+import org.apache.kafka.common.{TopicPartition, Uuid}
+
+import scala.jdk.CollectionConverters._
+
+
+object MetadataPartition {
+  def apply(name: String, record: PartitionRecord): MetadataPartition = {
+    MetadataPartition(name,
+      record.partitionId(),
+      record.leader(),
+      record.leaderEpoch(),
+      record.replicas(),
+      record.isr(),
+      Collections.emptyList(), // TODO: handle offline replicas
+      Collections.emptyList(),
+      Collections.emptyList())
+  }
+
+  def apply(prevPartition: Option[MetadataPartition],
+            partition: UpdateMetadataPartitionState): MetadataPartition = {
+    new MetadataPartition(partition.topicName(),
+      partition.partitionIndex(),
+      partition.leader(),
+      partition.leaderEpoch(),
+      partition.replicas(),
+      partition.isr(),
+      partition.offlineReplicas(),
+      prevPartition.flatMap(p => Some(p.addingReplicas)).getOrElse(Collections.emptyList()),
+      prevPartition.flatMap(p => Some(p.removingReplicas)).getOrElse(Collections.emptyList())
+    )
+  }
+}
+
+case class MetadataPartition(topicName: String,
+                             partitionIndex: Int,
+                             leaderId: Int,
+                             leaderEpoch: Int,
+                             replicas: util.List[Integer],
+                             isr: util.List[Integer],
+                             offlineReplicas: util.List[Integer],
+                             addingReplicas: util.List[Integer],
+                             removingReplicas: util.List[Integer]) {
+  def toTopicPartition(): TopicPartition = new TopicPartition(topicName, partitionIndex)
+
+  def toLeaderAndIsrPartitionState(isNew: Boolean): LeaderAndIsrRequestData.LeaderAndIsrPartitionState = {
+    new LeaderAndIsrPartitionState().setTopicName(topicName).
+      setPartitionIndex(partitionIndex).
+      setLeader(leaderId).
+      setLeaderEpoch(leaderEpoch).
+      setReplicas(replicas).
+      setIsr(isr).
+      setAddingReplicas(addingReplicas).
+      setRemovingReplicas(removingReplicas).
+      setIsNew(isNew)
+    // Note: we don't set ZKVersion here.
+  }
+
+  def isReplicaFor(brokerId: Int): Boolean = replicas.contains(Integer.valueOf(brokerId))
+
+  def copyWithIsrChanges(record: IsrChangeRecord): MetadataPartition = {
+    MetadataPartition(topicName,
+      partitionIndex,
+      record.leader(),
+      record.leaderEpoch(),
+      replicas,
+      record.isr(),
+      offlineReplicas,
+      addingReplicas,
+      removingReplicas)
+  }
+}
+
+class MetadataPartitionsBuilder(val brokerId: Int,
+                                val prevPartitions: MetadataPartitions) {
+  private var newNameMap = prevPartitions.copyNameMap()
+  private var newIdMap = prevPartitions.copyIdMap()
+  private val changed = new util.IdentityHashMap[Any, Boolean]()
+  private val _localChanged = new util.HashSet[MetadataPartition]
+  private val _localRemoved = new util.HashSet[MetadataPartition]
+
+  def topicIdToName(id: Uuid): Option[String] = Option(newIdMap.get(id))
+
+  def removeTopicById(id: Uuid): Iterable[MetadataPartition] = {
+    Option(newIdMap.remove(id)) match {
+      case None => throw new RuntimeException(s"Unable to locate topic with ID ${id}")
+      case Some(name) => newNameMap.remove(name).values().asScala
+    }
+  }
+
+  def handleIsrChange(record: IsrChangeRecord): Unit = {
+    Option(newIdMap.get(record.topicId())) match {

Review comment:
       Some helpers might make this less awkward. Maybe a shortcut to get to the partition map?
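   For illustration, one possible shape (the helper name is hypothetical, and it assumes the inner map is keyed by partition id):
   ```scala
   // Hypothetical helper: resolve a topic id to its partition map, or fail loudly.
   private def partitionMapForTopic(topicId: Uuid): util.Map[Int, MetadataPartition] = {
     val name = Option(newIdMap.get(topicId)).getOrElse(
       throw new RuntimeException(s"Unable to locate topic with ID $topicId"))
     Option(newNameMap.get(name)).getOrElse(
       throw new RuntimeException(s"Unable to locate topic with name $name"))
   }

   def handleIsrChange(record: IsrChangeRecord): Unit = {
     val partitionMap = partitionMapForTopic(record.topicId())
     Option(partitionMap.get(record.partitionId())) match {
       case None => throw new RuntimeException(
         s"Unable to locate partition ${record.partitionId} for topic ID ${record.topicId}")
       case Some(partition) => set(partition.copyWithIsrChanges(record))
     }
   }
   ```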
   

##########
File path: core/src/main/scala/kafka/server/metadata/MetadataPartitions.scala
##########
@@ -0,0 +1,282 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+import java.util.function.BiConsumer
+
+import org.apache.kafka.common.message.LeaderAndIsrRequestData
+import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState
+import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState
+import org.apache.kafka.common.metadata.{IsrChangeRecord, PartitionRecord}
+import org.apache.kafka.common.{TopicPartition, Uuid}
+
+import scala.jdk.CollectionConverters._
+
+
+object MetadataPartition {
+  def apply(name: String, record: PartitionRecord): MetadataPartition = {
+    MetadataPartition(name,
+      record.partitionId(),
+      record.leader(),
+      record.leaderEpoch(),
+      record.replicas(),
+      record.isr(),
+      Collections.emptyList(), // TODO: handle offline replicas
+      Collections.emptyList(),
+      Collections.emptyList())
+  }
+
+  def apply(prevPartition: Option[MetadataPartition],
+            partition: UpdateMetadataPartitionState): MetadataPartition = {
+    new MetadataPartition(partition.topicName(),
+      partition.partitionIndex(),
+      partition.leader(),
+      partition.leaderEpoch(),
+      partition.replicas(),
+      partition.isr(),
+      partition.offlineReplicas(),
+      prevPartition.flatMap(p => Some(p.addingReplicas)).getOrElse(Collections.emptyList()),
+      prevPartition.flatMap(p => Some(p.removingReplicas)).getOrElse(Collections.emptyList())
+    )
+  }
+}
+
+case class MetadataPartition(topicName: String,
+                             partitionIndex: Int,
+                             leaderId: Int,
+                             leaderEpoch: Int,
+                             replicas: util.List[Integer],
+                             isr: util.List[Integer],
+                             offlineReplicas: util.List[Integer],
+                             addingReplicas: util.List[Integer],
+                             removingReplicas: util.List[Integer]) {
+  def toTopicPartition(): TopicPartition = new TopicPartition(topicName, partitionIndex)
+
+  def toLeaderAndIsrPartitionState(isNew: Boolean): LeaderAndIsrRequestData.LeaderAndIsrPartitionState = {
+    new LeaderAndIsrPartitionState().setTopicName(topicName).
+      setPartitionIndex(partitionIndex).
+      setLeader(leaderId).
+      setLeaderEpoch(leaderEpoch).
+      setReplicas(replicas).
+      setIsr(isr).
+      setAddingReplicas(addingReplicas).
+      setRemovingReplicas(removingReplicas).
+      setIsNew(isNew)
+    // Note: we don't set ZKVersion here.
+  }
+
+  def isReplicaFor(brokerId: Int): Boolean = replicas.contains(Integer.valueOf(brokerId))
+
+  def copyWithIsrChanges(record: IsrChangeRecord): MetadataPartition = {
+    MetadataPartition(topicName,
+      partitionIndex,
+      record.leader(),
+      record.leaderEpoch(),
+      replicas,
+      record.isr(),
+      offlineReplicas,
+      addingReplicas,
+      removingReplicas)
+  }
+}
+
+class MetadataPartitionsBuilder(val brokerId: Int,
+                                val prevPartitions: MetadataPartitions) {
+  private var newNameMap = prevPartitions.copyNameMap()
+  private var newIdMap = prevPartitions.copyIdMap()
+  private val changed = new util.IdentityHashMap[Any, Boolean]()

Review comment:
       I think this is a map just because there is no `IdentityHashSet`. Perhaps we could use `Collections.newSetFromMap`?
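   For example (a sketch; the boxed `java.lang.Boolean` value type is what the API requires):
   ```scala
   private val changed: util.Set[Any] =
     Collections.newSetFromMap(new util.IdentityHashMap[Any, java.lang.Boolean]())
   ```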

##########
File path: core/src/main/scala/kafka/server/metadata/MetadataBrokers.scala
##########
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+import java.util.concurrent.ThreadLocalRandom
+
+import kafka.cluster.BrokerEndPoint
+import kafka.common.BrokerEndPointNotAvailableException
+import org.apache.kafka.common.Node
+import org.apache.kafka.common.metadata.RegisterBrokerRecord
+import org.apache.kafka.common.network.ListenerName
+import org.slf4j.Logger
+
+import scala.jdk.CollectionConverters._
+
+object MetadataBroker {
+  def apply(record: RegisterBrokerRecord): MetadataBroker = {
+    new MetadataBroker(record.brokerId(), record.rack(),
+      record.endPoints().asScala.map {
+        case e => e.name() ->

Review comment:
       nit: unneeded `case`

##########
File path: core/src/main/scala/kafka/server/metadata/MetadataBrokers.scala
##########
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+import java.util.concurrent.ThreadLocalRandom
+
+import kafka.cluster.BrokerEndPoint
+import kafka.common.BrokerEndPointNotAvailableException
+import org.apache.kafka.common.Node
+import org.apache.kafka.common.metadata.RegisterBrokerRecord
+import org.apache.kafka.common.network.ListenerName
+import org.slf4j.Logger
+
+import scala.jdk.CollectionConverters._
+
+object MetadataBroker {
+  def apply(record: RegisterBrokerRecord): MetadataBroker = {
+    new MetadataBroker(record.brokerId(), record.rack(),
+      record.endPoints().asScala.map {
+        case e => e.name() ->
+          new Node(record.brokerId(), e.host(), e.port(), record.rack())
+      }.toMap,
+      true)
+  }
+}
+
+case class MetadataBroker(id: Int,
+                          rack: String,
+                          endpoints: collection.Map[String, Node],
+                          fenced: Boolean) {
+  def brokerEndPoint(listenerName: ListenerName): BrokerEndPoint = {
+    endpoints.get(listenerName.value()) match {
+      case None => throw new BrokerEndPointNotAvailableException(
+        s"End point with listener name ${listenerName.value} not found for broker $id")
+      case Some(node) => new BrokerEndPoint(node.id(), node.host(), node.port())
+    }
+  }
+}
+
+class MetadataBrokersBuilder(log: Logger, prevBrokers: MetadataBrokers) {
+  private var newBrokerMap = prevBrokers.cloneBrokerMap()
+
+  def add(broker: MetadataBroker): Unit = {
+    newBrokerMap.put(broker.id, broker)
+  }
+
+  def changeFencing(id: Int, fenced: Boolean): Unit = {
+    val broker = newBrokerMap.get(id)
+    if (broker == null) {
+      throw new RuntimeException(s"Unknown broker id ${id}")
+    }
+    val newBroker = new MetadataBroker(broker.id, broker.rack, broker.endpoints, fenced)
+    newBrokerMap.put(id, newBroker)
+  }
+
+  def remove(id: Int): Unit = {
+    newBrokerMap.remove(id)
+  }
+
+  def get(brokerId: Int): Option[MetadataBroker] = Option(newBrokerMap.get(brokerId))
+
+  def build(): MetadataBrokers = {
+    val result = MetadataBrokers(log, newBrokerMap)
+    newBrokerMap = Collections.unmodifiableMap(newBrokerMap)
+    result
+  }
+}
+
+object MetadataBrokers {
+  def apply(log: Logger,
+            brokerMap: util.Map[Integer, MetadataBroker]): MetadataBrokers = {
+    var listenersIdenticalAcrossBrokers = true
+    var prevListeners: collection.Set[String] = null
+    val _aliveBrokers = new util.ArrayList[MetadataBroker](brokerMap.size())
+    brokerMap.values().iterator().asScala.foreach {
+      case broker => if (!broker.fenced) {
+        if (prevListeners == null) {
+          prevListeners = broker.endpoints.keySet
+        } else if (!prevListeners.equals(broker.endpoints.keySet)) {
+          listenersIdenticalAcrossBrokers = false
+        }
+        _aliveBrokers.add(broker)
+      }
+    }
+    if (!listenersIdenticalAcrossBrokers) {
+      log.error("Listeners are not identical across alive brokers. " +
+        _aliveBrokers.asScala.map(
+          broker => s"${broker.id}: ${broker.endpoints.keySet.mkString(", ")}"))
+    }
+    new MetadataBrokers(_aliveBrokers, brokerMap)
+  }
+}
+
+case class MetadataBrokers(private val _aliveBrokers: util.List[MetadataBroker],
+                           private val brokerMap: util.Map[Integer, MetadataBroker]) {
+  def size(): Int = brokerMap.size()
+
+  def iterator(): Iterator[MetadataBroker] = brokerMap.values().iterator().asScala
+
+  def cloneBrokerMap(): util.Map[Integer, MetadataBroker] = {
+    val result = new util.HashMap[Integer, MetadataBroker]
+    result.putAll(brokerMap)
+    result
+  }
+
+  def getAlive(id: Int): Option[MetadataBroker] = {

Review comment:
       nit: `getAlive` -> `aliveBroker`? Seems more consistent with `aliveBrokers`.

##########
File path: core/src/main/scala/kafka/server/metadata/MetadataBrokers.scala
##########
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+import java.util.concurrent.ThreadLocalRandom
+
+import kafka.cluster.BrokerEndPoint
+import kafka.common.BrokerEndPointNotAvailableException
+import org.apache.kafka.common.Node
+import org.apache.kafka.common.metadata.RegisterBrokerRecord
+import org.apache.kafka.common.network.ListenerName
+import org.slf4j.Logger
+
+import scala.jdk.CollectionConverters._
+
+object MetadataBroker {
+  def apply(record: RegisterBrokerRecord): MetadataBroker = {
+    new MetadataBroker(record.brokerId(), record.rack(),
+      record.endPoints().asScala.map {
+        case e => e.name() ->
+          new Node(record.brokerId(), e.host(), e.port(), record.rack())
+      }.toMap,
+      true)
+  }
+}
+
+case class MetadataBroker(id: Int,
+                          rack: String,
+                          endpoints: collection.Map[String, Node],
+                          fenced: Boolean) {
+  def brokerEndPoint(listenerName: ListenerName): BrokerEndPoint = {
+    endpoints.get(listenerName.value()) match {
+      case None => throw new BrokerEndPointNotAvailableException(
+        s"End point with listener name ${listenerName.value} not found for broker $id")
+      case Some(node) => new BrokerEndPoint(node.id(), node.host(), node.port())
+    }
+  }
+}
+
+class MetadataBrokersBuilder(log: Logger, prevBrokers: MetadataBrokers) {
+  private var newBrokerMap = prevBrokers.cloneBrokerMap()
+
+  def add(broker: MetadataBroker): Unit = {
+    newBrokerMap.put(broker.id, broker)
+  }
+
+  def changeFencing(id: Int, fenced: Boolean): Unit = {
+    val broker = newBrokerMap.get(id)
+    if (broker == null) {
+      throw new RuntimeException(s"Unknown broker id ${id}")
+    }
+    val newBroker = new MetadataBroker(broker.id, broker.rack, broker.endpoints, fenced)
+    newBrokerMap.put(id, newBroker)
+  }
+
+  def remove(id: Int): Unit = {
+    newBrokerMap.remove(id)
+  }
+
+  def get(brokerId: Int): Option[MetadataBroker] = Option(newBrokerMap.get(brokerId))
+
+  def build(): MetadataBrokers = {
+    val result = MetadataBrokers(log, newBrokerMap)
+    newBrokerMap = Collections.unmodifiableMap(newBrokerMap)
+    result
+  }
+}
+
+object MetadataBrokers {
+  def apply(log: Logger,
+            brokerMap: util.Map[Integer, MetadataBroker]): MetadataBrokers = {
+    var listenersIdenticalAcrossBrokers = true
+    var prevListeners: collection.Set[String] = null
+    val _aliveBrokers = new util.ArrayList[MetadataBroker](brokerMap.size())
+    brokerMap.values().iterator().asScala.foreach {
+      case broker => if (!broker.fenced) {
+        if (prevListeners == null) {
+          prevListeners = broker.endpoints.keySet
+        } else if (!prevListeners.equals(broker.endpoints.keySet)) {
+          listenersIdenticalAcrossBrokers = false
+        }
+        _aliveBrokers.add(broker)
+      }
+    }
+    if (!listenersIdenticalAcrossBrokers) {
+      log.error("Listeners are not identical across alive brokers. " +
+        _aliveBrokers.asScala.map(
+          broker => s"${broker.id}: ${broker.endpoints.keySet.mkString(", ")}"))
+    }
+    new MetadataBrokers(_aliveBrokers, brokerMap)
+  }
+}
+
+case class MetadataBrokers(private val _aliveBrokers: util.List[MetadataBroker],
+                           private val brokerMap: util.Map[Integer, MetadataBroker]) {
+  def size(): Int = brokerMap.size()
+
+  def iterator(): Iterator[MetadataBroker] = brokerMap.values().iterator().asScala
+
+  def cloneBrokerMap(): util.Map[Integer, MetadataBroker] = {
+    val result = new util.HashMap[Integer, MetadataBroker]
+    result.putAll(brokerMap)
+    result
+  }
+
+  def getAlive(id: Int): Option[MetadataBroker] = {
+    val broker = get(id)

Review comment:
       Maybe rewrite this as a match?
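   Something like this, perhaps (a sketch, assuming the rest of the method filters out fenced brokers):
   ```scala
   def getAlive(id: Int): Option[MetadataBroker] = {
     get(id) match {
       case Some(broker) if !broker.fenced => Some(broker)
       case _ => None
     }
   }
   ```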

##########
File path: core/src/main/scala/kafka/server/metadata/MetadataBrokers.scala
##########
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+import java.util.concurrent.ThreadLocalRandom
+
+import kafka.cluster.BrokerEndPoint
+import kafka.common.BrokerEndPointNotAvailableException
+import org.apache.kafka.common.Node
+import org.apache.kafka.common.metadata.RegisterBrokerRecord
+import org.apache.kafka.common.network.ListenerName
+import org.slf4j.Logger
+
+import scala.jdk.CollectionConverters._
+
+object MetadataBroker {
+  def apply(record: RegisterBrokerRecord): MetadataBroker = {
+    new MetadataBroker(record.brokerId(), record.rack(),
+      record.endPoints().asScala.map {
+        case e => e.name() ->
+          new Node(record.brokerId(), e.host(), e.port(), record.rack())

Review comment:
       nit: there are a bunch of places in here where we could drop the unneeded parentheses
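   For example, Scala lets us drop the empty parentheses on these accessors:
   ```scala
   new Node(record.brokerId, e.host, e.port, record.rack)
   ```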

##########
File path: core/src/main/scala/kafka/server/metadata/MetadataImage.scala
##########
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+
+import org.apache.kafka.common.{TopicPartition, Uuid}
+import org.slf4j.Logger
+
+case class MetadataImageBuilder(brokerId: Int,
+                                log: Logger,
+                                prevImage: MetadataImage) {
+  private var _partitionsBuilder: MetadataPartitionsBuilder = null
+  private var _controllerId = prevImage.controllerId
+  private var _brokersBuilder: MetadataBrokersBuilder = null
+
+  def partitionsBuilder(): MetadataPartitionsBuilder = {
+    if (_partitionsBuilder == null) {
+      _partitionsBuilder = new MetadataPartitionsBuilder(brokerId, prevImage.partitions)
+    }
+    _partitionsBuilder
+  }
+
+  def hasPartitionChanges(): Boolean = _partitionsBuilder != null
+
+  def topicIdToName(uuid: Uuid): Option[String] = {
+    if (_partitionsBuilder != null) {
+      _partitionsBuilder.topicIdToName(uuid)
+    } else {
+      prevImage.topicIdToName(uuid)
+    }
+  }
+
+  def setControllerId(controllerId: Option[Int]) = {
+    _controllerId = controllerId
+  }
+
+  def brokersBuilder(): MetadataBrokersBuilder = {
+    if (_brokersBuilder == null) {
+      _brokersBuilder = new MetadataBrokersBuilder(log, prevImage.brokers)
+    }
+    _brokersBuilder
+  }
+
+  def broker(brokerId: Int): Option[MetadataBroker] = {
+    if (_brokersBuilder == null) {
+      prevImage.brokers.get(brokerId)
+    } else {
+      _brokersBuilder.get(brokerId)
+    }
+  }
+
+  def partition(topicName: String, partitionId: Int): Option[MetadataPartition] = {
+    if (_partitionsBuilder == null) {
+      prevImage.partitions.get(topicName, partitionId)
+    } else {
+      _partitionsBuilder.get(topicName, partitionId)
+    }
+  }
+
+  def hasChanges(): Boolean = {
+    _partitionsBuilder != null ||
+      !_controllerId.equals(prevImage.controllerId) ||
+      _brokersBuilder != null
+  }
+
+  def build(): MetadataImage = {
+    val nextPartitions = if (_partitionsBuilder == null) {
+      prevImage.partitions
+    } else {
+      _partitionsBuilder.build()
+    }
+    val nextBrokers = if (_brokersBuilder == null) {
+      prevImage.brokers
+    } else {
+      _brokersBuilder.build()
+    }
+    MetadataImage(nextPartitions, _controllerId, nextBrokers)
+  }
+}
+
+case class MetadataImage(partitions: MetadataPartitions,
+                         controllerId: Option[Int],
+                         brokers: MetadataBrokers) {
+  def this() = {
+    this(new MetadataPartitions(Collections.emptyMap(), Collections.emptyMap()),
+      None,
+      new MetadataBrokers(Collections.emptyList(), new util.HashMap[Integer, MetadataBroker]()))
+  }
+
+  def contains(partition: TopicPartition): Boolean =
+    partitions.get(partition.topic(), partition.partition()).isDefined
+
+  def contains(topic: String): Boolean = partitions.topicPartitions(topic).hasNext
+
+  def getAliveBroker(id: Int): Option[MetadataBroker] = brokers.getAlive(id)
+
+  def numAliveBrokers(): Int = brokers.aliveBrokers().size
+
+  def controller(): Option[MetadataBroker] = controllerId.flatMap(id => brokers.getAlive(id))
+
+  def topicIdToName(uuid: Uuid): Option[String] = {

Review comment:
       nit: `uuid` -> `topicId`

##########
File path: core/src/main/scala/kafka/server/metadata/MetadataImage.scala
##########
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+
+import org.apache.kafka.common.{TopicPartition, Uuid}
+import org.slf4j.Logger
+
+case class MetadataImageBuilder(brokerId: Int,
+                                log: Logger,
+                                prevImage: MetadataImage) {
+  private var _partitionsBuilder: MetadataPartitionsBuilder = null
+  private var _controllerId = prevImage.controllerId
+  private var _brokersBuilder: MetadataBrokersBuilder = null
+
+  def partitionsBuilder(): MetadataPartitionsBuilder = {
+    if (_partitionsBuilder == null) {
+      _partitionsBuilder = new MetadataPartitionsBuilder(brokerId, prevImage.partitions)
+    }
+    _partitionsBuilder
+  }
+
+  def hasPartitionChanges(): Boolean = _partitionsBuilder != null
+
+  def topicIdToName(uuid: Uuid): Option[String] = {
+    if (_partitionsBuilder != null) {
+      _partitionsBuilder.topicIdToName(uuid)
+    } else {
+      prevImage.topicIdToName(uuid)
+    }
+  }
+
+  def setControllerId(controllerId: Option[Int]) = {
+    _controllerId = controllerId
+  }
+
+  def brokersBuilder(): MetadataBrokersBuilder = {
+    if (_brokersBuilder == null) {
+      _brokersBuilder = new MetadataBrokersBuilder(log, prevImage.brokers)
+    }
+    _brokersBuilder
+  }
+
+  def broker(brokerId: Int): Option[MetadataBroker] = {
+    if (_brokersBuilder == null) {
+      prevImage.brokers.get(brokerId)
+    } else {
+      _brokersBuilder.get(brokerId)
+    }
+  }
+
+  def partition(topicName: String, partitionId: Int): Option[MetadataPartition] = {
+    if (_partitionsBuilder == null) {
+      prevImage.partitions.get(topicName, partitionId)
+    } else {
+      _partitionsBuilder.get(topicName, partitionId)
+    }
+  }
+
+  def hasChanges(): Boolean = {
+    _partitionsBuilder != null ||
+      !_controllerId.equals(prevImage.controllerId) ||
+      _brokersBuilder != null
+  }
+
+  def build(): MetadataImage = {
+    val nextPartitions = if (_partitionsBuilder == null) {
+      prevImage.partitions
+    } else {
+      _partitionsBuilder.build()
+    }
+    val nextBrokers = if (_brokersBuilder == null) {
+      prevImage.brokers
+    } else {
+      _brokersBuilder.build()
+    }
+    MetadataImage(nextPartitions, _controllerId, nextBrokers)
+  }
+}
+
+case class MetadataImage(partitions: MetadataPartitions,
+                         controllerId: Option[Int],
+                         brokers: MetadataBrokers) {
+  def this() = {
+    this(new MetadataPartitions(Collections.emptyMap(), Collections.emptyMap()),
+      None,
+      new MetadataBrokers(Collections.emptyList(), new util.HashMap[Integer, MetadataBroker]()))
+  }
+
+  def contains(partition: TopicPartition): Boolean =
+    partitions.get(partition.topic(), partition.partition()).isDefined
+
+  def contains(topic: String): Boolean = partitions.topicPartitions(topic).hasNext
+
+  def getAliveBroker(id: Int): Option[MetadataBroker] = brokers.getAlive(id)

Review comment:
       nit: drop `get`

##########
File path: core/src/main/scala/kafka/server/metadata/MetadataPartitions.scala
##########
@@ -0,0 +1,282 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+import java.util.function.BiConsumer
+
+import org.apache.kafka.common.message.LeaderAndIsrRequestData
+import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState
+import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState
+import org.apache.kafka.common.metadata.{IsrChangeRecord, PartitionRecord}
+import org.apache.kafka.common.{TopicPartition, Uuid}
+
+import scala.jdk.CollectionConverters._
+
+
+object MetadataPartition {
+  def apply(name: String, record: PartitionRecord): MetadataPartition = {
+    MetadataPartition(name,
+      record.partitionId(),
+      record.leader(),
+      record.leaderEpoch(),
+      record.replicas(),
+      record.isr(),
+      Collections.emptyList(), // TODO: handle offline replicas
+      Collections.emptyList(),
+      Collections.emptyList())
+  }
+
+  def apply(prevPartition: Option[MetadataPartition],
+            partition: UpdateMetadataPartitionState): MetadataPartition = {
+    new MetadataPartition(partition.topicName(),
+      partition.partitionIndex(),
+      partition.leader(),
+      partition.leaderEpoch(),
+      partition.replicas(),
+      partition.isr(),
+      partition.offlineReplicas(),
+      prevPartition.flatMap(p => Some(p.addingReplicas)).getOrElse(Collections.emptyList()),
+      prevPartition.flatMap(p => Some(p.removingReplicas)).getOrElse(Collections.emptyList())
+    )
+  }
+}
+
+case class MetadataPartition(topicName: String,
+                             partitionIndex: Int,
+                             leaderId: Int,
+                             leaderEpoch: Int,
+                             replicas: util.List[Integer],
+                             isr: util.List[Integer],
+                             offlineReplicas: util.List[Integer],
+                             addingReplicas: util.List[Integer],
+                             removingReplicas: util.List[Integer]) {
+  def toTopicPartition(): TopicPartition = new TopicPartition(topicName, partitionIndex)
+
+  def toLeaderAndIsrPartitionState(isNew: Boolean): LeaderAndIsrRequestData.LeaderAndIsrPartitionState = {
+    new LeaderAndIsrPartitionState().setTopicName(topicName).
+      setPartitionIndex(partitionIndex).
+      setLeader(leaderId).
+      setLeaderEpoch(leaderEpoch).
+      setReplicas(replicas).
+      setIsr(isr).
+      setAddingReplicas(addingReplicas).
+      setRemovingReplicas(removingReplicas).
+      setIsNew(isNew)
+    // Note: we don't set ZKVersion here.
+  }
+
+  def isReplicaFor(brokerId: Int): Boolean = replicas.contains(Integer.valueOf(brokerId))
+
+  def copyWithIsrChanges(record: IsrChangeRecord): MetadataPartition = {
+    MetadataPartition(topicName,
+      partitionIndex,
+      record.leader(),
+      record.leaderEpoch(),
+      replicas,
+      record.isr(),
+      offlineReplicas,
+      addingReplicas,
+      removingReplicas)
+  }
+}
+
+class MetadataPartitionsBuilder(val brokerId: Int,
+                                val prevPartitions: MetadataPartitions) {
+  private var newNameMap = prevPartitions.copyNameMap()
+  private var newIdMap = prevPartitions.copyIdMap()
+  private val changed = new util.IdentityHashMap[Any, Boolean]()
+  private val _localChanged = new util.HashSet[MetadataPartition]
+  private val _localRemoved = new util.HashSet[MetadataPartition]
+
+  def topicIdToName(id: Uuid): Option[String] = Option(newIdMap.get(id))
+
+  def removeTopicById(id: Uuid): Iterable[MetadataPartition] = {
+    Option(newIdMap.remove(id)) match {
+      case None => throw new RuntimeException(s"Unable to locate topic with ID ${id}")
+      case Some(name) => newNameMap.remove(name).values().asScala
+    }
+  }
+
+  def handleIsrChange(record: IsrChangeRecord): Unit = {
+    Option(newIdMap.get(record.topicId())) match {
+      case None => throw new RuntimeException(s"Unable to locate topic with ID ${record.topicId()}")
+      case Some(name) => Option(newNameMap.get(name)) match {
+        case None => throw new RuntimeException(s"Unable to locate topic with name ${name}")
+        case Some(partitionMap) => Option(partitionMap.get(record.partitionId())) match {
+          case None => throw new RuntimeException(s"Unable to locate ${name}-${record.partitionId}")
+          case Some(partition) => set(partition.copyWithIsrChanges(record))
+        }
+      }
+    }
+  }
+
+  def addUuidMapping(name: String, id: Uuid): Unit = {
+    newIdMap.put(id, name)
+  }
+
+  def removeUuidMapping(id: Uuid): Unit = {
+    newIdMap.remove(id)
+  }
+
+  def get(topicName: String, partitionId: Int): Option[MetadataPartition] = {

Review comment:
       nit: I think a more explicit name would help. This naming assumes that it is clear enough in the context of `MetadataPartitions`, but it can get a little confusing when a few method calls are chained together.
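   For example (the name is just a suggestion, mirroring `MetadataImageBuilder.partition`):
   ```scala
   def partition(topicName: String, partitionId: Int): Option[MetadataPartition] =
     Option(newNameMap.get(topicName)).flatMap(m => Option(m.get(partitionId)))
   ```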

##########
File path: core/src/main/scala/kafka/server/metadata/MetadataPartitions.scala
##########
@@ -0,0 +1,282 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+import java.util.function.BiConsumer
+
+import org.apache.kafka.common.message.LeaderAndIsrRequestData
+import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState
+import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState
+import org.apache.kafka.common.metadata.{IsrChangeRecord, PartitionRecord}
+import org.apache.kafka.common.{TopicPartition, Uuid}
+
+import scala.jdk.CollectionConverters._
+
+
+object MetadataPartition {
+  def apply(name: String, record: PartitionRecord): MetadataPartition = {
+    MetadataPartition(name,
+      record.partitionId(),
+      record.leader(),
+      record.leaderEpoch(),
+      record.replicas(),
+      record.isr(),
+      Collections.emptyList(), // TODO: handle offline replicas
+      Collections.emptyList(),
+      Collections.emptyList())
+  }
+
+  def apply(prevPartition: Option[MetadataPartition],
+            partition: UpdateMetadataPartitionState): MetadataPartition = {
+    new MetadataPartition(partition.topicName(),
+      partition.partitionIndex(),
+      partition.leader(),
+      partition.leaderEpoch(),
+      partition.replicas(),
+      partition.isr(),
+      partition.offlineReplicas(),
+      prevPartition.flatMap(p => Some(p.addingReplicas)).getOrElse(Collections.emptyList()),
+      prevPartition.flatMap(p => Some(p.removingReplicas)).getOrElse(Collections.emptyList())
+    )
+  }
+}
+
+case class MetadataPartition(topicName: String,
+                             partitionIndex: Int,
+                             leaderId: Int,
+                             leaderEpoch: Int,
+                             replicas: util.List[Integer],
+                             isr: util.List[Integer],
+                             offlineReplicas: util.List[Integer],
+                             addingReplicas: util.List[Integer],
+                             removingReplicas: util.List[Integer]) {
+  def toTopicPartition(): TopicPartition = new TopicPartition(topicName, partitionIndex)
+
+  def toLeaderAndIsrPartitionState(isNew: Boolean): LeaderAndIsrRequestData.LeaderAndIsrPartitionState = {
+    new LeaderAndIsrPartitionState().setTopicName(topicName).
+      setPartitionIndex(partitionIndex).
+      setLeader(leaderId).
+      setLeaderEpoch(leaderEpoch).
+      setReplicas(replicas).
+      setIsr(isr).
+      setAddingReplicas(addingReplicas).
+      setRemovingReplicas(removingReplicas).
+      setIsNew(isNew)
+    // Note: we don't set ZKVersion here.
+  }
+
+  def isReplicaFor(brokerId: Int): Boolean = replicas.contains(Integer.valueOf(brokerId))
+
+  def copyWithIsrChanges(record: IsrChangeRecord): MetadataPartition = {
+    MetadataPartition(topicName,
+      partitionIndex,
+      record.leader(),
+      record.leaderEpoch(),
+      replicas,
+      record.isr(),
+      offlineReplicas,
+      addingReplicas,
+      removingReplicas)
+  }
+}
+
+class MetadataPartitionsBuilder(val brokerId: Int,
+                                val prevPartitions: MetadataPartitions) {
+  private var newNameMap = prevPartitions.copyNameMap()
+  private var newIdMap = prevPartitions.copyIdMap()
+  private val changed = new util.IdentityHashMap[Any, Boolean]()
+  private val _localChanged = new util.HashSet[MetadataPartition]

Review comment:
       nit: the naming convention is not very clear to me. Not sure why these two collections get an underscore, but `changed` does not.

##########
File path: core/src/main/scala/kafka/server/metadata/MetadataPartitions.scala
##########
@@ -0,0 +1,282 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+import java.util.function.BiConsumer
+
+import org.apache.kafka.common.message.LeaderAndIsrRequestData
+import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState
+import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState
+import org.apache.kafka.common.metadata.{IsrChangeRecord, PartitionRecord}
+import org.apache.kafka.common.{TopicPartition, Uuid}
+
+import scala.jdk.CollectionConverters._
+
+
+object MetadataPartition {
+  def apply(name: String, record: PartitionRecord): MetadataPartition = {
+    MetadataPartition(name,
+      record.partitionId(),
+      record.leader(),
+      record.leaderEpoch(),
+      record.replicas(),
+      record.isr(),
+      Collections.emptyList(), // TODO: handle offline replicas
+      Collections.emptyList(),
+      Collections.emptyList())
+  }
+
+  def apply(prevPartition: Option[MetadataPartition],
+            partition: UpdateMetadataPartitionState): MetadataPartition = {
+    new MetadataPartition(partition.topicName(),
+      partition.partitionIndex(),
+      partition.leader(),
+      partition.leaderEpoch(),
+      partition.replicas(),
+      partition.isr(),
+      partition.offlineReplicas(),
+      prevPartition.flatMap(p => Some(p.addingReplicas)).getOrElse(Collections.emptyList()),
+      prevPartition.flatMap(p => Some(p.removingReplicas)).getOrElse(Collections.emptyList())
+    )
+  }
+}
+
+case class MetadataPartition(topicName: String,
+                             partitionIndex: Int,
+                             leaderId: Int,
+                             leaderEpoch: Int,
+                             replicas: util.List[Integer],
+                             isr: util.List[Integer],
+                             offlineReplicas: util.List[Integer],
+                             addingReplicas: util.List[Integer],
+                             removingReplicas: util.List[Integer]) {
+  def toTopicPartition(): TopicPartition = new TopicPartition(topicName, partitionIndex)
+
+  def toLeaderAndIsrPartitionState(isNew: Boolean): LeaderAndIsrRequestData.LeaderAndIsrPartitionState = {
+    new LeaderAndIsrPartitionState().setTopicName(topicName).
+      setPartitionIndex(partitionIndex).
+      setLeader(leaderId).
+      setLeaderEpoch(leaderEpoch).
+      setReplicas(replicas).
+      setIsr(isr).
+      setAddingReplicas(addingReplicas).
+      setRemovingReplicas(removingReplicas).
+      setIsNew(isNew)
+    // Note: we don't set ZKVersion here.
+  }
+
+  def isReplicaFor(brokerId: Int): Boolean = replicas.contains(Integer.valueOf(brokerId))
+
+  def copyWithIsrChanges(record: IsrChangeRecord): MetadataPartition = {
+    MetadataPartition(topicName,
+      partitionIndex,
+      record.leader(),
+      record.leaderEpoch(),
+      replicas,
+      record.isr(),
+      offlineReplicas,
+      addingReplicas,
+      removingReplicas)
+  }
+}
+
+class MetadataPartitionsBuilder(val brokerId: Int,
+                                val prevPartitions: MetadataPartitions) {
+  private var newNameMap = prevPartitions.copyNameMap()
+  private var newIdMap = prevPartitions.copyIdMap()
+  private val changed = new util.IdentityHashMap[Any, Boolean]()
+  private val _localChanged = new util.HashSet[MetadataPartition]
+  private val _localRemoved = new util.HashSet[MetadataPartition]
+
+  def topicIdToName(id: Uuid): Option[String] = Option(newIdMap.get(id))
+
+  def removeTopicById(id: Uuid): Iterable[MetadataPartition] = {
+    Option(newIdMap.remove(id)) match {
+      case None => throw new RuntimeException(s"Unable to locate topic with ID ${id}")
+      case Some(name) => newNameMap.remove(name).values().asScala
+    }
+  }
+
+  def handleIsrChange(record: IsrChangeRecord): Unit = {
+    Option(newIdMap.get(record.topicId())) match {
+      case None => throw new RuntimeException(s"Unable to locate topic with ID ${record.topicId()}")
+      case Some(name) => Option(newNameMap.get(name)) match {
+        case None => throw new RuntimeException(s"Unable to locate topic with name ${name}")
+        case Some(partitionMap) => Option(partitionMap.get(record.partitionId())) match {
+          case None => throw new RuntimeException(s"Unable to locate ${name}-${record.partitionId}")
+          case Some(partition) => set(partition.copyWithIsrChanges(record))
+        }
+      }
+    }
+  }
+
+  def addUuidMapping(name: String, id: Uuid): Unit = {
+    newIdMap.put(id, name)
+  }
+
+  def removeUuidMapping(id: Uuid): Unit = {
+    newIdMap.remove(id)
+  }
+
+  def get(topicName: String, partitionId: Int): Option[MetadataPartition] = {
+    Option(newNameMap.get(topicName)).flatMap(m => Option(m.get(partitionId)))
+  }
+
+  def set(partition: MetadataPartition): Unit = {

Review comment:
       We can probably leave this as a future improvement, but I'm a little concerned that we don't have the topicId here. I am guessing that the caller has other state assertions which ensure that partition state is updated consistently with respect to the topic id map, but it might also be nice if we could be more defensive about protecting internal state.
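   For illustration, a more defensive variant might look like this (hypothetical; it assumes callers can supply the topic id):
   ```scala
   // Hypothetical sketch: validate the id-to-name mapping before mutating state.
   def set(topicId: Uuid, partition: MetadataPartition): Unit = {
     Option(newIdMap.get(topicId)) match {
       case Some(name) if name == partition.topicName => set(partition)
       case Some(name) => throw new RuntimeException(
         s"Topic ID $topicId maps to topic $name, not ${partition.topicName}")
       case None => throw new RuntimeException(s"Unknown topic ID $topicId")
     }
   }
   ```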

##########
File path: core/src/main/scala/kafka/server/metadata/MetadataImage.scala
##########
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+
+import org.apache.kafka.common.{TopicPartition, Uuid}
+import org.slf4j.Logger
+
+case class MetadataImageBuilder(brokerId: Int,
+                                log: Logger,
+                                prevImage: MetadataImage) {
+  private var _partitionsBuilder: MetadataPartitionsBuilder = null
+  private var _controllerId = prevImage.controllerId
+  private var _brokersBuilder: MetadataBrokersBuilder = null
+
+  def partitionsBuilder(): MetadataPartitionsBuilder = {
+    if (_partitionsBuilder == null) {
+      _partitionsBuilder = new MetadataPartitionsBuilder(brokerId, prevImage.partitions)
+    }
+    _partitionsBuilder
+  }
+
+  def hasPartitionChanges(): Boolean = _partitionsBuilder != null
+
+  def topicIdToName(uuid: Uuid): Option[String] = {

Review comment:
       nit: `uuid` -> `topicId`

##########
File path: core/src/main/scala/kafka/server/metadata/MetadataPartitions.scala
##########
@@ -0,0 +1,282 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+import java.util.function.BiConsumer
+
+import org.apache.kafka.common.message.LeaderAndIsrRequestData
+import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState
+import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState
+import org.apache.kafka.common.metadata.{IsrChangeRecord, PartitionRecord}
+import org.apache.kafka.common.{TopicPartition, Uuid}
+
+import scala.jdk.CollectionConverters._
+
+
+object MetadataPartition {
+  def apply(name: String, record: PartitionRecord): MetadataPartition = {
+    MetadataPartition(name,
+      record.partitionId(),
+      record.leader(),
+      record.leaderEpoch(),
+      record.replicas(),
+      record.isr(),
+      Collections.emptyList(), // TODO: handle offline replicas

Review comment:
       Can we turn the TODO into a jira?

##########
File path: core/src/main/scala/kafka/server/metadata/MetadataPartitions.scala
##########
@@ -0,0 +1,282 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+import java.util.function.BiConsumer
+
+import org.apache.kafka.common.message.LeaderAndIsrRequestData
+import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState
+import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState
+import org.apache.kafka.common.metadata.{IsrChangeRecord, PartitionRecord}
+import org.apache.kafka.common.{TopicPartition, Uuid}
+
+import scala.jdk.CollectionConverters._
+
+
+object MetadataPartition {
+  def apply(name: String, record: PartitionRecord): MetadataPartition = {
+    MetadataPartition(name,
+      record.partitionId(),
+      record.leader(),
+      record.leaderEpoch(),
+      record.replicas(),
+      record.isr(),
+      Collections.emptyList(), // TODO: handle offline replicas
+      Collections.emptyList(),
+      Collections.emptyList())
+  }
+
+  def apply(prevPartition: Option[MetadataPartition],
+            partition: UpdateMetadataPartitionState): MetadataPartition = {
+    new MetadataPartition(partition.topicName(),
+      partition.partitionIndex(),
+      partition.leader(),
+      partition.leaderEpoch(),
+      partition.replicas(),
+      partition.isr(),
+      partition.offlineReplicas(),
+      prevPartition.flatMap(p => Some(p.addingReplicas)).getOrElse(Collections.emptyList()),
+      prevPartition.flatMap(p => Some(p.removingReplicas)).getOrElse(Collections.emptyList())
+    )
+  }
+}
+
+case class MetadataPartition(topicName: String,
+                             partitionIndex: Int,
+                             leaderId: Int,
+                             leaderEpoch: Int,
+                             replicas: util.List[Integer],
+                             isr: util.List[Integer],
+                             offlineReplicas: util.List[Integer],
+                             addingReplicas: util.List[Integer],
+                             removingReplicas: util.List[Integer]) {
+  def toTopicPartition(): TopicPartition = new TopicPartition(topicName, partitionIndex)
+
+  def toLeaderAndIsrPartitionState(isNew: Boolean): LeaderAndIsrRequestData.LeaderAndIsrPartitionState = {
+    new LeaderAndIsrPartitionState().setTopicName(topicName).
+      setPartitionIndex(partitionIndex).
+      setLeader(leaderId).
+      setLeaderEpoch(leaderEpoch).
+      setReplicas(replicas).
+      setIsr(isr).
+      setAddingReplicas(addingReplicas).
+      setRemovingReplicas(removingReplicas).
+      setIsNew(isNew)
+    // Note: we don't set ZKVersion here.
+  }
+
+  def isReplicaFor(brokerId: Int): Boolean = replicas.contains(Integer.valueOf(brokerId))
+
+  def copyWithIsrChanges(record: IsrChangeRecord): MetadataPartition = {
+    MetadataPartition(topicName,
+      partitionIndex,
+      record.leader(),
+      record.leaderEpoch(),
+      replicas,
+      record.isr(),
+      offlineReplicas,
+      addingReplicas,
+      removingReplicas)
+  }
+}
+
+class MetadataPartitionsBuilder(val brokerId: Int,
+                                val prevPartitions: MetadataPartitions) {
+  private var newNameMap = prevPartitions.copyNameMap()
+  private var newIdMap = prevPartitions.copyIdMap()
+  private val changed = new util.IdentityHashMap[Any, Boolean]()
+  private val _localChanged = new util.HashSet[MetadataPartition]
+  private val _localRemoved = new util.HashSet[MetadataPartition]
+
+  def topicIdToName(id: Uuid): Option[String] = Option(newIdMap.get(id))
+
+  def removeTopicById(id: Uuid): Iterable[MetadataPartition] = {
+    Option(newIdMap.remove(id)) match {
+      case None => throw new RuntimeException(s"Unable to locate topic with ID ${id}")

Review comment:
       nit: `${id}` -> `$id` (there are a few of these in here)
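
       e.g.:
   ```scala
   case None => throw new RuntimeException(s"Unable to locate topic with ID $id")
   ```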

##########
File path: core/src/test/scala/kafka/server/metadata/MetadataBrokersTest.scala
##########
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util.Collections
+import kafka.utils.TestUtils
+import org.junit.jupiter.api.Assertions._
+import org.junit.jupiter.api.{Test, Timeout}
+import org.slf4j.LoggerFactory
+
+import java.util.concurrent.TimeUnit
+import scala.collection.mutable
+import scala.jdk.CollectionConverters._
+
+
+@Timeout(value = 120000, unit = TimeUnit.MILLISECONDS)
+class MetadataBrokersTest {
+
+  val log = LoggerFactory.getLogger(classOf[MetadataBrokersTest])
+
+  val emptyBrokers = new MetadataBrokers(Collections.emptyList(), Collections.emptyMap())
+
+  @Test
+  def testBuildBrokers(): Unit = {
+    val builder = new MetadataBrokersBuilder(log, emptyBrokers)
+    builder.add(TestUtils.createMetadataBroker(0))
+    builder.add(TestUtils.createMetadataBroker(1))
+    builder.add(TestUtils.createMetadataBroker(2))
+    builder.add(TestUtils.createMetadataBroker(3))
+    builder.remove(0)
+    val brokers = builder.build()
+    val found = new mutable.HashSet[MetadataBroker]
+    brokers.iterator().foreach { found += _ }
+    val expected = new mutable.HashSet[MetadataBroker]
+    expected += TestUtils.createMetadataBroker(1)
+    expected += TestUtils.createMetadataBroker(2)
+    expected += TestUtils.createMetadataBroker(3)
+    assertEquals(expected, found)
+  }
+
+  @Test
+  def testChangeFencing(): Unit = {
+    val builder = new MetadataBrokersBuilder(log, emptyBrokers)
+    assertEquals(None, builder.get(0))
+    assertThrows(classOf[RuntimeException], () => builder.changeFencing(0, false))
+    builder.add(TestUtils.createMetadataBroker(0, fenced = true))
+    assertTrue(builder.get(0).get.fenced)
+    builder.changeFencing(0, false)
+    assertFalse(builder.get(0).get.fenced)

Review comment:
       Should we verify the result after `build()` as well?
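
       For example (assuming the built `MetadataBrokers` exposes the same view through `iterator()` that the test above already uses):
   ```scala
   // Re-check the fencing state on the immutable result of build().
   val brokers = builder.build()
   val broker0 = brokers.iterator().find(_.id == 0)
   assertTrue(broker0.isDefined)
   assertFalse(broker0.get.fenced)
   ```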

##########
File path: core/src/main/scala/kafka/server/metadata/MetadataPartitions.scala
##########
@@ -0,0 +1,282 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+import java.util.function.BiConsumer
+
+import org.apache.kafka.common.message.LeaderAndIsrRequestData
+import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState
+import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState
+import org.apache.kafka.common.metadata.{IsrChangeRecord, PartitionRecord}
+import org.apache.kafka.common.{TopicPartition, Uuid}
+
+import scala.jdk.CollectionConverters._
+
+
+object MetadataPartition {
+  def apply(name: String, record: PartitionRecord): MetadataPartition = {
+    MetadataPartition(name,
+      record.partitionId(),
+      record.leader(),
+      record.leaderEpoch(),
+      record.replicas(),
+      record.isr(),
+      Collections.emptyList(), // TODO: handle offline replicas
+      Collections.emptyList(),
+      Collections.emptyList())
+  }
+
+  def apply(prevPartition: Option[MetadataPartition],
+            partition: UpdateMetadataPartitionState): MetadataPartition = {
+    new MetadataPartition(partition.topicName(),
+      partition.partitionIndex(),
+      partition.leader(),
+      partition.leaderEpoch(),
+      partition.replicas(),
+      partition.isr(),
+      partition.offlineReplicas(),
+      prevPartition.flatMap(p => Some(p.addingReplicas)).getOrElse(Collections.emptyList()),
+      prevPartition.flatMap(p => Some(p.removingReplicas)).getOrElse(Collections.emptyList())
+    )
+  }
+}
+
+case class MetadataPartition(topicName: String,
+                             partitionIndex: Int,
+                             leaderId: Int,
+                             leaderEpoch: Int,
+                             replicas: util.List[Integer],
+                             isr: util.List[Integer],
+                             offlineReplicas: util.List[Integer],
+                             addingReplicas: util.List[Integer],
+                             removingReplicas: util.List[Integer]) {
+  def toTopicPartition(): TopicPartition = new TopicPartition(topicName, partitionIndex)
+
+  def toLeaderAndIsrPartitionState(isNew: Boolean): LeaderAndIsrRequestData.LeaderAndIsrPartitionState = {
+    new LeaderAndIsrPartitionState().setTopicName(topicName).
+      setPartitionIndex(partitionIndex).
+      setLeader(leaderId).
+      setLeaderEpoch(leaderEpoch).
+      setReplicas(replicas).
+      setIsr(isr).
+      setAddingReplicas(addingReplicas).
+      setRemovingReplicas(removingReplicas).
+      setIsNew(isNew)
+    // Note: we don't set ZKVersion here.
+  }
+
+  def isReplicaFor(brokerId: Int): Boolean = replicas.contains(Integer.valueOf(brokerId))
+
+  def copyWithIsrChanges(record: IsrChangeRecord): MetadataPartition = {
+    MetadataPartition(topicName,
+      partitionIndex,
+      record.leader(),
+      record.leaderEpoch(),
+      replicas,
+      record.isr(),
+      offlineReplicas,
+      addingReplicas,
+      removingReplicas)
+  }
+}
+
+class MetadataPartitionsBuilder(val brokerId: Int,
+                                val prevPartitions: MetadataPartitions) {
+  private var newNameMap = prevPartitions.copyNameMap()
+  private var newIdMap = prevPartitions.copyIdMap()
+  private val changed = new util.IdentityHashMap[Any, Boolean]()
+  private val _localChanged = new util.HashSet[MetadataPartition]
+  private val _localRemoved = new util.HashSet[MetadataPartition]
+
+  def topicIdToName(id: Uuid): Option[String] = Option(newIdMap.get(id))
+
+  def removeTopicById(id: Uuid): Iterable[MetadataPartition] = {
+    Option(newIdMap.remove(id)) match {
+      case None => throw new RuntimeException(s"Unable to locate topic with ID ${id}")
+      case Some(name) => newNameMap.remove(name).values().asScala
+    }
+  }
+
+  def handleIsrChange(record: IsrChangeRecord): Unit = {
+    Option(newIdMap.get(record.topicId())) match {
+      case None => throw new RuntimeException(s"Unable to locate topic with ID ${record.topicId()}")
+      case Some(name) => Option(newNameMap.get(name)) match {
+        case None => throw new RuntimeException(s"Unable to locate topic with name ${name}")
+        case Some(partitionMap) => Option(partitionMap.get(record.partitionId())) match {
+          case None => throw new RuntimeException(s"Unable to locate ${name}-${record.partitionId}")
+          case Some(partition) => set(partition.copyWithIsrChanges(record))
+        }
+      }
+    }
+  }
+
+  def addUuidMapping(name: String, id: Uuid): Unit = {
+    newIdMap.put(id, name)
+  }
+
+  def removeUuidMapping(id: Uuid): Unit = {
+    newIdMap.remove(id)
+  }
+
+  def get(topicName: String, partitionId: Int): Option[MetadataPartition] = {
+    Option(newNameMap.get(topicName)).flatMap(m => Option(m.get(partitionId)))
+  }
+
+  def set(partition: MetadataPartition): Unit = {
+    val prevPartitionMap = newNameMap.get(partition.topicName)
+    val newPartitionMap = if (prevPartitionMap == null) {
+      val m = new util.HashMap[Int, MetadataPartition](1)
+      changed.put(m, true)
+      m
+    } else if (changed.containsKey(prevPartitionMap)) {
+      prevPartitionMap
+    } else {
+      val m = new util.HashMap[Int, MetadataPartition](prevPartitionMap.size() + 1)
+      m.putAll(prevPartitionMap)
+      changed.put(m, true)
+      m
+    }
+    val prevPartition = newPartitionMap.put(partition.partitionIndex, partition)
+    if (partition.isReplicaFor(brokerId)) {
+      _localChanged.add(partition)
+    } else if (prevPartition != null && prevPartition.isReplicaFor(brokerId)) {
+      _localRemoved.add(prevPartition)
+    }
+    newNameMap.put(partition.topicName, newPartitionMap)
+  }
+
+  def remove(topicName: String, partitionId: Int): Unit = {
+    val prevPartitionMap = newNameMap.get(topicName)
+    if (prevPartitionMap != null) {
+      if (changed.containsKey(prevPartitionMap)) {
+        val prevPartition = prevPartitionMap.remove(partitionId)
+        if (prevPartition.isReplicaFor(brokerId)) {
+          _localRemoved.add(prevPartition)
+        }
+      } else {
+        Option(prevPartitionMap.get(partitionId)).foreach { prevPartition =>
+          if (prevPartition.isReplicaFor(brokerId)) {
+            _localRemoved.add(prevPartition)
+          }
+          val newPartitionMap = new util.HashMap[Int, MetadataPartition](prevPartitionMap.size() - 1)
+          prevPartitionMap.forEach(new BiConsumer[Int, MetadataPartition]() {

Review comment:
       nit: can rewrite this as a lambda (Scala 2.12+ adapts it to the `BiConsumer`):
   ```scala
   prevPartitionMap.forEach { (key, metadataPartition) =>
     if (!key.equals(partitionId)) {
       newPartitionMap.put(key, metadataPartition)
     }
   }
   ```

##########
File path: core/src/main/scala/kafka/server/metadata/MetadataPartitions.scala
##########
@@ -0,0 +1,282 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+import java.util.function.BiConsumer
+
+import org.apache.kafka.common.message.LeaderAndIsrRequestData
+import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState
+import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState
+import org.apache.kafka.common.metadata.{IsrChangeRecord, PartitionRecord}
+import org.apache.kafka.common.{TopicPartition, Uuid}
+
+import scala.jdk.CollectionConverters._
+
+
+object MetadataPartition {
+  def apply(name: String, record: PartitionRecord): MetadataPartition = {
+    MetadataPartition(name,
+      record.partitionId(),
+      record.leader(),
+      record.leaderEpoch(),
+      record.replicas(),
+      record.isr(),
+      Collections.emptyList(), // TODO: handle offline replicas
+      Collections.emptyList(),
+      Collections.emptyList())
+  }
+
+  def apply(prevPartition: Option[MetadataPartition],
+            partition: UpdateMetadataPartitionState): MetadataPartition = {
+    new MetadataPartition(partition.topicName(),
+      partition.partitionIndex(),
+      partition.leader(),
+      partition.leaderEpoch(),
+      partition.replicas(),
+      partition.isr(),
+      partition.offlineReplicas(),
+      prevPartition.flatMap(p => Some(p.addingReplicas)).getOrElse(Collections.emptyList()),
+      prevPartition.flatMap(p => Some(p.removingReplicas)).getOrElse(Collections.emptyList())
+    )
+  }
+}
+
+case class MetadataPartition(topicName: String,
+                             partitionIndex: Int,
+                             leaderId: Int,
+                             leaderEpoch: Int,
+                             replicas: util.List[Integer],
+                             isr: util.List[Integer],
+                             offlineReplicas: util.List[Integer],
+                             addingReplicas: util.List[Integer],
+                             removingReplicas: util.List[Integer]) {
+  def toTopicPartition(): TopicPartition = new TopicPartition(topicName, partitionIndex)
+
+  def toLeaderAndIsrPartitionState(isNew: Boolean): LeaderAndIsrRequestData.LeaderAndIsrPartitionState = {
+    new LeaderAndIsrPartitionState().setTopicName(topicName).
+      setPartitionIndex(partitionIndex).
+      setLeader(leaderId).
+      setLeaderEpoch(leaderEpoch).
+      setReplicas(replicas).
+      setIsr(isr).
+      setAddingReplicas(addingReplicas).
+      setRemovingReplicas(removingReplicas).
+      setIsNew(isNew)
+    // Note: we don't set ZKVersion here.
+  }
+
+  def isReplicaFor(brokerId: Int): Boolean = replicas.contains(Integer.valueOf(brokerId))
+
+  def copyWithIsrChanges(record: IsrChangeRecord): MetadataPartition = {
+    MetadataPartition(topicName,
+      partitionIndex,
+      record.leader(),
+      record.leaderEpoch(),
+      replicas,
+      record.isr(),
+      offlineReplicas,
+      addingReplicas,
+      removingReplicas)
+  }
+}
+
+class MetadataPartitionsBuilder(val brokerId: Int,
+                                val prevPartitions: MetadataPartitions) {
+  private var newNameMap = prevPartitions.copyNameMap()
+  private var newIdMap = prevPartitions.copyIdMap()
+  private val changed = new util.IdentityHashMap[Any, Boolean]()
+  private val _localChanged = new util.HashSet[MetadataPartition]
+  private val _localRemoved = new util.HashSet[MetadataPartition]
+
+  def topicIdToName(id: Uuid): Option[String] = Option(newIdMap.get(id))
+
+  def removeTopicById(id: Uuid): Iterable[MetadataPartition] = {
+    Option(newIdMap.remove(id)) match {
+      case None => throw new RuntimeException(s"Unable to locate topic with ID ${id}")
+      case Some(name) => newNameMap.remove(name).values().asScala
+    }
+  }
+
+  def handleIsrChange(record: IsrChangeRecord): Unit = {
+    Option(newIdMap.get(record.topicId())) match {
+      case None => throw new RuntimeException(s"Unable to locate topic with ID ${record.topicId()}")
+      case Some(name) => Option(newNameMap.get(name)) match {
+        case None => throw new RuntimeException(s"Unable to locate topic with name ${name}")
+        case Some(partitionMap) => Option(partitionMap.get(record.partitionId())) match {
+          case None => throw new RuntimeException(s"Unable to locate ${name}-${record.partitionId}")
+          case Some(partition) => set(partition.copyWithIsrChanges(record))
+        }
+      }
+    }
+  }
+
+  def addUuidMapping(name: String, id: Uuid): Unit = {
+    newIdMap.put(id, name)
+  }
+
+  def removeUuidMapping(id: Uuid): Unit = {
+    newIdMap.remove(id)
+  }
+
+  def get(topicName: String, partitionId: Int): Option[MetadataPartition] = {
+    Option(newNameMap.get(topicName)).flatMap(m => Option(m.get(partitionId)))
+  }
+
+  def set(partition: MetadataPartition): Unit = {
+    val prevPartitionMap = newNameMap.get(partition.topicName)
+    val newPartitionMap = if (prevPartitionMap == null) {
+      val m = new util.HashMap[Int, MetadataPartition](1)
+      changed.put(m, true)
+      m
+    } else if (changed.containsKey(prevPartitionMap)) {
+      prevPartitionMap
+    } else {
+      val m = new util.HashMap[Int, MetadataPartition](prevPartitionMap.size() + 1)
+      m.putAll(prevPartitionMap)
+      changed.put(m, true)
+      m
+    }
+    val prevPartition = newPartitionMap.put(partition.partitionIndex, partition)
+    if (partition.isReplicaFor(brokerId)) {
+      _localChanged.add(partition)
+    } else if (prevPartition != null && prevPartition.isReplicaFor(brokerId)) {
+      _localRemoved.add(prevPartition)
+    }
+    newNameMap.put(partition.topicName, newPartitionMap)
+  }
+
+  def remove(topicName: String, partitionId: Int): Unit = {
+    val prevPartitionMap = newNameMap.get(topicName)
+    if (prevPartitionMap != null) {
+      if (changed.containsKey(prevPartitionMap)) {
+        val prevPartition = prevPartitionMap.remove(partitionId)
+        if (prevPartition.isReplicaFor(brokerId)) {
+          _localRemoved.add(prevPartition)
+        }
+      } else {
+        Option(prevPartitionMap.get(partitionId)).foreach { prevPartition =>
+          if (prevPartition.isReplicaFor(brokerId)) {
+            _localRemoved.add(prevPartition)
+          }
+          val newPartitionMap = new util.HashMap[Int, MetadataPartition](prevPartitionMap.size() - 1)
+          prevPartitionMap.forEach(new BiConsumer[Int, MetadataPartition]() {
+            override def accept(key: Int, value: MetadataPartition): Unit =
+              if (!key.equals(partitionId)) {
+                newPartitionMap.put(key, value)
+              }
+          })
+          changed.put(newPartitionMap, true)
+          newNameMap.put(topicName, newPartitionMap)
+        }
+      }
+    }
+  }
+
+  def build(): MetadataPartitions = {
+    val result = new MetadataPartitions(newNameMap, newIdMap)
+    newNameMap = Collections.unmodifiableMap(newNameMap)
+    newIdMap = Collections.unmodifiableMap(newIdMap)
+    result
+  }
+
+  def localChanged(): Set[MetadataPartition] = _localChanged.asScala.toSet
+
+  def localRemoved(): Set[MetadataPartition] = _localRemoved.asScala.toSet
+}
+
+case class MetadataPartitions(private val nameMap: util.Map[String, util.Map[Int, MetadataPartition]],
+                              private val idMap: util.Map[Uuid, String]) {
+  def topicIdToName(uuid: Uuid): Option[String] = Option(idMap.get(uuid))
+
+  def copyNameMap(): util.Map[String, util.Map[Int, MetadataPartition]] = {
+    val copy = new util.HashMap[String, util.Map[Int, MetadataPartition]](nameMap.size())
+    copy.putAll(nameMap)
+    copy
+  }
+
+  def copyIdMap(): util.Map[Uuid, String] = {
+    val copy = new util.HashMap[Uuid, String](idMap.size())
+    copy.putAll(idMap)
+    copy
+  }
+
+  def allPartitions(): Iterator[MetadataPartition] = new AllPartitionsIterator(nameMap).asScala
+
+  def allTopicNames(): Set[String] = nameMap.keySet().asScala.toSet

Review comment:
       Another potentially dangerous copy here. I'm assuming this is unintended since I think we are trying to make the image immutable. We could fix this by using `collection.Set` as the return type and getting rid of the call to `toSet`.
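
       i.e.:
   ```scala
   def allTopicNames(): collection.Set[String] = nameMap.keySet().asScala
   ```
       The caller then gets a read-only view over the (by then unmodifiable) name map instead of a fresh copy on every call.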

##########
File path: core/src/main/scala/kafka/server/metadata/MetadataPartitions.scala
##########
@@ -0,0 +1,282 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+import java.util.function.BiConsumer
+
+import org.apache.kafka.common.message.LeaderAndIsrRequestData
+import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState
+import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState
+import org.apache.kafka.common.metadata.{IsrChangeRecord, PartitionRecord}
+import org.apache.kafka.common.{TopicPartition, Uuid}
+
+import scala.jdk.CollectionConverters._
+
+
+object MetadataPartition {
+  def apply(name: String, record: PartitionRecord): MetadataPartition = {
+    MetadataPartition(name,
+      record.partitionId(),
+      record.leader(),
+      record.leaderEpoch(),
+      record.replicas(),
+      record.isr(),
+      Collections.emptyList(), // TODO: handle offline replicas
+      Collections.emptyList(),
+      Collections.emptyList())
+  }
+
+  def apply(prevPartition: Option[MetadataPartition],
+            partition: UpdateMetadataPartitionState): MetadataPartition = {
+    new MetadataPartition(partition.topicName(),
+      partition.partitionIndex(),
+      partition.leader(),
+      partition.leaderEpoch(),
+      partition.replicas(),
+      partition.isr(),
+      partition.offlineReplicas(),
+      prevPartition.flatMap(p => Some(p.addingReplicas)).getOrElse(Collections.emptyList()),
+      prevPartition.flatMap(p => Some(p.removingReplicas)).getOrElse(Collections.emptyList())
+    )
+  }
+}
+
+case class MetadataPartition(topicName: String,
+                             partitionIndex: Int,
+                             leaderId: Int,
+                             leaderEpoch: Int,
+                             replicas: util.List[Integer],
+                             isr: util.List[Integer],
+                             offlineReplicas: util.List[Integer],
+                             addingReplicas: util.List[Integer],
+                             removingReplicas: util.List[Integer]) {
+  def toTopicPartition(): TopicPartition = new TopicPartition(topicName, partitionIndex)
+
+  def toLeaderAndIsrPartitionState(isNew: Boolean): LeaderAndIsrRequestData.LeaderAndIsrPartitionState = {
+    new LeaderAndIsrPartitionState().setTopicName(topicName).
+      setPartitionIndex(partitionIndex).
+      setLeader(leaderId).
+      setLeaderEpoch(leaderEpoch).
+      setReplicas(replicas).
+      setIsr(isr).
+      setAddingReplicas(addingReplicas).
+      setRemovingReplicas(removingReplicas).
+      setIsNew(isNew)
+    // Note: we don't set ZKVersion here.
+  }
+
+  def isReplicaFor(brokerId: Int): Boolean = replicas.contains(Integer.valueOf(brokerId))
+
+  def copyWithIsrChanges(record: IsrChangeRecord): MetadataPartition = {
+    MetadataPartition(topicName,
+      partitionIndex,
+      record.leader(),
+      record.leaderEpoch(),
+      replicas,
+      record.isr(),
+      offlineReplicas,
+      addingReplicas,
+      removingReplicas)
+  }
+}
+
+class MetadataPartitionsBuilder(val brokerId: Int,
+                                val prevPartitions: MetadataPartitions) {
+  private var newNameMap = prevPartitions.copyNameMap()
+  private var newIdMap = prevPartitions.copyIdMap()
+  private val changed = new util.IdentityHashMap[Any, Boolean]()
+  private val _localChanged = new util.HashSet[MetadataPartition]
+  private val _localRemoved = new util.HashSet[MetadataPartition]
+
+  def topicIdToName(id: Uuid): Option[String] = Option(newIdMap.get(id))
+
+  def removeTopicById(id: Uuid): Iterable[MetadataPartition] = {
+    Option(newIdMap.remove(id)) match {
+      case None => throw new RuntimeException(s"Unable to locate topic with ID ${id}")
+      case Some(name) => newNameMap.remove(name).values().asScala
+    }
+  }
+
+  def handleIsrChange(record: IsrChangeRecord): Unit = {
+    Option(newIdMap.get(record.topicId())) match {
+      case None => throw new RuntimeException(s"Unable to locate topic with ID ${record.topicId()}")
+      case Some(name) => Option(newNameMap.get(name)) match {
+        case None => throw new RuntimeException(s"Unable to locate topic with name ${name}")
+        case Some(partitionMap) => Option(partitionMap.get(record.partitionId())) match {
+          case None => throw new RuntimeException(s"Unable to locate ${name}-${record.partitionId}")
+          case Some(partition) => set(partition.copyWithIsrChanges(record))
+        }
+      }
+    }
+  }
+
+  def addUuidMapping(name: String, id: Uuid): Unit = {
+    newIdMap.put(id, name)
+  }
+
+  def removeUuidMapping(id: Uuid): Unit = {
+    newIdMap.remove(id)
+  }
+
+  def get(topicName: String, partitionId: Int): Option[MetadataPartition] = {
+    Option(newNameMap.get(topicName)).flatMap(m => Option(m.get(partitionId)))
+  }
+
+  def set(partition: MetadataPartition): Unit = {
+    val prevPartitionMap = newNameMap.get(partition.topicName)
+    val newPartitionMap = if (prevPartitionMap == null) {
+      val m = new util.HashMap[Int, MetadataPartition](1)
+      changed.put(m, true)
+      m
+    } else if (changed.containsKey(prevPartitionMap)) {
+      prevPartitionMap
+    } else {
+      val m = new util.HashMap[Int, MetadataPartition](prevPartitionMap.size() + 1)
+      m.putAll(prevPartitionMap)
+      changed.put(m, true)
+      m
+    }
+    val prevPartition = newPartitionMap.put(partition.partitionIndex, partition)
+    if (partition.isReplicaFor(brokerId)) {
+      _localChanged.add(partition)
+    } else if (prevPartition != null && prevPartition.isReplicaFor(brokerId)) {
+      _localRemoved.add(prevPartition)
+    }
+    newNameMap.put(partition.topicName, newPartitionMap)
+  }
+
+  def remove(topicName: String, partitionId: Int): Unit = {
+    val prevPartitionMap = newNameMap.get(topicName)
+    if (prevPartitionMap != null) {
+      if (changed.containsKey(prevPartitionMap)) {
+        val prevPartition = prevPartitionMap.remove(partitionId)
+        if (prevPartition.isReplicaFor(brokerId)) {
+          _localRemoved.add(prevPartition)
+        }
+      } else {
+        Option(prevPartitionMap.get(partitionId)).foreach { prevPartition =>
+          if (prevPartition.isReplicaFor(brokerId)) {
+            _localRemoved.add(prevPartition)
+          }
+          val newPartitionMap = new util.HashMap[Int, MetadataPartition](prevPartitionMap.size() - 1)
+          prevPartitionMap.forEach(new BiConsumer[Int, MetadataPartition]() {
+            override def accept(key: Int, value: MetadataPartition): Unit =
+              if (!key.equals(partitionId)) {
+                newPartitionMap.put(key, value)
+              }
+          })
+          changed.put(newPartitionMap, true)
+          newNameMap.put(topicName, newPartitionMap)
+        }
+      }
+    }
+  }
+
+  def build(): MetadataPartitions = {
+    val result = new MetadataPartitions(newNameMap, newIdMap)
+    newNameMap = Collections.unmodifiableMap(newNameMap)
+    newIdMap = Collections.unmodifiableMap(newIdMap)
+    result
+  }
+
+  def localChanged(): Set[MetadataPartition] = _localChanged.asScala.toSet
+
+  def localRemoved(): Set[MetadataPartition] = _localRemoved.asScala.toSet
+}
+
+case class MetadataPartitions(private val nameMap: util.Map[String, util.Map[Int, MetadataPartition]],
+                              private val idMap: util.Map[Uuid, String]) {

Review comment:
       Intuitively, this seems a little backwards. I would have expected the partitions to be keyed by topicId, with a separate map from topic name to topic id. If we were going to support renaming, for example, then we would just update the name-to-id map.
   
       I suspect we are ultimately going to have to change this anyway, because it gives us no good way to look up the UUID of a given topic name. We will need to be able to do that to handle `Metadata` queries (see https://github.com/apache/kafka/commits/trunk/core/src/main/scala/kafka/server/MetadataCache.scala).
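
       Roughly (field names here are just for illustration):
   ```scala
   // One option: key the partition maps by topic id, so a rename only touches idsByName.
   case class MetadataPartitions(private val partitionsById: util.Map[Uuid, util.Map[Int, MetadataPartition]],
                                 private val idsByName: util.Map[String, Uuid]) {
     def topicNameToId(name: String): Option[Uuid] = Option(idsByName.get(name))
   
     def get(topicName: String, partitionId: Int): Option[MetadataPartition] =
       topicNameToId(topicName)
         .flatMap(id => Option(partitionsById.get(id)))
         .flatMap(m => Option(m.get(partitionId)))
   }
   ```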

##########
File path: core/src/test/scala/kafka/server/metadata/MetadataPartitionsTest.scala
##########
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+import org.apache.kafka.common.Uuid
+import org.junit.jupiter.api.Assertions._
+import org.junit.jupiter.api.{Test, Timeout}
+import org.slf4j.LoggerFactory
+
+import java.util.concurrent.TimeUnit
+import scala.collection.mutable
+import scala.jdk.CollectionConverters._
+
+
+@Timeout(value = 120000, unit = TimeUnit.MILLISECONDS)
+class MetadataPartitionsTest {
+
+  val log = LoggerFactory.getLogger(classOf[MetadataPartitionsTest])
+
+  val emptyPartitions = new MetadataPartitions(Collections.emptyMap(), Collections.emptyMap())
+
+  private def newPartition(topicName: String,
+                           partitionIndex: Int,
+                           replicas: Option[Seq[Int]] = None,
+                           isr: Option[Seq[Int]] = None): MetadataPartition = {
+    val effectiveReplicas = replicas match {
+      case None => util.Arrays.asList(Integer.valueOf(partitionIndex),

Review comment:
       nit: can probably clean this up a bit. For example:
   ```scala
       val effectiveReplicas = replicas
         .getOrElse(List(partitionIndex, partitionIndex + 1, partitionIndex + 2))
         .map(Int.box)
         .toList.asJava
   ```
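
       The same shape would work for `effectiveIsr`, falling back to the full replica set:
   ```scala
       val effectiveIsr = isr
         .map(_.map(Int.box).toList.asJava)
         .getOrElse(effectiveReplicas) // no explicit ISR means all replicas are in sync
   ```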

##########
File path: core/src/main/scala/kafka/server/metadata/MetadataPartitions.scala
##########
@@ -0,0 +1,282 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+import java.util.function.BiConsumer
+
+import org.apache.kafka.common.message.LeaderAndIsrRequestData
+import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState
+import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState
+import org.apache.kafka.common.metadata.{IsrChangeRecord, PartitionRecord}
+import org.apache.kafka.common.{TopicPartition, Uuid}
+
+import scala.jdk.CollectionConverters._
+
+
+object MetadataPartition {
+  def apply(name: String, record: PartitionRecord): MetadataPartition = {
+    MetadataPartition(name,
+      record.partitionId(),
+      record.leader(),
+      record.leaderEpoch(),
+      record.replicas(),
+      record.isr(),
+      Collections.emptyList(), // TODO: handle offline replicas
+      Collections.emptyList(),
+      Collections.emptyList())
+  }
+
+  def apply(prevPartition: Option[MetadataPartition],
+            partition: UpdateMetadataPartitionState): MetadataPartition = {
+    new MetadataPartition(partition.topicName(),
+      partition.partitionIndex(),
+      partition.leader(),
+      partition.leaderEpoch(),
+      partition.replicas(),
+      partition.isr(),
+      partition.offlineReplicas(),
+      prevPartition.flatMap(p => Some(p.addingReplicas)).getOrElse(Collections.emptyList()),
+      prevPartition.flatMap(p => Some(p.removingReplicas)).getOrElse(Collections.emptyList())
+    )
+  }
+}
+
+case class MetadataPartition(topicName: String,
+                             partitionIndex: Int,
+                             leaderId: Int,
+                             leaderEpoch: Int,
+                             replicas: util.List[Integer],
+                             isr: util.List[Integer],
+                             offlineReplicas: util.List[Integer],
+                             addingReplicas: util.List[Integer],
+                             removingReplicas: util.List[Integer]) {
+  def toTopicPartition(): TopicPartition = new TopicPartition(topicName, partitionIndex)
+
+  def toLeaderAndIsrPartitionState(isNew: Boolean): LeaderAndIsrRequestData.LeaderAndIsrPartitionState = {
+    new LeaderAndIsrPartitionState().setTopicName(topicName).
+      setPartitionIndex(partitionIndex).
+      setLeader(leaderId).
+      setLeaderEpoch(leaderEpoch).
+      setReplicas(replicas).
+      setIsr(isr).
+      setAddingReplicas(addingReplicas).
+      setRemovingReplicas(removingReplicas).
+      setIsNew(isNew)
+    // Note: we don't set ZKVersion here.
+  }
+
+  def isReplicaFor(brokerId: Int): Boolean = replicas.contains(Integer.valueOf(brokerId))
+
+  def copyWithIsrChanges(record: IsrChangeRecord): MetadataPartition = {
+    MetadataPartition(topicName,
+      partitionIndex,
+      record.leader(),
+      record.leaderEpoch(),
+      replicas,
+      record.isr(),
+      offlineReplicas,
+      addingReplicas,
+      removingReplicas)
+  }
+}
+
+class MetadataPartitionsBuilder(val brokerId: Int,
+                                val prevPartitions: MetadataPartitions) {
+  private var newNameMap = prevPartitions.copyNameMap()
+  private var newIdMap = prevPartitions.copyIdMap()
+  private val changed = new util.IdentityHashMap[Any, Boolean]()
+  private val _localChanged = new util.HashSet[MetadataPartition]
+  private val _localRemoved = new util.HashSet[MetadataPartition]
+
+  def topicIdToName(id: Uuid): Option[String] = Option(newIdMap.get(id))
+
+  def removeTopicById(id: Uuid): Iterable[MetadataPartition] = {
+    Option(newIdMap.remove(id)) match {
+      case None => throw new RuntimeException(s"Unable to locate topic with ID ${id}")
+      case Some(name) => newNameMap.remove(name).values().asScala
+    }
+  }
+
+  def handleIsrChange(record: IsrChangeRecord): Unit = {
+    Option(newIdMap.get(record.topicId())) match {
+      case None => throw new RuntimeException(s"Unable to locate topic with ID ${record.topicId()}")
+      case Some(name) => Option(newNameMap.get(name)) match {
+        case None => throw new RuntimeException(s"Unable to locate topic with name ${name}")
+        case Some(partitionMap) => Option(partitionMap.get(record.partitionId())) match {
+          case None => throw new RuntimeException(s"Unable to locate ${name}-${record.partitionId}")
+          case Some(partition) => set(partition.copyWithIsrChanges(record))
+        }
+      }
+    }
+  }
+
+  def addUuidMapping(name: String, id: Uuid): Unit = {
+    newIdMap.put(id, name)
+  }
+
+  def removeUuidMapping(id: Uuid): Unit = {
+    newIdMap.remove(id)
+  }
+
+  def get(topicName: String, partitionId: Int): Option[MetadataPartition] = {
+    Option(newNameMap.get(topicName)).flatMap(m => Option(m.get(partitionId)))
+  }
+
+  def set(partition: MetadataPartition): Unit = {
+    val prevPartitionMap = newNameMap.get(partition.topicName)
+    val newPartitionMap = if (prevPartitionMap == null) {
+      val m = new util.HashMap[Int, MetadataPartition](1)
+      changed.put(m, true)
+      m
+    } else if (changed.containsKey(prevPartitionMap)) {
+      prevPartitionMap
+    } else {
+      val m = new util.HashMap[Int, MetadataPartition](prevPartitionMap.size() + 1)
+      m.putAll(prevPartitionMap)
+      changed.put(m, true)
+      m
+    }
+    val prevPartition = newPartitionMap.put(partition.partitionIndex, partition)
+    if (partition.isReplicaFor(brokerId)) {
+      _localChanged.add(partition)
+    } else if (prevPartition != null && prevPartition.isReplicaFor(brokerId)) {
+      _localRemoved.add(prevPartition)
+    }
+    newNameMap.put(partition.topicName, newPartitionMap)
+  }
+
+  def remove(topicName: String, partitionId: Int): Unit = {
+    val prevPartitionMap = newNameMap.get(topicName)
+    if (prevPartitionMap != null) {
+      if (changed.containsKey(prevPartitionMap)) {
+        val prevPartition = prevPartitionMap.remove(partitionId)
+        if (prevPartition.isReplicaFor(brokerId)) {
+          _localRemoved.add(prevPartition)
+        }
+      } else {
+        Option(prevPartitionMap.get(partitionId)).foreach { prevPartition =>
+          if (prevPartition.isReplicaFor(brokerId)) {
+            _localRemoved.add(prevPartition)
+          }
+          val newPartitionMap = new util.HashMap[Int, MetadataPartition](prevPartitionMap.size() - 1)
+          prevPartitionMap.forEach(new BiConsumer[Int, MetadataPartition]() {
+            override def accept(key: Int, value: MetadataPartition): Unit =
+              if (!key.equals(partitionId)) {
+                newPartitionMap.put(key, value)
+              }
+          })
+          changed.put(newPartitionMap, true)
+          newNameMap.put(topicName, newPartitionMap)
+        }
+      }
+    }
+  }
+
+  def build(): MetadataPartitions = {
+    val result = new MetadataPartitions(newNameMap, newIdMap)
+    newNameMap = Collections.unmodifiableMap(newNameMap)
+    newIdMap = Collections.unmodifiableMap(newIdMap)
+    result
+  }
+
+  def localChanged(): Set[MetadataPartition] = _localChanged.asScala.toSet

Review comment:
       Hmm... I wonder if the copy here is necessary. If it is, perhaps we should emphasize it in the name, e.g. `copyLocalChanged`.
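
       And if the copy turns out to be unnecessary, a read-only view would avoid it entirely (just a sketch; callers would then see later builder mutations, which may be exactly why the copy is there):
   ```scala
   def localChanged(): collection.Set[MetadataPartition] = _localChanged.asScala
   ```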

##########
File path: core/src/test/scala/kafka/server/metadata/MetadataPartitionsTest.scala
##########
@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server.metadata
+
+import java.util
+import java.util.Collections
+import org.apache.kafka.common.Uuid
+import org.junit.jupiter.api.Assertions._
+import org.junit.jupiter.api.{Test, Timeout}
+import org.slf4j.LoggerFactory
+
+import java.util.concurrent.TimeUnit
+import scala.collection.mutable
+import scala.jdk.CollectionConverters._
+
+
+@Timeout(value = 120000, unit = TimeUnit.MILLISECONDS)
+class MetadataPartitionsTest {
+
+  val log = LoggerFactory.getLogger(classOf[MetadataPartitionsTest])
+
+  val emptyPartitions = new MetadataPartitions(Collections.emptyMap(), Collections.emptyMap())
+
+  private def newPartition(topicName: String,
+                           partitionIndex: Int,
+                           replicas: Option[Seq[Int]] = None,
+                           isr: Option[Seq[Int]] = None): MetadataPartition = {
+    val effectiveReplicas = replicas match {
+      case None => util.Arrays.asList(Integer.valueOf(partitionIndex),
+        Integer.valueOf(partitionIndex + 1), Integer.valueOf(partitionIndex + 2))
+      case Some(s) => s.map(Integer.valueOf(_)).toList.asJava
+    }
+    val effectiveIsr = isr match {
+      case None => effectiveReplicas
+      case Some(s) => s.map(Integer.valueOf(_)).toList.asJava
+    }
+    new MetadataPartition(topicName,
+      partitionIndex,
+      partitionIndex % 3, 100,
+      effectiveReplicas,
+      effectiveIsr,
+      Collections.emptyList(),
+      Collections.emptyList(),
+      Collections.emptyList())
+  }
+
+  @Test
+  def testBuildPartitions(): Unit = {
+    val builder = new MetadataPartitionsBuilder(0, emptyPartitions)
+    assertEquals(None, builder.get("foo", 0))
+    builder.set(newPartition("foo", 0))
+    assertEquals(Some(newPartition("foo", 0)), builder.get("foo", 0))
+    assertEquals(None, builder.get("foo", 1))
+    builder.set(newPartition("foo", 1))
+    builder.set(newPartition("bar", 0))
+    val partitions = builder.build()
+    assertEquals(Some(newPartition("foo", 0)), partitions.get("foo", 0))
+    assertEquals(Some(newPartition("foo", 1)), partitions.get("foo", 1))
+    assertEquals(None, partitions.get("foo", 2))
+    assertEquals(Some(newPartition("bar", 0)), partitions.get("bar", 0))
+  }
+
+  @Test
+  def testAllPartitionsIterator(): Unit = {
+    val builder = new MetadataPartitionsBuilder(0, emptyPartitions)
+    val expected = new mutable.HashSet[MetadataPartition]()
+    expected += newPartition("foo", 0)
+    expected += newPartition("foo", 1)
+    expected += newPartition("foo", 2)
+    expected += newPartition("bar", 0)
+    expected += newPartition("bar", 1)
+    expected += newPartition("baz", 0)
+    expected.foreach { builder.set(_) }

Review comment:
       nit: simplify a little bit
   ```scala
       expected.foreach(builder.set)
   ```




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
users@infra.apache.org