Posted to reviews@spark.apache.org by gaborgsomogyi <gi...@git.apache.org> on 2018/09/04 09:51:36 UTC
[GitHub] spark pull request #22138: [SPARK-25151][SS] Apply Apache Commons Pool to Ka...
Github user gaborgsomogyi commented on a diff in the pull request:
https://github.com/apache/spark/pull/22138#discussion_r214813543
--- Diff: external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/InternalKafkaConsumerPool.scala ---
@@ -0,0 +1,241 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.kafka010
+
+import java.{util => ju}
+import java.util.concurrent.ConcurrentHashMap
+
+import org.apache.commons.pool2.{BaseKeyedPooledObjectFactory, PooledObject, SwallowedExceptionListener}
+import org.apache.commons.pool2.impl.{DefaultEvictionPolicy, DefaultPooledObject, GenericKeyedObjectPool, GenericKeyedObjectPoolConfig}
+
+import org.apache.spark.SparkEnv
+import org.apache.spark.internal.Logging
+import org.apache.spark.sql.kafka010.InternalKafkaConsumerPool._
+import org.apache.spark.sql.kafka010.KafkaDataConsumer.CacheKey
+
+/**
+ * Provides an object pool for [[InternalKafkaConsumer]] objects, grouped by [[CacheKey]].
+ *
+ * This class leverages [[GenericKeyedObjectPool]] internally, so it exposes methods modeled on
+ * that class, and the same contract applies: after using a borrowed object, you must either call
+ * returnObject() if the object is healthy enough to return to the pool, or invalidateObject() if
+ * the object should be destroyed.
+ *
+ * The soft capacity of the pool is determined by the "spark.sql.kafkaConsumerCache.capacity"
+ * config value, and the pool falls back to a reasonable default when the value is not provided.
+ * (The instance makes a best effort to respect the soft capacity, but it can be exceeded when a
+ * borrow request arrives and there is neither free space nor an idle object to evict.)
+ *
+ * This class guarantees that no caller will get a pooled object while it is borrowed and not yet
+ * returned, hence providing thread-safe usage of the non-thread-safe [[InternalKafkaConsumer]],
+ * unless the caller shares the borrowed object across multiple threads.
+ */
+private[kafka010] class InternalKafkaConsumerPool(
+ objectFactory: ObjectFactory,
+ poolConfig: PoolConfig) {
+
+ // This class is intended to enforce only a soft capacity, so the underlying pool must be
+ // unbounded (a negative maxTotal means "no limit" in commons-pool2).
+ assert(poolConfig.getMaxTotal < 0)
+
+ private lazy val pool = {
+ val internalPool = new GenericKeyedObjectPool[CacheKey, InternalKafkaConsumer](
+ objectFactory, poolConfig)
+ internalPool.setSwallowedExceptionListener(CustomSwallowedExceptionListener)
+ internalPool
+ }
+
+ /**
+ * Borrows an [[InternalKafkaConsumer]] object from the pool. If there is no idle object for
+ * the key, the pool creates a new [[InternalKafkaConsumer]] object.
+ *
+ * If the pool has no idle object for the key and has also reached the soft capacity,
+ * it first tries to evict some idle objects.
+ *
+ * A borrowed object must be handed back via either returnObject or invalidateObject;
+ * otherwise the object is kept in the pool as an active object.
+ */
+ def borrowObject(key: CacheKey, kafkaParams: ju.Map[String, Object]): InternalKafkaConsumer = {
+ updateKafkaParamForKey(key, kafkaParams)
+
+ // ">=" rather than "==": the total can temporarily exceed the soft capacity, and idle
+ // objects should still be evicted on the next borrow in that case.
+ if (getTotal >= poolConfig.getSoftMaxTotal()) {
+ pool.clearOldest()
+ }
+
+ pool.borrowObject(key)
+ }
+
+ /** Returns a borrowed object to the pool. */
+ def returnObject(consumer: InternalKafkaConsumer): Unit = {
+ pool.returnObject(extractCacheKey(consumer), consumer)
+ }
+
+ /** Invalidates (destroys) a borrowed object, removing it from the pool. */
+ def invalidateObject(consumer: InternalKafkaConsumer): Unit = {
+ pool.invalidateObject(extractCacheKey(consumer), consumer)
+ }
+
+ /** Invalidates all idle consumers for the given key. */
+ def invalidateKey(key: CacheKey): Unit = {
+ pool.clear(key)
+ }
+
+ /**
+ * Closes the keyed object pool. Once the pool is closed,
+ * borrowObject will fail with [[IllegalStateException]], but returnObject and invalidateObject
+ * will continue to work, with returned objects destroyed on return.
+ *
+ * Also destroys idle instances in the pool.
+ */
+ def close(): Unit = {
+ pool.close()
+ }
+
+ def getNumIdle: Int = pool.getNumIdle
+
+ def getNumIdle(key: CacheKey): Int = pool.getNumIdle(key)
+
+ def getNumActive: Int = pool.getNumActive
+
+ def getNumActive(key: CacheKey): Int = pool.getNumActive(key)
+
+ def getTotal: Int = getNumIdle + getNumActive
+
+ def getTotal(key: CacheKey): Int = getNumIdle(key) + getNumActive(key)
+
+ private def updateKafkaParamForKey(key: CacheKey, kafkaParams: ju.Map[String, Object]): Unit = {
+ // We can assume that kafkaParams should not be different for the same cache key,
--- End diff --
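The class doc above pins down the usage contract, and the comment here assumes `kafkaParams` never differs for the same cache key. On the caller side the contract is roughly the following (a sketch only, not code from this PR; `pool`, `key`, and `kafkaParams` are assumed to be in scope, and `fetchRecord` stands in for whatever the caller does with the consumer):

```scala
val consumer = pool.borrowObject(key, kafkaParams)
try {
  fetchRecord(consumer)          // hypothetical work with the borrowed consumer
  pool.returnObject(consumer)    // healthy: hand it back for reuse
} catch {
  case t: Throwable =>
    pool.invalidateObject(consumer)  // possibly broken: destroy instead of returning
    throw t
}
```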
Then why not enforce it?
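A minimal sketch of what enforcement could look like, caching the params per key and failing fast on a mismatch (`cachedKafkaParams` is a hypothetical field, not taken from this PR; `ju` and `ConcurrentHashMap` are already imported in this file):

```scala
private val cachedKafkaParams = new ConcurrentHashMap[CacheKey, ju.Map[String, Object]]()

private def updateKafkaParamForKey(key: CacheKey, kafkaParams: ju.Map[String, Object]): Unit = {
  val prev = cachedKafkaParams.putIfAbsent(key, kafkaParams)
  // Fail fast rather than silently trusting callers to keep the params stable per key.
  require(prev == null || prev == kafkaParams,
    s"Kafka params for $key changed between borrows; the pool assumes they are stable per key.")
}
```

A `require` here turns the documented assumption into a checked invariant instead of relying on callers to honor it.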
---