Posted to commits@carbondata.apache.org by ku...@apache.org on 2020/11/02 16:17:55 UTC

[carbondata] branch master updated: [CARBONDATA-4006] Fix for currentUser as NULL in getCount method during index server fallback mode

This is an automated email from the ASF dual-hosted git repository.

kunalkapoor pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new 4a729e1  [CARBONDATA-4006] Fix for currentUser as NULL in getCount method during index server fallback mode
4a729e1 is described below

commit 4a729e10f75ea3c5b99540e089b9dfb204bb84f5
Author: Vikram Ahuja <vi...@gmail.com>
AuthorDate: Wed Sep 23 12:57:58 2020 +0530

    [CARBONDATA-4006] Fix for currentUser as NULL in getCount method
    during index server fallback mode
    
    Why is this PR needed?
    In index server fallback mode, Server.getRemoteUser.getShortUserName
    always returns NULL, which can cause a NullPointerException in any
    generic event that uses the current user.
    
    What changes were proposed in this PR?
    Do not fire any generic events in index server fallback mode, matching
    the existing behaviour of the getSplits and invalidateSegmentCache
    APIs (a minimal sketch of the guard pattern follows below).
    
    This closes #3952
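
    Below is a minimal, self-contained Scala sketch of the guard pattern
    described above. The Request case class, the remoteShortUserName
    helper and the printed event are illustrative stand-ins (assumptions,
    not the real CarbonData or Hadoop APIs); in the actual change the
    guard wraps Server.getRemoteUser inside getCount, as the diff shows.

    object FallbackGuardSketch {
      // Stand-in for IndexInputFormat; only the fallback flag matters here.
      final case class Request(isFallbackJob: Boolean)

      // Stand-in for Server.getRemoteUser.getShortUserName, which is only
      // non-null when the call arrives over RPC (i.e. not in fallback mode).
      private def remoteShortUserName(): String = "carbon"

      def getCount(request: Request): Long = {
        if (!request.isFallbackJob) {
          // The RPC user exists only on this branch, so the generic event
          // (e.g. an ACL check) is fired inside the guard, never in fallback.
          val currentUser = remoteShortUserName()
          println(s"firing IndexServerEvent for user $currentUser")
        }
        // The count itself is computed in both modes.
        42L
      }

      def main(args: Array[String]): Unit = {
        getCount(Request(isFallbackJob = true))   // fallback: no event fired
        getCount(Request(isFallbackJob = false))  // RPC path: event fired
      }
    }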
---
 .../org/apache/carbondata/indexserver/IndexServer.scala      | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/integration/spark/src/main/scala/org/apache/carbondata/indexserver/IndexServer.scala b/integration/spark/src/main/scala/org/apache/carbondata/indexserver/IndexServer.scala
index 36c2ad4..207fb85 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/indexserver/IndexServer.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/indexserver/IndexServer.scala
@@ -128,10 +128,6 @@ object IndexServer extends ServerInterface {
   def getCount(request: IndexInputFormat): LongWritable = {
     doAs {
       val sparkSession = SparkSQLUtil.getSparkSession
-      var currentUser: String = null
-      if (!request.isFallbackJob) {
-        currentUser = Server.getRemoteUser.getShortUserName
-      }
       lazy val getCountTask = {
         if (!request.isFallbackJob) {
           sparkSession.sparkContext.setLocalProperty("spark.jobGroup.id", request.getTaskGroupId)
@@ -142,11 +138,11 @@ object IndexServer extends ServerInterface {
               CarbonCommonConstants.POINT + request.getCarbonTable.getTableName
           }
           sparkSession.sparkContext.setLocalProperty("spark.job.description", taskGroupDesc)
+          // Fire Generic Event like ACLCheck..etc
+          val indexServerEvent = IndexServerEvent(sparkSession, request.getCarbonTable,
+            Server.getRemoteUser.getShortUserName)
+          OperationListenerBus.getInstance().fireEvent(indexServerEvent, operationContext)
         }
-        // Fire Generic Event like ACLCheck..etc
-        val indexServerEvent = IndexServerEvent(sparkSession, request.getCarbonTable,
-          currentUser)
-        OperationListenerBus.getInstance().fireEvent(indexServerEvent, operationContext)
         val splits = new DistributedCountRDD(sparkSession, request).collect()
         if (!request.isFallbackJob) {
           DistributedRDDUtils.updateExecutorCacheSize(splits.map(_._1).toSet)
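
As the hunks above show, the fix does two things at once: it removes the
mutable currentUser variable that was NULL in fallback mode, and it moves
the IndexServerEvent firing inside the existing if (!request.isFallbackJob)
block, so Server.getRemoteUser is only consulted when a remote caller
actually exists.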