Posted to dev@ranger.apache.org by "Jiayi Liu (Jira)" <ji...@apache.org> on 2020/11/24 10:21:00 UTC

[jira] [Created] (RANGER-3086) log4j audit's behavior is different from that configured in hive-log4j2.properties

Jiayi Liu created RANGER-3086:
---------------------------------

             Summary: log4j audit's behavior is different from that configured in hive-log4j2.properties
                 Key: RANGER-3086
                 URL: https://issues.apache.org/jira/browse/RANGER-3086
             Project: Ranger
          Issue Type: Bug
          Components: audit
    Affects Versions: 2.1.0, 2.0.0
            Reporter: Jiayi Liu


I need to write the Ranger audit log to a local file. With ranger-1.2.0 + hive-2.3.5 this works very well, but the same configuration with ranger-2.x + hive-3.1.2 + hadoop-3.1.3 does not produce the log file I need. It is as if the Ranger audit does not read hive-log4j2.properties: the JSON audit log is written to hiveserver2.err instead.

Can someone please take a look? Below is my configuration.

hive-log4j2.properties

{code}
status = INFO
name = HiveLog4j2
packages = org.apache.hadoop.hive.ql.log

# list of properties
property.hive.log.level = INFO
property.hive.root.logger = DRFA
property.hive.log.dir = ${sys:java.io.tmpdir}/${sys:user.name}
property.hive.log.file = hive.log
property.hive.perflogger.log.level = INFO

# list of all appenders
appenders = console, DRFA, RANGERAUDIT

# console appender
appender.console.type = Console
appender.console.name = console
appender.console.target = SYSTEM_ERR
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n

# daily rolling file appender
appender.DRFA.type = RollingRandomAccessFile
appender.DRFA.name = DRFA
appender.DRFA.fileName = ${sys:hive.log.dir}/${sys:hive.log.file}
# Use %pid in the filePattern to append <process-id>@<host-name> to the filename if you want separate log files for different CLI session
appender.DRFA.filePattern = ${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}
appender.DRFA.layout.type = PatternLayout
appender.DRFA.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n
appender.DRFA.policies.type = Policies
appender.DRFA.policies.time.type = TimeBasedTriggeringPolicy
appender.DRFA.policies.time.interval = 1
appender.DRFA.policies.time.modulate = true
appender.DRFA.strategy.type = DefaultRolloverStrategy
appender.DRFA.strategy.max = 30

appender.DRFA.strategy.action.type = Delete
appender.DRFA.strategy.action.basepath = /var/log/hive
appender.DRFA.strategy.action.followLinks = true
appender.DRFA.strategy.action.condition.type = IfAccumulatedFileSize
appender.DRFA.strategy.action.condition.exceeds = 500MB
appender.DRFA.strategy.action.condition.nested_condition.type = IfFileName
appender.DRFA.strategy.action.condition.nested_condition.glob = ${sys:hive.log.file}.*

# RANGERAUDIT appender
appender.RANGERAUDIT.type=file
appender.RANGERAUDIT.name=RANGERAUDIT
appender.RANGERAUDIT.fileName=${sys:hive.log.dir}/ranger-audit.log
appender.RANGERAUDIT.filePermissions=rwxrwxrwx
appender.RANGERAUDIT.layout.type=PatternLayout
appender.RANGERAUDIT.layout.pattern=%d{ISO8601} %q %5p [%t] %c{2} (%F:%M(%L)) - %m%n

# list of all loggers
loggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX, PerfLogger, Ranger

logger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn
logger.NIOServerCnxn.level = WARN

logger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO
logger.ClientCnxnSocketNIO.level = WARN

logger.DataNucleus.name = DataNucleus
logger.DataNucleus.level = ERROR

logger.Datastore.name = Datastore
logger.Datastore.level = ERROR

logger.JPOX.name = JPOX
logger.JPOX.level = ERROR

logger.PerfLogger.name = org.apache.hadoop.hive.ql.log.PerfLogger
logger.PerfLogger.level = ${sys:hive.perflogger.log.level}

#logger.Log4JAuditDestination.name = org.apache.ranger.audit.destination.Log4JAuditDestination
#logger.Log4JAuditDestination.level = INFO
#logger.Log4JAuditDestination.appenderRefs = RANGERAUDIT
#logger.Log4JAuditDestination.appenderRef.RANGERAUDIT.ref = RANGERAUDIT

logger.Ranger.name = xaaudit
logger.Ranger.level = INFO
logger.Ranger.appenderRefs = RANGERAUDIT
logger.Ranger.appenderRef.RANGERAUDIT.ref = RANGERAUDIT

# root logger
rootLogger.level = ${sys:hive.log.level}
rootLogger.appenderRefs = root
rootLogger.appenderRef.root.ref = ${sys:hive.root.logger}
{code}
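
A quick way to check the log4j2 routing in isolation (purely illustrative; the class name and -D values below are just examples, not part of the actual deployment) is to log straight through the "xaaudit" logger and see whether the message lands in ranger-audit.log or on stderr:

{code:java}
// Illustrative standalone check, run against the same hive-log4j2.properties, e.g.:
//   java -Dlog4j.configurationFile=hive-log4j2.properties \
//        -Dhive.log.dir=/tmp/hive -Dhive.log.file=hive.log \
//        -Dhive.log.level=INFO -Dhive.root.logger=DRFA \
//        -Dhive.perflogger.log.level=INFO XaauditRoutingCheck
// (log4j-api and log4j-core need to be on the classpath)
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class XaauditRoutingCheck {
    public static void main(String[] args) {
        // Same logger name as logger.Ranger.name above and as
        // xasecure.audit.destination.log4j.logger in ranger-hive-audit.xml.
        Logger auditLogger = LogManager.getLogger("xaaudit");

        // With the configuration above, this line should end up in
        // ${hive.log.dir}/ranger-audit.log via the RANGERAUDIT appender.
        auditLogger.info("{\"dummy\":\"audit event\"}");
    }
}
{code}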

ranger-hive-audit.xml

{code:xml}
<property>
        <name>xasecure.audit.log4j.is.enabled</name>
        <value>true</value>
</property>

<property>
        <name>xasecure.audit.log4j.is.async</name>
        <value>false</value>
</property>

<property>
        <name>xasecure.audit.log4j.async.max.queue.size</name>
        <value>10240</value>
</property>

<property>
        <name>xasecure.audit.log4j.async.max.flush.interval.ms</name>
        <value>30000</value>
</property>
<property>
        <name>xasecure.audit.destination.log4j</name>
        <value>true</value>
</property>
<property>
        <name>xasecure.audit.destination.log4j.logger</name>
        <value>xaaudit</value>
</property>
{code}
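
For context on why the two names have to match: my understanding (a rough sketch, not the actual Ranger source) is that the log4j audit destination just looks up the logger named by xasecure.audit.destination.log4j.logger and writes each serialized JSON event through it at INFO level, roughly like below. The event can therefore only reach the RANGERAUDIT appender if that logging call ends up in the Log4j2 configuration loaded from hive-log4j2.properties.

{code:java}
// Rough sketch only, not copied from Ranger; an SLF4J-style lookup is
// assumed here for illustration. The essential point is that the logger
// name comes from xasecure.audit.destination.log4j.logger ("xaaudit")
// and must match the logger.Ranger.name entry in hive-log4j2.properties.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Log4jAuditDestinationSketch {
    private final Logger auditLogger;

    public Log4jAuditDestinationSketch(String configuredLoggerName) {
        // configuredLoggerName comes from xasecure.audit.destination.log4j.logger
        this.auditLogger = LoggerFactory.getLogger(configuredLoggerName);
    }

    public void log(String auditEventJson) {
        // Only reaches the RANGERAUDIT file appender if this call is routed
        // into the Log4j2 configuration that HiveServer2 loaded from
        // hive-log4j2.properties; otherwise the event falls through to
        // whatever logging backend the plugin is bound to, which would match
        // the JSON I see in hiveserver2.err.
        auditLogger.info(auditEventJson);
    }
}
{code}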




