You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ranger.apache.org by bo...@apache.org on 2015/04/22 19:23:18 UTC
[01/12] incubator-ranger git commit: RANGER-276 Add support for
aggregating audit logs at source
Repository: incubator-ranger
Updated Branches:
refs/heads/master 2f8bcd234 -> 4f3cea223
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/security-admin/src/test/java/org/apache/ranger/audit/TestAuditProcessor.java
----------------------------------------------------------------------
diff --git a/security-admin/src/test/java/org/apache/ranger/audit/TestAuditProcessor.java b/security-admin/src/test/java/org/apache/ranger/audit/TestAuditProcessor.java
deleted file mode 100644
index a023b9a..0000000
--- a/security-admin/src/test/java/org/apache/ranger/audit/TestAuditProcessor.java
+++ /dev/null
@@ -1,786 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.ranger.audit;
-
-import static org.junit.Assert.*;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileReader;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Properties;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.ranger.audit.model.AuditEventBase;
-import org.apache.ranger.audit.model.AuthzAuditEvent;
-import org.apache.ranger.audit.provider.AuditAsyncQueue;
-import org.apache.ranger.audit.provider.AuditBatchProcessor;
-import org.apache.ranger.audit.provider.AuditDestination;
-import org.apache.ranger.audit.provider.AuditFileSpool;
-import org.apache.ranger.audit.provider.AuditProvider;
-import org.apache.ranger.audit.provider.AuditProviderFactory;
-import org.apache.ranger.audit.provider.BaseAuditProvider;
-import org.apache.ranger.audit.provider.FileAuditDestination;
-import org.apache.ranger.audit.provider.MiscUtil;
-import org.apache.ranger.audit.provider.MultiDestAuditProvider;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class TestAuditProcessor {
-
- private static final Log logger = LogFactory
- .getLog(TestAuditProcessor.class);
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- }
-
- static private int seqNum = 0;
-
- @Test
- public void testAuditAsyncQueue() {
- logger.debug("testAuditAsyncQueue()...");
- TestConsumer testConsumer = new TestConsumer();
- AuditAsyncQueue queue = new AuditAsyncQueue(testConsumer);
- Properties props = new Properties();
- queue.init(props);
-
- queue.start();
-
- int messageToSend = 10;
- for (int i = 0; i < messageToSend; i++) {
- queue.log(createEvent());
- }
- queue.stop();
- queue.waitToComplete();
- // Let's wait for second
- try {
- Thread.sleep(1000);
- } catch (InterruptedException e) {
- // ignore
- }
- assertEquals(messageToSend, testConsumer.getCountTotal());
- assertEquals(messageToSend, testConsumer.getSumTotal());
- assertNull("Event not in sequnce", testConsumer.isInSequence());
- }
-
- @Test
- public void testMultipleQueue() {
- logger.debug("testAuditAsyncQueue()...");
- int destCount = 3;
- TestConsumer[] testConsumer = new TestConsumer[destCount];
-
- MultiDestAuditProvider multiQueue = new MultiDestAuditProvider();
- for (int i = 0; i < destCount; i++) {
- testConsumer[i] = new TestConsumer();
- multiQueue.addAuditProvider(testConsumer[i]);
- }
-
- AuditAsyncQueue queue = new AuditAsyncQueue(multiQueue);
- Properties props = new Properties();
- queue.init(props);
- queue.start();
-
- int messageToSend = 10;
- for (int i = 0; i < messageToSend; i++) {
- queue.log(createEvent());
- }
- queue.stop();
- queue.waitToComplete();
- // Let's wait for second
- try {
- Thread.sleep(1000);
- } catch (InterruptedException e) {
- // ignore
- }
- for (int i = 0; i < destCount; i++) {
- assertEquals("consumer" + i, messageToSend,
- testConsumer[i].getCountTotal());
- assertEquals("consumer" + i, messageToSend,
- testConsumer[i].getSumTotal());
-
- }
- }
-
- @Test
- public void testAuditBatchProcessorBySize() {
- logger.debug("testAuditBatchProcessor()...");
- int messageToSend = 10;
-
- String basePropName = "ranger.test.batch";
- int batchSize = messageToSend / 3;
- int expectedBatchSize = batchSize
- + (batchSize * 3 < messageToSend ? 1 : 0);
- int queueSize = messageToSend * 2;
- int intervalMS = messageToSend * 100; // Deliberately big interval
- Properties props = new Properties();
- props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_SIZE, ""
- + batchSize);
- props.put(basePropName + "." + BaseAuditProvider.PROP_QUEUE_SIZE, ""
- + queueSize);
- props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_INTERVAL,
- "" + intervalMS);
-
- TestConsumer testConsumer = new TestConsumer();
- AuditBatchProcessor queue = new AuditBatchProcessor(testConsumer);
- queue.init(props, basePropName);
- queue.start();
-
- for (int i = 0; i < messageToSend; i++) {
- queue.log(createEvent());
-
- }
- // Let's wait for second
- try {
- Thread.sleep(2000);
- } catch (InterruptedException e) {
- // ignore
- }
-
- queue.waitToComplete();
- queue.stop();
- queue.waitToComplete();
-
- assertEquals("Total count", messageToSend, testConsumer.getCountTotal());
- assertEquals("Total sum", messageToSend, testConsumer.getSumTotal());
- assertEquals("Total batch", expectedBatchSize,
- testConsumer.getBatchCount());
- assertNull("Event not in sequnce", testConsumer.isInSequence());
-
- }
-
- @Test
- public void testAuditBatchProcessorByTime() {
- logger.debug("testAuditBatchProcessor()...");
-
- int messageToSend = 10;
-
- String basePropName = "ranger.test.batch";
- int batchSize = messageToSend * 2; // Deliberately big size
- int queueSize = messageToSend * 2;
- int intervalMS = (1000 / messageToSend) * 3; // e.g (1000/10 * 3) = 300
- // ms
- int pauseMS = 1000 / messageToSend + 3; // e.g. 1000/10 -5 = 95ms
- int expectedBatchSize = (messageToSend * pauseMS) / intervalMS + 1;
-
- Properties props = new Properties();
- props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_SIZE, ""
- + batchSize);
- props.put(basePropName + "." + BaseAuditProvider.PROP_QUEUE_SIZE, ""
- + queueSize);
- props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_INTERVAL,
- "" + intervalMS);
-
- TestConsumer testConsumer = new TestConsumer();
- AuditBatchProcessor queue = new AuditBatchProcessor(testConsumer);
- queue.init(props, basePropName);
- queue.start();
-
- for (int i = 0; i < messageToSend; i++) {
- queue.log(createEvent());
- try {
- Thread.sleep(pauseMS);
- } catch (InterruptedException e) {
- // ignore
- }
- }
- // Let's wait for second
- try {
- Thread.sleep(2000);
- } catch (InterruptedException e) {
- // ignore
- }
- queue.waitToComplete();
- queue.stop();
- queue.waitToComplete();
-
- assertEquals("Total count", messageToSend, testConsumer.getCountTotal());
- assertEquals("Total sum", messageToSend, testConsumer.getSumTotal());
- assertEquals("Total batch", expectedBatchSize,
- testConsumer.getBatchCount());
- assertNull("Event not in sequnce", testConsumer.isInSequence());
- }
-
- @Test
- public void testAuditBatchProcessorDestDown() {
- logger.debug("testAuditBatchProcessorDestDown()...");
- int messageToSend = 10;
-
- String basePropName = "ranger.test.batch";
- int batchSize = messageToSend / 3;
- int queueSize = messageToSend * 2;
- int intervalMS = Integer.MAX_VALUE; // Deliberately big interval
- Properties props = new Properties();
- props.put(basePropName + "." + BaseAuditProvider.PROP_NAME,
- "testAuditBatchProcessorDestDown");
-
- props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_SIZE, ""
- + batchSize);
- props.put(basePropName + "." + BaseAuditProvider.PROP_QUEUE_SIZE, ""
- + queueSize);
- props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_INTERVAL,
- "" + intervalMS);
-
- // Enable File Spooling
- props.put(basePropName + "." + "filespool.enable", "" + true);
- props.put(basePropName + "." + "filespool.dir", "target");
-
- TestConsumer testConsumer = new TestConsumer();
- testConsumer.isDown = true;
-
- AuditBatchProcessor queue = new AuditBatchProcessor(testConsumer);
- queue.init(props, basePropName);
- queue.start();
-
- for (int i = 0; i < messageToSend; i++) {
- queue.log(createEvent());
-
- }
- // Let's wait for second
- try {
- Thread.sleep(2000);
- } catch (InterruptedException e) {
- // ignore
- }
-
- queue.waitToComplete(5000);
- queue.stop();
- queue.waitToComplete();
-
- assertEquals("Total count", 0, testConsumer.getCountTotal());
- assertEquals("Total sum", 0, testConsumer.getSumTotal());
- assertEquals("Total batch", 0, testConsumer.getBatchCount());
- assertNull("Event not in sequnce", testConsumer.isInSequence());
- }
-
- //@Test
- public void testAuditBatchProcessorDestDownFlipFlop() {
- logger.debug("testAuditBatchProcessorDestDown()...");
- int messageToSend = 10;
-
- String basePropName = "ranger.test.batch";
- int batchSize = messageToSend / 3;
- int expectedBatchSize = batchSize
- + (batchSize * 3 < messageToSend ? 1 : 0);
- int queueSize = messageToSend * 2;
- int intervalMS = 3000; // Deliberately big interval
- Properties props = new Properties();
- props.put(
- basePropName + "." + BaseAuditProvider.PROP_NAME,
- "testAuditBatchProcessorDestDownFlipFlop_"
- + MiscUtil.generateUniqueId());
-
- props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_SIZE, ""
- + batchSize);
- props.put(basePropName + "." + BaseAuditProvider.PROP_QUEUE_SIZE, ""
- + queueSize);
- props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_INTERVAL,
- "" + intervalMS);
-
- // Enable File Spooling
- int destRetryMS = 10;
- props.put(
- basePropName + "." + BaseAuditProvider.PROP_FILE_SPOOL_ENABLE,
- "" + true);
- props.put(
- basePropName + "." + AuditFileSpool.PROP_FILE_SPOOL_LOCAL_DIR,
- "target");
- props.put(basePropName + "."
- + AuditFileSpool.PROP_FILE_SPOOL_DEST_RETRY_MS, ""
- + destRetryMS);
-
- TestConsumer testConsumer = new TestConsumer();
- testConsumer.isDown = false;
-
- AuditBatchProcessor queue = new AuditBatchProcessor(testConsumer);
- queue.init(props, basePropName);
- queue.start();
-
- try {
- queue.log(createEvent());
- queue.log(createEvent());
- queue.log(createEvent());
- Thread.sleep(1000);
- testConsumer.isDown = true;
- Thread.sleep(1000);
- queue.log(createEvent());
- queue.log(createEvent());
- queue.log(createEvent());
- Thread.sleep(1000);
- testConsumer.isDown = false;
- Thread.sleep(1000);
- queue.log(createEvent());
- queue.log(createEvent());
- queue.log(createEvent());
- Thread.sleep(1000);
- testConsumer.isDown = true;
- Thread.sleep(1000);
- queue.log(createEvent());
- Thread.sleep(1000);
- testConsumer.isDown = false;
- Thread.sleep(1000);
- } catch (InterruptedException e) {
- // ignore
- }
- // Let's wait for second
- try {
- Thread.sleep(2000);
- } catch (InterruptedException e) {
- // ignore
- }
-
- queue.waitToComplete(5000);
- queue.stop();
- queue.waitToComplete();
-
- assertEquals("Total count", messageToSend, testConsumer.getCountTotal());
- assertEquals("Total sum", messageToSend, testConsumer.getSumTotal());
- assertNull("Event not in sequnce", testConsumer.isInSequence());
-
- }
-
- /**
- * See if we recover after restart
- */
- public void testAuditBatchProcessorDestDownRestart() {
- logger.debug("testAuditBatchProcessorDestDownRestart()...");
- int messageToSend = 10;
-
- String basePropName = "ranger.test.batch";
- int batchSize = messageToSend / 3;
- int queueSize = messageToSend * 2;
- int intervalMS = 3000; // Deliberately big interval
- int maxArchivedFiles = 1;
- Properties props = new Properties();
- props.put(
- basePropName + "." + BaseAuditProvider.PROP_NAME,
- "testAuditBatchProcessorDestDownRestart_"
- + MiscUtil.generateUniqueId());
-
- props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_SIZE, ""
- + batchSize);
- props.put(basePropName + "." + BaseAuditProvider.PROP_QUEUE_SIZE, ""
- + queueSize);
- props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_INTERVAL,
- "" + intervalMS);
-
- // Enable File Spooling
- int destRetryMS = 10;
- props.put(
- basePropName + "." + BaseAuditProvider.PROP_FILE_SPOOL_ENABLE,
- "" + true);
- props.put(
- basePropName + "." + AuditFileSpool.PROP_FILE_SPOOL_LOCAL_DIR,
- "target");
- props.put(basePropName + "."
- + AuditFileSpool.PROP_FILE_SPOOL_DEST_RETRY_MS, ""
- + destRetryMS);
- props.put(basePropName + "."
- + AuditFileSpool.PROP_FILE_SPOOL_ARCHIVE_MAX_FILES_COUNT, ""
- + maxArchivedFiles);
-
- TestConsumer testConsumer = new TestConsumer();
- testConsumer.isDown = true;
-
- AuditBatchProcessor queue = new AuditBatchProcessor(testConsumer);
- queue.init(props, basePropName);
- queue.start();
-
- for (int i = 0; i < messageToSend; i++) {
- queue.log(createEvent());
-
- }
- // Let's wait for second or two
- try {
- Thread.sleep(2000);
- } catch (InterruptedException e) {
- // ignore
- }
-
- queue.waitToComplete(5000);
- queue.stop();
- queue.waitToComplete();
-
- testConsumer.isDown = true;
-
- // Let's wait for second or two
- try {
- Thread.sleep(5000);
- } catch (InterruptedException e) {
- // ignore
- }
-
-
- // Let's now recreate the objects
- testConsumer = new TestConsumer();
-
- queue = new AuditBatchProcessor(testConsumer);
- queue.init(props, basePropName);
- queue.start();
-
- // Let's wait for second
- try {
- Thread.sleep(2000);
- } catch (InterruptedException e) {
- // ignore
- }
-
- queue.waitToComplete(5000);
- queue.stop();
- queue.waitToComplete();
-
- assertEquals("Total count", messageToSend, testConsumer.getCountTotal());
- assertEquals("Total sum", messageToSend, testConsumer.getSumTotal());
- assertNull("Event not in sequnce", testConsumer.isInSequence());
-
- }
-
- @Test
- public void testFileDestination() {
- logger.debug("testFileDestination()...");
-
- int messageToSend = 10;
- int batchSize = messageToSend / 3;
- int queueSize = messageToSend * 2;
- int intervalMS = 500; // Should be less than final sleep time
-
- String logFolderName = "target/testFileDestination";
- File logFolder = new File(logFolderName);
- String logFileName = "test_ranger_audit.log";
- File logFile = new File(logFolder, logFileName);
-
- Properties props = new Properties();
- // Destination
- String filePropPrefix = AuditProviderFactory.AUDIT_DEST_BASE + ".file";
- props.put(filePropPrefix, "enable");
- props.put(filePropPrefix + "." + BaseAuditProvider.PROP_NAME, "file");
- props.put(filePropPrefix + "."
- + FileAuditDestination.PROP_FILE_LOCAL_DIR, logFolderName);
- props.put(filePropPrefix + "."
- + FileAuditDestination.PROP_FILE_LOCAL_FILE_NAME_FORMAT,
- "%app-type%_ranger_audit.log");
- props.put(filePropPrefix + "."
- + FileAuditDestination.PROP_FILE_FILE_ROLLOVER, "" + 10);
-
- props.put(filePropPrefix + "." + BaseAuditProvider.PROP_QUEUE, "batch");
- String batchPropPrefix = filePropPrefix + "." + "batch";
-
- props.put(batchPropPrefix + "." + BaseAuditProvider.PROP_BATCH_SIZE, ""
- + batchSize);
- props.put(batchPropPrefix + "." + BaseAuditProvider.PROP_QUEUE_SIZE, ""
- + queueSize);
- props.put(
- batchPropPrefix + "." + BaseAuditProvider.PROP_BATCH_INTERVAL,
- "" + intervalMS);
-
- // Enable File Spooling
- int destRetryMS = 10;
- props.put(batchPropPrefix + "."
- + BaseAuditProvider.PROP_FILE_SPOOL_ENABLE, "" + true);
- props.put(batchPropPrefix + "."
- + AuditFileSpool.PROP_FILE_SPOOL_LOCAL_DIR, "target");
- props.put(batchPropPrefix + "."
- + AuditFileSpool.PROP_FILE_SPOOL_DEST_RETRY_MS, ""
- + destRetryMS);
-
- AuditProviderFactory factory = AuditProviderFactory.getInstance();
- factory.init(props, "test");
-
- // FileAuditDestination fileDest = new FileAuditDestination();
- // fileDest.init(props, filePropPrefix);
- //
- // AuditBatchProcessor queue = new AuditBatchProcessor(fileDest);
- // queue.init(props, batchPropPrefix);
- // queue.start();
-
- AuditProvider queue = factory.getProvider();
-
- for (int i = 0; i < messageToSend; i++) {
- queue.log(createEvent());
- }
- // Let's wait for second
- try {
- Thread.sleep(1000);
- } catch (InterruptedException e) {
- // ignore
- }
-
- queue.waitToComplete();
- queue.stop();
- queue.waitToComplete();
-
- assertTrue("File created", logFile.exists());
- try {
- List<AuthzAuditEvent> eventList = new ArrayList<AuthzAuditEvent>();
- int totalSum = 0;
- BufferedReader br = new BufferedReader(new FileReader(logFile));
- String line;
- int lastSeq = -1;
- boolean outOfSeq = false;
- while ((line = br.readLine()) != null) {
- AuthzAuditEvent event = MiscUtil.fromJson(line,
- AuthzAuditEvent.class);
- eventList.add(event);
- totalSum += event.getFrequencyCount();
- if (event.getSeqNum() <= lastSeq) {
- outOfSeq = true;
- }
- }
- br.close();
- assertEquals("Total count", messageToSend, eventList.size());
- assertEquals("Total sum", messageToSend, totalSum);
- assertFalse("Event not in sequnce", outOfSeq);
-
- } catch (Throwable e) {
- logger.error("Error opening file for reading.", e);
- assertTrue("Error reading file. fileName=" + logFile + ", error="
- + e.toString(), true);
- }
-
- }
-
- private AuthzAuditEvent createEvent() {
- AuthzAuditEvent event = new AuthzAuditEvent();
- event.setSeqNum(++seqNum);
- return event;
- }
-
- class TestConsumer extends AuditDestination {
-
- int countTotal = 0;
- int sumTotal = 0;
- int batchCount = 0;
- String providerName = getClass().getName();
- boolean isDown = false;
- int batchSize = 3;
-
- List<AuthzAuditEvent> eventList = new ArrayList<AuthzAuditEvent>();
-
- /*
- * (non-Javadoc)
- *
- * @see
- * org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger
- * .audit.model.AuditEventBase)
- */
- @Override
- public boolean log(AuditEventBase event) {
- if (isDown) {
- return false;
- }
- countTotal++;
- if (event instanceof AuthzAuditEvent) {
- AuthzAuditEvent azEvent = (AuthzAuditEvent) event;
- sumTotal += azEvent.getFrequencyCount();
- logger.info("EVENT:" + event);
- eventList.add(azEvent);
- }
- return true;
- }
-
- @Override
- public boolean log(Collection<AuditEventBase> events) {
- if (isDown) {
- return false;
- }
- batchCount++;
- for (AuditEventBase event : events) {
- log(event);
- }
- return true;
- }
-
- @Override
- public boolean logJSON(String jsonStr) {
- if (isDown) {
- return false;
- }
- countTotal++;
- AuthzAuditEvent event = MiscUtil.fromJson(jsonStr,
- AuthzAuditEvent.class);
- sumTotal += event.getFrequencyCount();
- logger.info("JSON:" + jsonStr);
- eventList.add(event);
- return true;
- }
-
- @Override
- public boolean logJSON(Collection<String> events) {
- if (isDown) {
- return false;
- }
- for (String event : events) {
- logJSON(event);
- }
- return true;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see
- * org.apache.ranger.audit.provider.AuditProvider#init(java.util.Properties
- * )
- */
- @Override
- public void init(Properties prop) {
- // Nothing to do here
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#start()
- */
- @Override
- public void start() {
- // Nothing to do here
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#stop()
- */
- @Override
- public void stop() {
- // Nothing to do here
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#waitToComplete()
- */
- @Override
- public void waitToComplete() {
- }
-
- @Override
- public int getMaxBatchSize() {
- return batchSize;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#isFlushPending()
- */
- @Override
- public boolean isFlushPending() {
- return false;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see
- * org.apache.ranger.audit.provider.AuditProvider#getLastFlushTime()
- */
- @Override
- public long getLastFlushTime() {
- return 0;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#flush()
- */
- @Override
- public void flush() {
- // Nothing to do here
- }
-
- public int getCountTotal() {
- return countTotal;
- }
-
- public int getSumTotal() {
- return sumTotal;
- }
-
- public int getBatchCount() {
- return batchCount;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see
- * org.apache.ranger.audit.provider.AuditProvider#init(java.util.Properties
- * , java.lang.String)
- */
- @Override
- public void init(Properties prop, String basePropertyName) {
-
- }
-
- /*
- * (non-Javadoc)
- *
- * @see
- * org.apache.ranger.audit.provider.AuditProvider#waitToComplete(long)
- */
- @Override
- public void waitToComplete(long timeout) {
-
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#getName()
- */
- @Override
- public String getName() {
- return providerName;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#isDrain()
- */
- @Override
- public boolean isDrain() {
- return false;
- }
-
- // Local methods
- public AuthzAuditEvent isInSequence() {
- int lastSeq = -1;
- for (AuthzAuditEvent event : eventList) {
- if (event.getSeqNum() <= lastSeq) {
- return event;
- }
- }
- return null;
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/security-admin/src/test/java/org/apache/ranger/audit/TestAuditQueue.java
----------------------------------------------------------------------
diff --git a/security-admin/src/test/java/org/apache/ranger/audit/TestAuditQueue.java b/security-admin/src/test/java/org/apache/ranger/audit/TestAuditQueue.java
new file mode 100644
index 0000000..45477e2
--- /dev/null
+++ b/security-admin/src/test/java/org/apache/ranger/audit/TestAuditQueue.java
@@ -0,0 +1,704 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ranger.audit;
+
+import static org.junit.Assert.*;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.ranger.audit.destination.FileAuditDestination;
+import org.apache.ranger.audit.model.AuthzAuditEvent;
+import org.apache.ranger.audit.provider.AuditProvider;
+import org.apache.ranger.audit.provider.AuditProviderFactory;
+import org.apache.ranger.audit.provider.BaseAuditProvider;
+import org.apache.ranger.audit.provider.MiscUtil;
+import org.apache.ranger.audit.provider.MultiDestAuditProvider;
+import org.apache.ranger.audit.queue.AuditAsyncQueue;
+import org.apache.ranger.audit.queue.AuditBatchQueue;
+import org.apache.ranger.audit.queue.AuditFileSpool;
+import org.apache.ranger.audit.queue.AuditSummaryQueue;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestAuditQueue {
+
+ private static final Log logger = LogFactory.getLog(TestAuditQueue.class);
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ }
+
+ static private int seqNum = 0;
+
+ @Test
+ public void testAuditAsyncQueue() {
+ logger.debug("testAuditAsyncQueue()...");
+ TestConsumer testConsumer = new TestConsumer();
+ AuditAsyncQueue queue = new AuditAsyncQueue(testConsumer);
+ Properties props = new Properties();
+ queue.init(props);
+
+ queue.start();
+
+ int messageToSend = 10;
+ for (int i = 0; i < messageToSend; i++) {
+ queue.log(createEvent());
+ }
+ queue.stop();
+ queue.waitToComplete();
+ // Let's wait for second
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ assertEquals(messageToSend, testConsumer.getCountTotal());
+ assertEquals(messageToSend, testConsumer.getSumTotal());
+ assertNull("Event not in sequnce", testConsumer.isInSequence());
+ }
+
+ @Test
+ public void testAuditSummaryQueue() {
+ logger.debug("testAuditSummaryQueue()...");
+ TestConsumer testConsumer = new TestConsumer();
+ AuditSummaryQueue queue = new AuditSummaryQueue(testConsumer);
+
+ Properties props = new Properties();
+ props.put(BaseAuditProvider.PROP_DEFAULT_PREFIX + "."
+ + AuditSummaryQueue.PROP_SUMMARY_INTERVAL, "" + 300);
+ queue.init(props, BaseAuditProvider.PROP_DEFAULT_PREFIX);
+
+ queue.start();
+
+ commonTestSummary(testConsumer, queue);
+ }
+
+ private void commonTestSummary(TestConsumer testConsumer,
+ BaseAuditProvider queue) {
+ int messageToSend = 0;
+ int pauseMS = 330;
+
+ int countToCheck = 0;
+ try {
+
+ queue.log(createEvent("john", "select",
+ "xademo/customer_details/imei", true));
+ messageToSend++;
+ queue.log(createEvent("john", "select",
+ "xademo/customer_details/imei", true));
+ messageToSend++;
+ countToCheck++;
+ queue.log(createEvent("jane", "select",
+ "xademo/customer_details/imei", true));
+ messageToSend++;
+ countToCheck++;
+ Thread.sleep(pauseMS);
+
+ queue.log(createEvent("john", "select",
+ "xademo/customer_details/imei", true));
+ messageToSend++;
+ queue.log(createEvent("john", "select",
+ "xademo/customer_details/imei", true));
+ messageToSend++;
+ countToCheck++;
+ queue.log(createEvent("jane", "select",
+ "xademo/customer_details/imei", true));
+ messageToSend++;
+ countToCheck++;
+ Thread.sleep(pauseMS);
+
+ queue.log(createEvent("john", "select",
+ "xademo/customer_details/imei", true));
+ messageToSend++;
+ countToCheck++;
+ queue.log(createEvent("john", "select",
+ "xademo/customer_details/imei", false));
+ messageToSend++;
+ countToCheck++;
+ queue.log(createEvent("jane", "select",
+ "xademo/customer_details/imei", true));
+ messageToSend++;
+ countToCheck++;
+ Thread.sleep(pauseMS);
+
+ } catch (InterruptedException e1) {
+ logger.error("Sleep interupted", e1);
+ }
+ // Let's wait for second
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+
+ queue.waitToComplete();
+ queue.stop();
+ queue.waitToComplete();
+ // Let's wait for second
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ assertEquals(messageToSend, testConsumer.getSumTotal());
+ assertEquals(countToCheck, testConsumer.getCountTotal());
+ assertNull("Event not in sequnce", testConsumer.isInSequence());
+ }
+
+ @Test
+ public void testAuditSummaryByInfra() {
+ logger.debug("testAuditSummaryByInfra()...");
+
+ Properties props = new Properties();
+ // Destination
+ String propPrefix = AuditProviderFactory.AUDIT_DEST_BASE + ".test";
+ props.put(propPrefix, "enable");
+ props.put(BaseAuditProvider.PROP_DEFAULT_PREFIX + "." + "summary" + "."
+ + "enabled", "true");
+ props.put(propPrefix + "." + BaseAuditProvider.PROP_NAME, "test");
+ props.put(propPrefix + "." + BaseAuditProvider.PROP_QUEUE, "none");
+
+ props.put(BaseAuditProvider.PROP_DEFAULT_PREFIX + "."
+ + AuditSummaryQueue.PROP_SUMMARY_INTERVAL, "" + 300);
+ props.put(propPrefix + "." + BaseAuditProvider.PROP_CLASS_NAME,
+ TestConsumer.class.getName());
+
+ AuditProviderFactory factory = AuditProviderFactory.getInstance();
+ factory.init(props, "test");
+ BaseAuditProvider queue = (BaseAuditProvider) factory.getProvider();
+ BaseAuditProvider consumer = (BaseAuditProvider) queue.getConsumer();
+ while (consumer.getConsumer() != null) {
+ consumer = (BaseAuditProvider) consumer.getConsumer();
+ }
+ assertTrue("Consumer should be TestConsumer. class="
+ + consumer.getClass().getName(),
+ consumer instanceof TestConsumer);
+ TestConsumer testConsumer = (TestConsumer) consumer;
+ commonTestSummary(testConsumer, queue);
+ }
+
+ @Test
+ public void testMultipleQueue() {
+ logger.debug("testAuditAsyncQueue()...");
+ int destCount = 3;
+ TestConsumer[] testConsumer = new TestConsumer[destCount];
+
+ MultiDestAuditProvider multiQueue = new MultiDestAuditProvider();
+ for (int i = 0; i < destCount; i++) {
+ testConsumer[i] = new TestConsumer();
+ multiQueue.addAuditProvider(testConsumer[i]);
+ }
+
+ AuditAsyncQueue queue = new AuditAsyncQueue(multiQueue);
+ Properties props = new Properties();
+ queue.init(props);
+ queue.start();
+
+ int messageToSend = 10;
+ for (int i = 0; i < messageToSend; i++) {
+ queue.log(createEvent());
+ }
+ queue.stop();
+ queue.waitToComplete();
+ // Let's wait for second
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ for (int i = 0; i < destCount; i++) {
+ assertEquals("consumer" + i, messageToSend,
+ testConsumer[i].getCountTotal());
+ assertEquals("consumer" + i, messageToSend,
+ testConsumer[i].getSumTotal());
+
+ }
+ }
+
+ @Test
+ public void testAuditBatchQueueBySize() {
+ logger.debug("testAuditBatchQueue()...");
+ int messageToSend = 10;
+
+ String basePropName = "testAuditBatchQueueBySize_"
+ + MiscUtil.generateUniqueId();
+ int batchSize = messageToSend / 3;
+ int expectedBatchSize = batchSize
+ + (batchSize * 3 < messageToSend ? 1 : 0);
+ int queueSize = messageToSend * 2;
+ int intervalMS = messageToSend * 100; // Deliberately big interval
+ Properties props = new Properties();
+ props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_SIZE, ""
+ + batchSize);
+ props.put(basePropName + "." + BaseAuditProvider.PROP_QUEUE_SIZE, ""
+ + queueSize);
+ props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_INTERVAL,
+ "" + intervalMS);
+
+ TestConsumer testConsumer = new TestConsumer();
+ AuditBatchQueue queue = new AuditBatchQueue(testConsumer);
+ queue.init(props, basePropName);
+ queue.start();
+
+ for (int i = 0; i < messageToSend; i++) {
+ queue.log(createEvent());
+
+ }
+ // Let's wait for second
+ try {
+ Thread.sleep(2000);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+
+ queue.waitToComplete();
+ queue.stop();
+ queue.waitToComplete();
+
+ assertEquals("Total count", messageToSend, testConsumer.getCountTotal());
+ assertEquals("Total sum", messageToSend, testConsumer.getSumTotal());
+ assertEquals("Total batch", expectedBatchSize,
+ testConsumer.getBatchCount());
+ assertNull("Event not in sequnce", testConsumer.isInSequence());
+
+ }
+
+ @Test
+ public void testAuditBatchQueueByTime() {
+ logger.debug("testAuditBatchQueue()...");
+
+ int messageToSend = 10;
+
+ String basePropName = "testAuditBatchQueueByTime_"
+ + MiscUtil.generateUniqueId();
+ int batchSize = messageToSend * 2; // Deliberately big size
+ int queueSize = messageToSend * 2;
+ int intervalMS = (1000 / messageToSend) * 3; // e.g (1000/10 * 3) = 300
+ // ms
+ int pauseMS = 1000 / messageToSend + 3; // e.g. 1000/10 + 3 = 103ms
+ int expectedBatchSize = (messageToSend * pauseMS) / intervalMS + 1;
+
+ Properties props = new Properties();
+ props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_SIZE, ""
+ + batchSize);
+ props.put(basePropName + "." + BaseAuditProvider.PROP_QUEUE_SIZE, ""
+ + queueSize);
+ props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_INTERVAL,
+ "" + intervalMS);
+
+ TestConsumer testConsumer = new TestConsumer();
+ AuditBatchQueue queue = new AuditBatchQueue(testConsumer);
+ queue.init(props, basePropName);
+ queue.start();
+
+ for (int i = 0; i < messageToSend; i++) {
+ queue.log(createEvent());
+ try {
+ Thread.sleep(pauseMS);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ }
+ // Let's wait for second
+ try {
+ Thread.sleep(2000);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ queue.waitToComplete();
+ queue.stop();
+ queue.waitToComplete();
+
+ assertEquals("Total count", messageToSend, testConsumer.getCountTotal());
+ assertEquals("Total sum", messageToSend, testConsumer.getSumTotal());
+ assertEquals("Total batch", expectedBatchSize,
+ testConsumer.getBatchCount());
+ assertNull("Event not in sequnce", testConsumer.isInSequence());
+ }
+
+ @Test
+ public void testAuditBatchQueueDestDown() {
+ logger.debug("testAuditBatchQueueDestDown()...");
+ int messageToSend = 10;
+
+ String basePropName = "testAuditBatchQueueDestDown_"
+ + MiscUtil.generateUniqueId();
+ int batchSize = messageToSend / 3;
+ int queueSize = messageToSend * 2;
+ int intervalMS = Integer.MAX_VALUE; // Deliberately big interval
+ Properties props = new Properties();
+ props.put(basePropName + "." + BaseAuditProvider.PROP_NAME,
+ "testAuditBatchQueueDestDown");
+
+ props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_SIZE, ""
+ + batchSize);
+ props.put(basePropName + "." + BaseAuditProvider.PROP_QUEUE_SIZE, ""
+ + queueSize);
+ props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_INTERVAL,
+ "" + intervalMS);
+
+ // Enable File Spooling
+ props.put(basePropName + "." + "filespool.enable", "" + true);
+ props.put(basePropName + "." + "filespool.dir", "target");
+
+ TestConsumer testConsumer = new TestConsumer();
+ testConsumer.isDown = true;
+
+ AuditBatchQueue queue = new AuditBatchQueue(testConsumer);
+ queue.init(props, basePropName);
+ queue.start();
+
+ for (int i = 0; i < messageToSend; i++) {
+ queue.log(createEvent());
+
+ }
+ // Let's wait for second
+ try {
+ Thread.sleep(2000);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+
+ queue.waitToComplete(5000);
+ queue.stop();
+ queue.waitToComplete();
+
+ assertEquals("Total count", 0, testConsumer.getCountTotal());
+ assertEquals("Total sum", 0, testConsumer.getSumTotal());
+ assertEquals("Total batch", 0, testConsumer.getBatchCount());
+ assertNull("Event not in sequnce", testConsumer.isInSequence());
+ }
+
+ @Test
+ public void testAuditBatchQueueDestDownFlipFlop() {
+ logger.debug("testAuditBatchQueueDestDownFlipFlop()...");
+ int messageToSend = 10;
+
+ String basePropName = "testAuditBatchQueueDestDownFlipFlop_"
+ + MiscUtil.generateUniqueId();
+ int batchSize = messageToSend / 3;
+ int queueSize = messageToSend * 2;
+ int intervalMS = 3000; // Deliberately big interval
+ Properties props = new Properties();
+ props.put(
+ basePropName + "." + BaseAuditProvider.PROP_NAME,
+ "testAuditBatchQueueDestDownFlipFlop_"
+ + MiscUtil.generateUniqueId());
+
+ props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_SIZE, ""
+ + batchSize);
+ props.put(basePropName + "." + BaseAuditProvider.PROP_QUEUE_SIZE, ""
+ + queueSize);
+ props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_INTERVAL,
+ "" + intervalMS);
+
+ // Enable File Spooling
+ int destRetryMS = 10;
+ props.put(
+ basePropName + "." + BaseAuditProvider.PROP_FILE_SPOOL_ENABLE,
+ "" + true);
+ props.put(
+ basePropName + "." + AuditFileSpool.PROP_FILE_SPOOL_LOCAL_DIR,
+ "target");
+ props.put(basePropName + "."
+ + AuditFileSpool.PROP_FILE_SPOOL_DEST_RETRY_MS, ""
+ + destRetryMS);
+
+ TestConsumer testConsumer = new TestConsumer();
+ testConsumer.isDown = false;
+
+ AuditBatchQueue queue = new AuditBatchQueue(testConsumer);
+ queue.init(props, basePropName);
+ queue.start();
+
+ try {
+ queue.log(createEvent());
+ queue.log(createEvent());
+ queue.log(createEvent());
+ Thread.sleep(1000);
+ testConsumer.isDown = true;
+ Thread.sleep(1000);
+ queue.log(createEvent());
+ queue.log(createEvent());
+ queue.log(createEvent());
+ Thread.sleep(1000);
+ testConsumer.isDown = false;
+ Thread.sleep(1000);
+ queue.log(createEvent());
+ queue.log(createEvent());
+ queue.log(createEvent());
+ Thread.sleep(1000);
+ testConsumer.isDown = true;
+ Thread.sleep(1000);
+ queue.log(createEvent());
+ Thread.sleep(1000);
+ testConsumer.isDown = false;
+ Thread.sleep(1000);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ // Let's wait for second
+ try {
+ Thread.sleep(2000);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+
+ queue.waitToComplete(5000);
+ queue.stop();
+ queue.waitToComplete();
+
+ assertEquals("Total count", messageToSend, testConsumer.getCountTotal());
+ assertEquals("Total sum", messageToSend, testConsumer.getSumTotal());
+ assertNull("Event not in sequnce", testConsumer.isInSequence());
+
+ }
+
+ /**
+ * See if we recover after restart
+ */
+ @Test
+ public void testAuditBatchQueueDestDownRestart() {
+ logger.debug("testAuditBatchQueueDestDownRestart()...");
+ int messageToSend = 10;
+
+ String basePropName = "testAuditBatchQueueDestDownRestart_"
+ + MiscUtil.generateUniqueId();
+ int batchSize = messageToSend / 3;
+ int queueSize = messageToSend * 2;
+ int intervalMS = 3000; // Deliberately big interval
+ int maxArchivedFiles = 1;
+ Properties props = new Properties();
+ props.put(
+ basePropName + "." + BaseAuditProvider.PROP_NAME,
+ "testAuditBatchQueueDestDownRestart_"
+ + MiscUtil.generateUniqueId());
+
+ props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_SIZE, ""
+ + batchSize);
+ props.put(basePropName + "." + BaseAuditProvider.PROP_QUEUE_SIZE, ""
+ + queueSize);
+ props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_INTERVAL,
+ "" + intervalMS);
+
+ // Enable File Spooling
+ int destRetryMS = 10;
+ props.put(
+ basePropName + "." + BaseAuditProvider.PROP_FILE_SPOOL_ENABLE,
+ "" + true);
+ props.put(
+ basePropName + "." + AuditFileSpool.PROP_FILE_SPOOL_LOCAL_DIR,
+ "target");
+ props.put(basePropName + "."
+ + AuditFileSpool.PROP_FILE_SPOOL_DEST_RETRY_MS, ""
+ + destRetryMS);
+ props.put(basePropName + "."
+ + AuditFileSpool.PROP_FILE_SPOOL_ARCHIVE_MAX_FILES_COUNT, ""
+ + maxArchivedFiles);
+
+ TestConsumer testConsumer = new TestConsumer();
+ testConsumer.isDown = true;
+
+ AuditBatchQueue queue = new AuditBatchQueue(testConsumer);
+ queue.init(props, basePropName);
+ queue.start();
+
+ for (int i = 0; i < messageToSend; i++) {
+ queue.log(createEvent());
+
+ }
+ // Let's wait for second or two
+ try {
+ Thread.sleep(2000);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+
+ queue.waitToComplete(5000);
+ queue.stop();
+ queue.waitToComplete();
+
+ testConsumer.isDown = true;
+
+ // Let's wait for second or two
+ try {
+ Thread.sleep(5000);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+
+ // Let's now recreate the objects
+ testConsumer = new TestConsumer();
+
+ queue = new AuditBatchQueue(testConsumer);
+ queue.init(props, basePropName);
+ queue.start();
+
+ // Let's wait for second
+ try {
+ Thread.sleep(2000);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+
+ queue.waitToComplete(5000);
+ queue.stop();
+ queue.waitToComplete();
+
+ assertEquals("Total count", messageToSend, testConsumer.getCountTotal());
+ assertEquals("Total sum", messageToSend, testConsumer.getSumTotal());
+ assertNull("Event not in sequnce", testConsumer.isInSequence());
+
+ }
+
+ @Test
+ public void testFileDestination() {
+ logger.debug("testFileDestination()...");
+
+ int messageToSend = 10;
+ int batchSize = messageToSend / 3;
+ int queueSize = messageToSend * 2;
+ int intervalMS = 500; // Should be less than final sleep time
+
+ String logFolderName = "target/testFileDestination";
+ File logFolder = new File(logFolderName);
+ String logFileName = "test_ranger_audit.log";
+ File logFile = new File(logFolder, logFileName);
+
+ Properties props = new Properties();
+ // Destination
+ String filePropPrefix = AuditProviderFactory.AUDIT_DEST_BASE + ".file";
+ props.put(filePropPrefix, "enable");
+ props.put(filePropPrefix + "." + BaseAuditProvider.PROP_NAME, "file");
+ props.put(filePropPrefix + "."
+ + FileAuditDestination.PROP_FILE_LOCAL_DIR, logFolderName);
+ props.put(filePropPrefix + "."
+ + FileAuditDestination.PROP_FILE_LOCAL_FILE_NAME_FORMAT,
+ "%app-type%_ranger_audit.log");
+ props.put(filePropPrefix + "."
+ + FileAuditDestination.PROP_FILE_FILE_ROLLOVER, "" + 10);
+
+ props.put(filePropPrefix + "." + BaseAuditProvider.PROP_QUEUE, "batch");
+ String batchPropPrefix = filePropPrefix + "." + "batch";
+
+ props.put(batchPropPrefix + "." + BaseAuditProvider.PROP_BATCH_SIZE, ""
+ + batchSize);
+ props.put(batchPropPrefix + "." + BaseAuditProvider.PROP_QUEUE_SIZE, ""
+ + queueSize);
+ props.put(
+ batchPropPrefix + "." + BaseAuditProvider.PROP_BATCH_INTERVAL,
+ "" + intervalMS);
+
+ // Enable File Spooling
+ int destRetryMS = 10;
+ props.put(batchPropPrefix + "."
+ + BaseAuditProvider.PROP_FILE_SPOOL_ENABLE, "" + true);
+ props.put(batchPropPrefix + "."
+ + AuditFileSpool.PROP_FILE_SPOOL_LOCAL_DIR, "target");
+ props.put(batchPropPrefix + "."
+ + AuditFileSpool.PROP_FILE_SPOOL_DEST_RETRY_MS, ""
+ + destRetryMS);
+
+ AuditProviderFactory factory = AuditProviderFactory.getInstance();
+ factory.init(props, "test");
+
+ // FileAuditDestination fileDest = new FileAuditDestination();
+ // fileDest.init(props, filePropPrefix);
+ //
+ // AuditBatchQueue queue = new AuditBatchQueue(fileDest);
+ // queue.init(props, batchPropPrefix);
+ // queue.start();
+
+ AuditProvider queue = factory.getProvider();
+
+ for (int i = 0; i < messageToSend; i++) {
+ queue.log(createEvent());
+ }
+ // Let's wait for second
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+
+ queue.waitToComplete();
+ queue.stop();
+ queue.waitToComplete();
+
+ assertTrue("File created", logFile.exists());
+ try {
+ List<AuthzAuditEvent> eventList = new ArrayList<AuthzAuditEvent>();
+ int totalSum = 0;
+ BufferedReader br = new BufferedReader(new FileReader(logFile));
+ String line;
+ int lastSeq = -1;
+ boolean outOfSeq = false;
+ while ((line = br.readLine()) != null) {
+ AuthzAuditEvent event = MiscUtil.fromJson(line,
+ AuthzAuditEvent.class);
+ eventList.add(event);
+ totalSum += event.getEventCount();
+ if (event.getSeqNum() <= lastSeq) {
+ outOfSeq = true;
+ }
+ }
+ br.close();
+ assertEquals("Total count", messageToSend, eventList.size());
+ assertEquals("Total sum", messageToSend, totalSum);
+ assertFalse("Event not in sequnce", outOfSeq);
+
+ } catch (Throwable e) {
+ logger.error("Error opening file for reading.", e);
+ assertTrue("Error reading file. fileName=" + logFile + ", error="
+ + e.toString(), true);
+ }
+
+ }
+
+ private AuthzAuditEvent createEvent() {
+ AuthzAuditEvent event = new AuthzAuditEvent();
+ event.setSeqNum(++seqNum);
+ return event;
+ }
+
+ private AuthzAuditEvent createEvent(String user, String accessType,
+ String resource, boolean isAllowed) {
+ AuthzAuditEvent event = new AuthzAuditEvent();
+ event.setUser(user);
+ event.setAccessType(accessType);
+ event.setResourcePath(resource);
+ event.setAccessResult(isAllowed ? (short) 1 : (short) 0);
+
+ event.setSeqNum(++seqNum);
+ return event;
+ }
+}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/security-admin/src/test/java/org/apache/ranger/audit/TestConsumer.java
----------------------------------------------------------------------
diff --git a/security-admin/src/test/java/org/apache/ranger/audit/TestConsumer.java b/security-admin/src/test/java/org/apache/ranger/audit/TestConsumer.java
new file mode 100644
index 0000000..d4d50f0
--- /dev/null
+++ b/security-admin/src/test/java/org/apache/ranger/audit/TestConsumer.java
@@ -0,0 +1,248 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ranger.audit;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.ranger.audit.destination.AuditDestination;
+import org.apache.ranger.audit.model.AuditEventBase;
+import org.apache.ranger.audit.model.AuthzAuditEvent;
+import org.apache.ranger.audit.provider.MiscUtil;
+
+public class TestConsumer extends AuditDestination {
+ private static final Log logger = LogFactory.getLog(TestConsumer.class);
+
+ int countTotal = 0;
+ int sumTotal = 0;
+ int batchCount = 0;
+ String providerName = getClass().getName();
+ boolean isDown = false;
+ int batchSize = 3;
+
+ List<AuthzAuditEvent> eventList = new ArrayList<AuthzAuditEvent>();
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger
+ * .audit.model.AuditEventBase)
+ */
+ @Override
+ public boolean log(AuditEventBase event) {
+ if (isDown) {
+ return false;
+ }
+ countTotal++;
+ if (event instanceof AuthzAuditEvent) {
+ AuthzAuditEvent azEvent = (AuthzAuditEvent) event;
+ sumTotal += azEvent.getEventCount();
+ logger.info("EVENT:" + event);
+ eventList.add(azEvent);
+ }
+ return true;
+ }
+
+ @Override
+ public boolean log(Collection<AuditEventBase> events) {
+ if (isDown) {
+ return false;
+ }
+ batchCount++;
+ for (AuditEventBase event : events) {
+ log(event);
+ }
+ return true;
+ }
+
+ @Override
+ public boolean logJSON(String jsonStr) {
+ if (isDown) {
+ return false;
+ }
+ countTotal++;
+ AuthzAuditEvent event = MiscUtil.fromJson(jsonStr,
+ AuthzAuditEvent.class);
+ sumTotal += event.getEventCount();
+ logger.info("JSON:" + jsonStr);
+ eventList.add(event);
+ return true;
+ }
+
+ @Override
+ public boolean logJSON(Collection<String> events) {
+ if (isDown) {
+ return false;
+ }
+ batchCount++;
+ for (String event : events) {
+ logJSON(event);
+ }
+ return true;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.ranger.audit.provider.AuditProvider#init(java.util.Properties
+ * )
+ */
+ @Override
+ public void init(Properties prop) {
+ // Nothing to do here
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#start()
+ */
+ @Override
+ public void start() {
+ // Nothing to do here
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#stop()
+ */
+ @Override
+ public void stop() {
+ // Nothing to do here
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#waitToComplete()
+ */
+ @Override
+ public void waitToComplete() {
+ }
+
+ @Override
+ public int getMaxBatchSize() {
+ return batchSize;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#isFlushPending()
+ */
+ @Override
+ public boolean isFlushPending() {
+ return false;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.ranger.audit.provider.AuditProvider#getLastFlushTime()
+ */
+ @Override
+ public long getLastFlushTime() {
+ return 0;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#flush()
+ */
+ @Override
+ public void flush() {
+ // Nothing to do here
+ }
+
+ public int getCountTotal() {
+ return countTotal;
+ }
+
+ public int getSumTotal() {
+ return sumTotal;
+ }
+
+ public int getBatchCount() {
+ return batchCount;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.ranger.audit.provider.AuditProvider#init(java.util.Properties
+ * , java.lang.String)
+ */
+ @Override
+ public void init(Properties prop, String basePropertyName) {
+
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.ranger.audit.provider.AuditProvider#waitToComplete(long)
+ */
+ @Override
+ public void waitToComplete(long timeout) {
+
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#getName()
+ */
+ @Override
+ public String getName() {
+ return providerName;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#isDrain()
+ */
+ @Override
+ public boolean isDrain() {
+ return false;
+ }
+
+ // Local methods
+ public AuthzAuditEvent isInSequence() {
+ int lastSeq = -1;
+ for (AuthzAuditEvent event : eventList) {
+ if (event.getSeqNum() <= lastSeq) {
+ return event;
+ }
+ }
+ return null;
+ }
+}
[10/12] incubator-ranger git commit: Merge branch 'master' of
https://git-wip-us.apache.org/repos/asf/incubator-ranger.git
Posted by bo...@apache.org.
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/incubator-ranger.git
Project: http://git-wip-us.apache.org/repos/asf/incubator-ranger/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ranger/commit/42a0e254
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ranger/tree/42a0e254
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ranger/diff/42a0e254
Branch: refs/heads/master
Commit: 42a0e254a4f87c5ce661cc9c300aa03f3cd74ab5
Parents: d8542a2 2f8bcd2
Author: Don Bosco Durai <bo...@apache.org>
Authored: Wed Apr 22 09:13:27 2015 -0700
Committer: Don Bosco Durai <bo...@apache.org>
Committed: Wed Apr 22 09:13:27 2015 -0700
----------------------------------------------------------------------
.../ranger/plugin/util/KeySearchFilter.java | 116 +++++++
.../scripts/ranger-admin-services.sh | 39 ++-
kms/config/kms-webapp/dbks-site.xml | 2 +-
.../db/postgres/kms_core_db_postgres.sql | 12 +-
kms/scripts/install.properties | 2 +-
kms/scripts/setup.sh | 12 +
.../crypto/key/RangerKeyStoreProvider.java | 6 -
.../hadoop/crypto/key/kms/server/KMSACLs.java | 12 +-
.../hadoop/crypto/key/kms/server/KMSWebApp.java | 13 +-
.../kms/server/KeyAuthorizationKeyProvider.java | 4 +-
.../apache/ranger/entity/XXRangerKeyStore.java | 4 +-
.../apache/ranger/entity/XXRangerMasterKey.java | 4 +-
.../kms/authorizer/RangerKmsAuthorizer.java | 15 +-
.../ranger/services/kms/client/KMSClient.java | 156 ++++-----
.../services/kms/client/KMSResourceMgr.java | 27 +-
security-admin/src/bin/ranger_install.py | 93 +++--
.../java/org/apache/ranger/biz/KmsKeyMgr.java | 317 +++++++++++++++++
.../org/apache/ranger/common/AppConstants.java | 6 +-
.../java/org/apache/ranger/rest/XKeyREST.java | 169 +++++++++
.../java/org/apache/ranger/view/VXKmsKey.java | 239 +++++++++++++
.../org/apache/ranger/view/VXKmsKeyList.java | 84 +++++
.../src/main/webapp/WEB-INF/log4j.xml | 4 +-
.../collection_bases/VXKmsKeyListBase.js | 68 ++++
.../webapp/scripts/collections/VXKmsKeyList.js | 36 ++
.../webapp/scripts/controllers/Controller.js | 37 ++
.../webapp/scripts/model_bases/VXKmsKeyBase.js | 103 ++++++
.../src/main/webapp/scripts/models/VXKmsKey.js | 54 +++
.../src/main/webapp/scripts/modules/XALinks.js | 34 +-
.../scripts/modules/globalize/message/en.js | 17 +-
.../src/main/webapp/scripts/routers/Router.js | 12 +-
.../src/main/webapp/scripts/utils/XAGlobals.js | 12 +-
.../webapp/scripts/views/kms/KMSTableLayout.js | 345 +++++++++++++++++++
.../webapp/scripts/views/kms/KmsKeyCreate.js | 186 ++++++++++
.../main/webapp/scripts/views/kms/KmsKeyForm.js | 107 ++++++
.../webapp/templates/common/TopNav_tmpl.html | 12 +-
.../webapp/templates/kms/KmsKeyCreate_tmpl.html | 30 ++
.../webapp/templates/kms/KmsKeyForm_tmpl.html | 19 +
.../templates/kms/KmsTableLayout_tmpl.html | 46 +++
38 files changed, 2222 insertions(+), 232 deletions(-)
----------------------------------------------------------------------
[07/12] incubator-ranger git commit: Merge branch 'master' of
https://git-wip-us.apache.org/repos/asf/incubator-ranger.git
Posted by bo...@apache.org.
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/incubator-ranger.git
Project: http://git-wip-us.apache.org/repos/asf/incubator-ranger/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ranger/commit/b7eafa41
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ranger/tree/b7eafa41
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ranger/diff/b7eafa41
Branch: refs/heads/master
Commit: b7eafa41f48bce00d9bc9e6473cc08078a071412
Parents: 6874ec0 46b5ecc
Author: Don Bosco Durai <bo...@apache.org>
Authored: Tue Apr 21 12:38:48 2015 -0700
Committer: Don Bosco Durai <bo...@apache.org>
Committed: Tue Apr 21 12:38:48 2015 -0700
----------------------------------------------------------------------
agents-common/scripts/upgrade-plugin.py | 160 +++++++++
agents-common/scripts/upgrade-plugin.sh | 29 ++
.../scripts/ranger-admin-site-template.xml | 223 +++++++++++++
security-admin/scripts/upgrade.sh | 33 ++
security-admin/scripts/upgrade_admin.py | 321 +++++++++++++++++++
src/main/assembly/admin-web.xml | 10 +
src/main/assembly/hbase-agent.xml | 11 +
src/main/assembly/hdfs-agent.xml | 11 +
src/main/assembly/hive-agent.xml | 11 +
src/main/assembly/knox-agent.xml | 11 +
src/main/assembly/storm-agent.xml | 11 +
11 files changed, 831 insertions(+)
----------------------------------------------------------------------
[05/12] incubator-ranger git commit: Merge branch 'master' of
https://git-wip-us.apache.org/repos/asf/incubator-ranger.git
Posted by bo...@apache.org.
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/incubator-ranger.git
Project: http://git-wip-us.apache.org/repos/asf/incubator-ranger/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ranger/commit/3a98f4ec
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ranger/tree/3a98f4ec
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ranger/diff/3a98f4ec
Branch: refs/heads/master
Commit: 3a98f4ec9a92ce514a3ad278550fd634cd38836a
Parents: 63099f3 0f56f82
Author: Don Bosco Durai <bo...@apache.org>
Authored: Mon Apr 20 16:06:19 2015 -0700
Committer: Don Bosco Durai <bo...@apache.org>
Committed: Mon Apr 20 16:06:19 2015 -0700
----------------------------------------------------------------------
hive-agent/pom.xml | 6 ++++++
.../org/apache/ranger/services/hive/client/HiveClient.java | 6 ++++++
.../apache/ranger/services/hive/client/HiveResourceMgr.java | 4 ++--
src/main/assembly/admin-web.xml | 1 +
4 files changed, 15 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
[09/12] incubator-ranger git commit: Merge branch 'master' of
https://git-wip-us.apache.org/repos/asf/incubator-ranger.git
Posted by bo...@apache.org.
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/incubator-ranger.git
Project: http://git-wip-us.apache.org/repos/asf/incubator-ranger/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ranger/commit/d8542a27
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ranger/tree/d8542a27
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ranger/diff/d8542a27
Branch: refs/heads/master
Commit: d8542a270906ce896dec3eaa3303de5d41e4113f
Parents: d6716f6 35c490d
Author: Don Bosco Durai <bo...@apache.org>
Authored: Tue Apr 21 22:56:05 2015 -0700
Committer: Don Bosco Durai <bo...@apache.org>
Committed: Tue Apr 21 22:56:05 2015 -0700
----------------------------------------------------------------------
.../model/validation/RangerPolicyValidator.java | 2 +-
.../validation/RangerValidatorFactory.java | 36 ----------
.../org/apache/ranger/biz/RangerBizUtil.java | 5 +-
.../java/org/apache/ranger/biz/UserMgr.java | 7 +-
.../java/org/apache/ranger/common/GUIDUtil.java | 13 ++--
.../ranger/common/RangerValidatorFactory.java | 43 ++++++++++++
.../org/apache/ranger/common/ServiceUtil.java | 3 +-
.../java/org/apache/ranger/rest/AssetREST.java | 5 +-
.../org/apache/ranger/rest/ServiceREST.java | 31 +++++++--
.../RangerSecurityContextFormationFilter.java | 7 +-
.../ranger/service/RangerPolicyServiceBase.java | 6 +-
.../service/RangerServiceDefServiceBase.java | 5 +-
.../service/RangerServiceServiceBase.java | 8 ++-
.../java/org/apache/ranger/view/VXResource.java | 25 +++++++
.../org/apache/ranger/rest/TestServiceREST.java | 2 +-
.../rest/TestServiceRESTForValidation.java | 70 +++++++++++++++-----
.../TestRangerServiceDefServiceBase.java | 5 ++
17 files changed, 196 insertions(+), 77 deletions(-)
----------------------------------------------------------------------
[08/12] incubator-ranger git commit: Merge branch 'master' of
https://git-wip-us.apache.org/repos/asf/incubator-ranger.git
Posted by bo...@apache.org.
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/incubator-ranger.git
Project: http://git-wip-us.apache.org/repos/asf/incubator-ranger/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ranger/commit/d6716f67
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ranger/tree/d6716f67
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ranger/diff/d6716f67
Branch: refs/heads/master
Commit: d6716f6704b7e359e7551b5876619b3c4e2c77ee
Parents: b7eafa4 c5716e5
Author: Don Bosco Durai <bo...@apache.org>
Authored: Tue Apr 21 18:07:17 2015 -0700
Committer: Don Bosco Durai <bo...@apache.org>
Committed: Tue Apr 21 18:07:17 2015 -0700
----------------------------------------------------------------------
docs/src/site/apt/index.apt.vm | 8 ++++++++
docs/src/site/site.xml | 8 +++++++-
2 files changed, 15 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
[04/12] incubator-ranger git commit: Merge branch 'master' of
https://git-wip-us.apache.org/repos/asf/incubator-ranger.git
Posted by bo...@apache.org.
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/incubator-ranger.git
Project: http://git-wip-us.apache.org/repos/asf/incubator-ranger/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ranger/commit/63099f37
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ranger/tree/63099f37
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ranger/diff/63099f37
Branch: refs/heads/master
Commit: 63099f378a8b759deeb9766220348df37c50ef6e
Parents: 236f1ba da70024
Author: Don Bosco Durai <bo...@apache.org>
Authored: Mon Apr 20 10:47:58 2015 -0700
Committer: Don Bosco Durai <bo...@apache.org>
Committed: Mon Apr 20 10:47:58 2015 -0700
----------------------------------------------------------------------
kms/scripts/db/oracle/kms_core_db_oracle.sql | 31 +++++++++++++
.../db/postgres/kms_core_db_postgres.sql | 34 +++++++++++++++
.../db/sqlserver/kms_core_db_sqlserver.sql | 46 ++++++++++++++++++++
kms/scripts/db_setup.py | 2 +-
kms/scripts/dba_script.py | 2 +-
kms/scripts/install.properties | 6 +--
.../db/mysql/patches/013-permissionmodel.sql | 2 +-
.../db/oracle/patches/013-permissionmodel.sql | 2 +-
.../db/postgres/xa_core_db_postgres.sql | 2 +-
.../db/sqlserver/xa_core_db_sqlserver.sql | 2 +-
10 files changed, 120 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
[06/12] incubator-ranger git commit: Merge branch 'master' of
https://git-wip-us.apache.org/repos/asf/incubator-ranger.git
Posted by bo...@apache.org.
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/incubator-ranger.git
Project: http://git-wip-us.apache.org/repos/asf/incubator-ranger/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ranger/commit/6874ec04
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ranger/tree/6874ec04
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ranger/diff/6874ec04
Branch: refs/heads/master
Commit: 6874ec04e1e747ed23f76a71b744f9bb43993acb
Parents: 3a98f4e 853a932
Author: Don Bosco Durai <bo...@apache.org>
Authored: Mon Apr 20 21:08:50 2015 -0700
Committer: Don Bosco Durai <bo...@apache.org>
Committed: Mon Apr 20 21:08:50 2015 -0700
----------------------------------------------------------------------
kms/scripts/ranger-kms | 140 +++++++++++++++++++-------------
kms/scripts/ranger-kms-initd | 76 +++++++++++++++++
kms/scripts/ranger-kms-services.sh | 100 -----------------------
kms/scripts/setup.sh | 4 +-
4 files changed, 160 insertions(+), 160 deletions(-)
----------------------------------------------------------------------
[02/12] incubator-ranger git commit: RANGER-276 Add support for
aggregating audit logs at source
Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/provider/BufferedAuditProvider.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/BufferedAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/BufferedAuditProvider.java
index ec5e9a8..ab6a74a 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/BufferedAuditProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/BufferedAuditProvider.java
@@ -24,7 +24,7 @@ import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.model.AuthzAuditEvent;
public abstract class BufferedAuditProvider extends BaseAuditProvider {
- private LogBuffer<AuditEventBase> mBuffer = null;
+ private LogBuffer<AuditEventBase> mBuffer = null;
private LogDestination<AuditEventBase> mDestination = null;
@Override
@@ -34,34 +34,39 @@ public abstract class BufferedAuditProvider extends BaseAuditProvider {
@Override
public boolean log(AuditEventBase event) {
- if(event instanceof AuthzAuditEvent) {
- AuthzAuditEvent authzEvent = (AuthzAuditEvent)event;
+ if (event instanceof AuthzAuditEvent) {
+ AuthzAuditEvent authzEvent = (AuthzAuditEvent) event;
- if(authzEvent.getAgentHostname() == null) {
+ if (authzEvent.getAgentHostname() == null) {
authzEvent.setAgentHostname(MiscUtil.getHostname());
}
- if(authzEvent.getLogType() == null) {
+ if (authzEvent.getLogType() == null) {
authzEvent.setLogType("RangerAudit");
}
- if(authzEvent.getEventId() == null) {
+ if (authzEvent.getEventId() == null) {
authzEvent.setEventId(MiscUtil.generateUniqueId());
}
}
- if(! mBuffer.add(event)) {
+ if (!mBuffer.add(event)) {
logFailedEvent(event);
+ return false;
}
return true;
}
@Override
public boolean log(Collection<AuditEventBase> events) {
+ boolean ret = true;
for (AuditEventBase event : events) {
- log(event);
+ ret = log(event);
+ if (!ret) {
+ break;
+ }
}
- return true;
+ return ret;
}
@Override
@@ -73,8 +78,12 @@ public abstract class BufferedAuditProvider extends BaseAuditProvider {
@Override
public boolean logJSON(Collection<String> events) {
+ boolean ret = true;
for (String event : events) {
- logJSON(event);
+ ret = logJSON(event);
+ if (!ret) {
+ break;
+ }
}
return false;
}
@@ -93,7 +102,6 @@ public abstract class BufferedAuditProvider extends BaseAuditProvider {
public void waitToComplete() {
}
-
@Override
public void waitToComplete(long timeout) {
}
@@ -120,9 +128,9 @@ public abstract class BufferedAuditProvider extends BaseAuditProvider {
return mDestination;
}
- protected void setBufferAndDestination(LogBuffer<AuditEventBase> buffer,
- LogDestination<AuditEventBase> destination) {
- mBuffer = buffer;
+ protected void setBufferAndDestination(LogBuffer<AuditEventBase> buffer,
+ LogDestination<AuditEventBase> destination) {
+ mBuffer = buffer;
mDestination = destination;
}
}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/provider/DbAuditProvider.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/DbAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/DbAuditProvider.java
index f4976fb..f4bd90c 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/DbAuditProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/DbAuditProvider.java
@@ -31,6 +31,7 @@ import javax.persistence.Persistence;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.dao.DaoManager;
+import org.apache.ranger.audit.destination.AuditDestination;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.model.AuthzAuditEvent;
import org.apache.ranger.authorization.hadoop.utils.RangerCredentialProvider;
@@ -120,10 +121,14 @@ public class DbAuditProvider extends AuditDestination {
@Override
public boolean log(Collection<AuditEventBase> events) {
+ boolean ret = true;
for (AuditEventBase event : events) {
- log(event);
+ ret = log(event);
+ if(!ret) {
+ break;
+ }
}
- return true;
+ return ret;
}
@Override
@@ -135,10 +140,14 @@ public class DbAuditProvider extends AuditDestination {
@Override
public boolean logJSON(Collection<String> events) {
+ boolean ret = true;
for (String event : events) {
- logJSON(event);
+ ret = logJSON(event);
+ if( !ret ) {
+ break;
+ }
}
- return false;
+ return ret;
}
@Override
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/provider/FileAuditDestination.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/FileAuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/FileAuditDestination.java
deleted file mode 100644
index 62ecab1..0000000
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/FileAuditDestination.java
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.ranger.audit.provider;
-
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Date;
-import java.util.List;
-import java.util.Properties;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.ranger.audit.model.AuditEventBase;
-
-/**
- * This class write the logs to local file
- */
-public class FileAuditDestination extends AuditDestination {
- private static final Log logger = LogFactory
- .getLog(FileAuditDestination.class);
-
- public static final String PROP_FILE_LOCAL_DIR = "dir";
- public static final String PROP_FILE_LOCAL_FILE_NAME_FORMAT = "filename.format";
- public static final String PROP_FILE_FILE_ROLLOVER = "file.rollover.sec";
-
- String baseFolder = null;
- String fileFormat = null;
- int fileRolloverSec = 24 * 60 * 60; // In seconds
- private String logFileNameFormat;
-
- boolean initDone = false;
-
- private File logFolder;
- PrintWriter logWriter = null;
-
- private Date fileCreateTime = null;
-
- private String currentFileName;
-
- private boolean isStopped = false;
-
- @Override
- public void init(Properties prop, String propPrefix) {
- super.init(prop, propPrefix);
-
- // Initialize properties for this class
- // Initial folder and file properties
- String logFolderProp = MiscUtil.getStringProperty(props, propPrefix
- + "." + PROP_FILE_LOCAL_DIR);
- logFileNameFormat = MiscUtil.getStringProperty(props, propPrefix + "."
- + PROP_FILE_LOCAL_FILE_NAME_FORMAT);
- fileRolloverSec = MiscUtil.getIntProperty(props, propPrefix + "."
- + PROP_FILE_FILE_ROLLOVER, fileRolloverSec);
-
- if (logFolderProp == null || logFolderProp.isEmpty()) {
- logger.error("File destination folder is not configured. Please set "
- + propPrefix
- + "."
- + PROP_FILE_LOCAL_DIR
- + ". name="
- + getName());
- return;
- }
- logFolder = new File(logFolderProp);
- if (!logFolder.isDirectory()) {
- logFolder.mkdirs();
- if (!logFolder.isDirectory()) {
- logger.error("FileDestination folder not found and can't be created. folder="
- + logFolder.getAbsolutePath() + ", name=" + getName());
- return;
- }
- }
- logger.info("logFolder=" + logFolder + ", name=" + getName());
-
- if (logFileNameFormat == null || logFileNameFormat.isEmpty()) {
- logFileNameFormat = "%app-type%_ranger_audit.log";
- }
-
- logger.info("logFileNameFormat=" + logFileNameFormat + ", destName="
- + getName());
-
- initDone = true;
- }
-
- @Override
- public boolean logJSON(Collection<String> events) {
- try {
- PrintWriter out = getLogFileStream();
- for (String event : events) {
- out.println(event);
- }
- out.flush();
- } catch (Throwable t) {
- logError("Error writing to log file.", t);
- return false;
- }
- return true;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see
- * org.apache.ranger.audit.provider.AuditProvider#log(java.util.Collection)
- */
- @Override
- synchronized public boolean log(Collection<AuditEventBase> events) {
- if (isStopped) {
- logError("log() called after stop was requested. name=" + getName());
- return false;
- }
- List<String> jsonList = new ArrayList<String>();
- for (AuditEventBase event : events) {
- try {
- jsonList.add(MiscUtil.stringify(event));
- } catch (Throwable t) {
- logger.error("Error converting to JSON. event=" + event);
- }
- }
- return logJSON(jsonList);
-
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#start()
- */
- @Override
- public void start() {
- // Nothing to do here. We will open the file when the first log request
- // comes
- }
-
- @Override
- synchronized public void stop() {
- if (logWriter != null) {
- logWriter.flush();
- logWriter.close();
- logWriter = null;
- isStopped = true;
- }
- }
-
- // Helper methods in this class
- synchronized private PrintWriter getLogFileStream() throws Exception {
- closeFileIfNeeded();
-
- // Either there are no open log file or the previous one has been rolled
- // over
- if (logWriter == null) {
- Date currentTime = new Date();
- // Create a new file
- String fileName = MiscUtil.replaceTokens(logFileNameFormat,
- currentTime.getTime());
- File outLogFile = new File(logFolder, fileName);
- if (outLogFile.exists()) {
- // Let's try to get the next available file
- int i = 0;
- while (true) {
- i++;
- int lastDot = fileName.lastIndexOf('.');
- String baseName = fileName.substring(0, lastDot);
- String extension = fileName.substring(lastDot);
- String newFileName = baseName + "." + i + extension;
- File newLogFile = new File(logFolder, newFileName);
- if (!newLogFile.exists()) {
- // Move the file
- if (!outLogFile.renameTo(newLogFile)) {
- logger.error("Error renameing file. " + outLogFile
- + " to " + newLogFile);
- }
- break;
- }
- }
- }
- if (!outLogFile.exists()) {
- logger.info("Creating new file. destName=" + getName()
- + ", fileName=" + fileName);
- // Open the file
- logWriter = new PrintWriter(new BufferedWriter(new FileWriter(
- outLogFile)));
- } else {
- logWriter = new PrintWriter(new BufferedWriter(new FileWriter(
- outLogFile, true)));
- }
- fileCreateTime = new Date();
- currentFileName = outLogFile.getPath();
- }
- return logWriter;
- }
-
- private void closeFileIfNeeded() throws FileNotFoundException, IOException {
- if (logWriter == null) {
- return;
- }
- if (System.currentTimeMillis() - fileCreateTime.getTime() > fileRolloverSec * 1000) {
- logger.info("Closing file. Rolling over. name=" + getName()
- + ", fileName=" + currentFileName);
- logWriter.flush();
- logWriter.close();
- logWriter = null;
- currentFileName = null;
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/provider/HDFSAuditDestination.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/HDFSAuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/HDFSAuditDestination.java
deleted file mode 100644
index a36c40f..0000000
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/HDFSAuditDestination.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.ranger.audit.provider;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Date;
-import java.util.List;
-import java.util.Properties;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.ranger.audit.model.AuditEventBase;
-
-/**
- * This class write the logs to local file
- */
-public class HDFSAuditDestination extends AuditDestination {
- private static final Log logger = LogFactory
- .getLog(HDFSAuditDestination.class);
-
- public static final String PROP_HDFS_DIR = "dir";
- public static final String PROP_HDFS_SUBDIR = "subdir";
- public static final String PROP_HDFS_FILE_NAME_FORMAT = "filename.format";
- public static final String PROP_HDFS_ROLLOVER = "file.rollover.sec";
-
- String baseFolder = null;
- String fileFormat = null;
- int fileRolloverSec = 24 * 60 * 60; // In seconds
- private String logFileNameFormat;
-
- boolean initDone = false;
-
- private String logFolder;
- PrintWriter logWriter = null;
-
- private Date fileCreateTime = null;
-
- private String currentFileName;
-
- private boolean isStopped = false;
-
- @Override
- public void init(Properties prop, String propPrefix) {
- super.init(prop, propPrefix);
-
- // Initialize properties for this class
- // Initial folder and file properties
- String logFolderProp = MiscUtil.getStringProperty(props, propPrefix
- + "." + PROP_HDFS_DIR);
- String logSubFolder = MiscUtil.getStringProperty(props, propPrefix
- + "." + PROP_HDFS_SUBDIR);
- if (logSubFolder == null || logSubFolder.isEmpty()) {
- logSubFolder = "%app-type%/%time:yyyyMMdd%";
- }
-
- logFileNameFormat = MiscUtil.getStringProperty(props, propPrefix + "."
- + PROP_HDFS_FILE_NAME_FORMAT);
- fileRolloverSec = MiscUtil.getIntProperty(props, propPrefix + "."
- + PROP_HDFS_ROLLOVER, fileRolloverSec);
-
- if (logFileNameFormat == null || logFileNameFormat.isEmpty()) {
- logFileNameFormat = "%app-type%_ranger_audit_%hostname%" + ".log";
- }
-
- if (logFolderProp == null || logFolderProp.isEmpty()) {
- logger.fatal("File destination folder is not configured. Please set "
- + propPrefix + "." + PROP_HDFS_DIR + ". name=" + getName());
- return;
- }
-
- logFolder = logFolderProp + "/" + logSubFolder;
- logger.info("logFolder=" + logFolder + ", destName=" + getName());
- logger.info("logFileNameFormat=" + logFileNameFormat + ", destName="
- + getName());
-
- initDone = true;
- }
-
- @Override
- public boolean logJSON(Collection<String> events) {
- try {
- PrintWriter out = getLogFileStream();
- for (String event : events) {
- out.println(event);
- }
- out.flush();
- } catch (Throwable t) {
- logError("Error writing to log file.", t);
- return false;
- }
- return true;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see
- * org.apache.ranger.audit.provider.AuditProvider#log(java.util.Collection)
- */
- @Override
- synchronized public boolean log(Collection<AuditEventBase> events) {
- if (isStopped) {
- logError("log() called after stop was requested. name=" + getName());
- return false;
- }
- List<String> jsonList = new ArrayList<String>();
- for (AuditEventBase event : events) {
- try {
- jsonList.add(MiscUtil.stringify(event));
- } catch (Throwable t) {
- logger.error("Error converting to JSON. event=" + event);
- }
- }
- return logJSON(jsonList);
-
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#start()
- */
- @Override
- public void start() {
- // Nothing to do here. We will open the file when the first log request
- // comes
- }
-
- @Override
- synchronized public void stop() {
- try {
- if (logWriter != null) {
- logWriter.flush();
- logWriter.close();
- logWriter = null;
- isStopped = true;
- }
- } catch (Throwable t) {
- logger.error("Error closing HDFS file.", t);
- }
- }
-
- // Helper methods in this class
- synchronized private PrintWriter getLogFileStream() throws Throwable {
- closeFileIfNeeded();
-
- // Either there are no open log file or the previous one has been rolled
- // over
- if (logWriter == null) {
- Date currentTime = new Date();
- // Create a new file
- String fileName = MiscUtil.replaceTokens(logFileNameFormat,
- currentTime.getTime());
- String parentFolder = MiscUtil.replaceTokens(logFolder,
- currentTime.getTime());
- Configuration conf = new Configuration();
-
- String fullPath = parentFolder
- + org.apache.hadoop.fs.Path.SEPARATOR + fileName;
- String defaultPath = fullPath;
- URI uri = URI.create(fullPath);
- FileSystem fileSystem = FileSystem.get(uri, conf);
-
- Path hdfPath = new Path(fullPath);
- logger.info("Checking whether log file exists. hdfPath=" + fullPath);
- int i = 0;
- while (fileSystem.exists(hdfPath)) {
- i++;
- int lastDot = defaultPath.lastIndexOf('.');
- String baseName = defaultPath.substring(0, lastDot);
- String extension = defaultPath.substring(lastDot);
- fullPath = baseName + "." + i + extension;
- hdfPath = new Path(fullPath);
- logger.info("Checking whether log file exists. hdfPath=" + fullPath);
- }
- logger.info("Log file doesn't exists. Will create and use it. hdfPath=" + fullPath);
- // Create parent folders
- createParents(hdfPath, fileSystem);
-
- // Create the file to write
- logger.info("Creating new log file. hdfPath=" + fullPath);
- FSDataOutputStream ostream = fileSystem.create(hdfPath);
- logWriter = new PrintWriter(ostream);
- fileCreateTime = new Date();
- currentFileName = fullPath;
- }
- return logWriter;
- }
-
- private void createParents(Path pathLogfile, FileSystem fileSystem)
- throws Throwable {
- logger.info("Creating parent folder for " + pathLogfile);
- Path parentPath = pathLogfile != null ? pathLogfile.getParent() : null;
-
- if (parentPath != null && fileSystem != null
- && !fileSystem.exists(parentPath)) {
- fileSystem.mkdirs(parentPath);
- }
- }
-
- private void closeFileIfNeeded() throws FileNotFoundException, IOException {
- if (logWriter == null) {
- return;
- }
- // TODO: Close the file on absolute time. Currently it is implemented as
- // relative time
- if (System.currentTimeMillis() - fileCreateTime.getTime() > fileRolloverSec * 1000) {
- logger.info("Closing file. Rolling over. name=" + getName()
- + ", fileName=" + currentFileName);
- logWriter.flush();
- logWriter.close();
- logWriter = null;
- currentFileName = null;
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jAuditProvider.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jAuditProvider.java
index a5a52a0..040a045 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jAuditProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jAuditProvider.java
@@ -23,6 +23,7 @@ import java.util.Properties;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.ranger.audit.destination.AuditDestination;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.model.AuthzAuditEvent;
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/provider/MultiDestAuditProvider.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/MultiDestAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/MultiDestAuditProvider.java
index 57ac0a0..876fa5b 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/MultiDestAuditProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/MultiDestAuditProvider.java
@@ -69,6 +69,8 @@ public class MultiDestAuditProvider extends BaseAuditProvider {
public void addAuditProviders(List<AuditProvider> providers) {
if (providers != null) {
for (AuditProvider provider : providers) {
+ LOG.info("Adding " + provider.getName()
+ + " as consumer to MultiDestination " + getName());
addAuditProvider(provider);
}
}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditAsyncQueue.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditAsyncQueue.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditAsyncQueue.java
new file mode 100644
index 0000000..a6f291d
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditAsyncQueue.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ranger.audit.queue;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.concurrent.LinkedTransferQueue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.ranger.audit.model.AuditEventBase;
+import org.apache.ranger.audit.provider.AuditProvider;
+import org.apache.ranger.audit.provider.BaseAuditProvider;
+
+/**
+ * This is a non-blocking queue with no limit on capacity.
+ */
+public class AuditAsyncQueue extends BaseAuditProvider implements Runnable {
+ private static final Log logger = LogFactory.getLog(AuditAsyncQueue.class);
+
+ LinkedTransferQueue<AuditEventBase> queue = new LinkedTransferQueue<AuditEventBase>();
+ Thread consumerThread = null;
+
+ static final int MAX_DRAIN = 1000;
+ static int threadCount = 0;
+ static final String DEFAULT_NAME = "async";
+
+ public AuditAsyncQueue() {
+ setName(DEFAULT_NAME);
+ }
+
+ public AuditAsyncQueue(AuditProvider consumer) {
+ super(consumer);
+ setName(DEFAULT_NAME);
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger.
+ * audit.model.AuditEventBase)
+ */
+ @Override
+ public boolean log(AuditEventBase event) {
+ // Add to the queue and return ASAP
+ if (queue.size() >= getMaxQueueSize()) {
+ return false;
+ }
+ queue.add(event);
+ addLifeTimeInLogCount(1);
+ return true;
+ }
+
+ @Override
+ public boolean log(Collection<AuditEventBase> events) {
+ boolean ret = true;
+ for (AuditEventBase event : events) {
+ ret = log(event);
+ if (!ret) {
+ break;
+ }
+ }
+ return ret;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#start()
+ */
+ @Override
+ public void start() {
+ if (consumer != null) {
+ consumer.start();
+ }
+
+ consumerThread = new Thread(this, this.getClass().getName()
+ + (threadCount++));
+ consumerThread.setDaemon(true);
+ consumerThread.start();
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#stop()
+ */
+ @Override
+ public void stop() {
+ setDrain(true);
+ try {
+ if (consumerThread != null) {
+ consumerThread.interrupt();
+ }
+ consumerThread = null;
+ } catch (Throwable t) {
+ // ignore any exception
+ }
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#isFlushPending()
+ */
+ @Override
+ public boolean isFlushPending() {
+ if (queue.isEmpty()) {
+ return consumer.isFlushPending();
+ }
+ return true;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see java.lang.Runnable#run()
+ */
+ @Override
+ public void run() {
+ while (true) {
+ try {
+ AuditEventBase event = null;
+ if (!isDrain()) {
+ // For Transfer queue take() is blocking
+ event = queue.take();
+ } else {
+ // For Transfer queue poll() is non blocking
+ event = queue.poll();
+ }
+ if (event != null) {
+ Collection<AuditEventBase> eventList = new ArrayList<AuditEventBase>();
+ eventList.add(event);
+ queue.drainTo(eventList, MAX_DRAIN - 1);
+ consumer.log(eventList);
+ }
+ } catch (InterruptedException e) {
+ logger.info(
+ "Caught exception in consumer thread. Mostly to abort loop",
+ e);
+ } catch (Throwable t) {
+ logger.error("Caught error during processing request.", t);
+ }
+ if (isDrain() && queue.isEmpty()) {
+ break;
+ }
+ }
+ try {
+ // Call stop on the consumer
+ consumer.stop();
+ } catch (Throwable t) {
+ logger.error("Error while calling stop on consumer.", t);
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditBatchQueue.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditBatchQueue.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditBatchQueue.java
new file mode 100644
index 0000000..5e21efc
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditBatchQueue.java
@@ -0,0 +1,346 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ranger.audit.queue;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Properties;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.ranger.audit.model.AuditEventBase;
+import org.apache.ranger.audit.provider.AuditProvider;
+import org.apache.ranger.audit.provider.BaseAuditProvider;
+
+public class AuditBatchQueue extends BaseAuditProvider implements Runnable {
+ private static final Log logger = LogFactory.getLog(AuditBatchQueue.class);
+
+ private BlockingQueue<AuditEventBase> queue = null;
+ private Collection<AuditEventBase> localBatchBuffer = new ArrayList<AuditEventBase>();
+
+ Thread consumerThread = null;
+ static int threadCount = 0;
+
+ public AuditBatchQueue() {
+ }
+
+ public AuditBatchQueue(AuditProvider consumer) {
+ super(consumer);
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger.
+ * audit.model.AuditEventBase)
+ */
+ @Override
+ public boolean log(AuditEventBase event) {
+ // Add to batchQueue. Block if full
+ queue.add(event);
+ addLifeTimeInLogCount(1);
+ return true;
+ }
+
+ @Override
+ public boolean log(Collection<AuditEventBase> events) {
+ boolean ret = true;
+ for (AuditEventBase event : events) {
+ ret = log(event);
+ if (!ret) {
+ break;
+ }
+ }
+ return ret;
+ }
+
+ @Override
+ public void init(Properties prop, String basePropertyName) {
+ String propPrefix = "xasecure.audit.batch";
+ if (basePropertyName != null) {
+ propPrefix = basePropertyName;
+ }
+
+ super.init(prop, propPrefix);
+
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#start()
+ */
+ @Override
+ synchronized public void start() {
+ if (consumerThread != null) {
+ logger.error("Provider is already started. name=" + getName());
+ return;
+ }
+ logger.info("Creating ArrayBlockingQueue with maxSize="
+ + getMaxQueueSize());
+ queue = new ArrayBlockingQueue<AuditEventBase>(getMaxQueueSize());
+
+ // Start the consumer first
+ consumer.start();
+
+ // Then the FileSpooler
+ if (fileSpoolerEnabled) {
+ fileSpooler.start();
+ }
+
+ // Finally the queue listener
+ consumerThread = new Thread(this, this.getClass().getName()
+ + (threadCount++));
+ consumerThread.setDaemon(true);
+ consumerThread.start();
+
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#stop()
+ */
+ @Override
+ public void stop() {
+ setDrain(true);
+ flush();
+ try {
+ if (consumerThread != null) {
+ consumerThread.interrupt();
+ }
+ consumerThread = null;
+ } catch (Throwable t) {
+ // ignore any exception
+ }
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#waitToComplete()
+ */
+ @Override
+ public void waitToComplete() {
+ int defaultTimeOut = -1;
+ waitToComplete(defaultTimeOut);
+ consumer.waitToComplete(defaultTimeOut);
+ }
+
+ @Override
+ public void waitToComplete(long timeout) {
+ setDrain(true);
+ flush();
+ long sleepTime = 1000;
+ long startTime = System.currentTimeMillis();
+ int prevQueueSize = -1;
+ int staticLoopCount = 0;
+ while ((queue.size() > 0 || localBatchBuffer.size() > 0)) {
+ if (prevQueueSize == queue.size()) {
+ logger.error("Queue size is not changing. " + getName()
+ + ".size=" + queue.size());
+ staticLoopCount++;
+ if (staticLoopCount > 5) {
+ logger.error("Aborting writing to consumer. Some logs will be discarded."
+ + getName() + ".size=" + queue.size());
+ }
+ } else {
+ staticLoopCount = 0;
+ }
+ if (consumerThread != null) {
+ consumerThread.interrupt();
+ }
+ try {
+ Thread.sleep(sleepTime);
+ if (timeout > 0
+ && (System.currentTimeMillis() - startTime > timeout)) {
+ break;
+ }
+ } catch (InterruptedException e) {
+ break;
+ }
+ }
+ consumer.waitToComplete(timeout);
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#isFlushPending()
+ */
+ @Override
+ public boolean isFlushPending() {
+ if (queue.isEmpty()) {
+ return consumer.isFlushPending();
+ }
+ return true;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#flush()
+ */
+ @Override
+ public void flush() {
+ if (fileSpoolerEnabled) {
+ fileSpooler.flush();
+ }
+ consumer.flush();
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see java.lang.Runnable#run()
+ */
+ @Override
+ public void run() {
+ long lastDispatchTime = System.currentTimeMillis();
+ boolean isDestActive = true;
+ while (true) {
+ // Time to next dispatch
+ long nextDispatchDuration = lastDispatchTime
+ - System.currentTimeMillis() + getMaxBatchInterval();
+
+ boolean isToSpool = false;
+ boolean fileSpoolDrain = false;
+ try {
+ if (fileSpoolerEnabled && fileSpooler.isPending()) {
+ int percentUsed = (getMaxQueueSize() - queue.size()) * 100
+ / getMaxQueueSize();
+ long lastAttemptDelta = fileSpooler
+ .getLastAttemptTimeDelta();
+
+ fileSpoolDrain = lastAttemptDelta > fileSpoolMaxWaitTime;
+ // If we should even read from queue?
+ if (!isDrain() && !fileSpoolDrain
+ && percentUsed < fileSpoolDrainThresholdPercent) {
+ // Since some files are still under progress and it is
+ // not in drain mode, lets wait and retry
+ if (nextDispatchDuration > 0) {
+ Thread.sleep(nextDispatchDuration);
+ lastDispatchTime = System.currentTimeMillis();
+ }
+ continue;
+ }
+ isToSpool = true;
+ }
+
+ AuditEventBase event = null;
+
+ if (!isToSpool && !isDrain() && !fileSpoolDrain
+ && nextDispatchDuration > 0) {
+ event = queue.poll(nextDispatchDuration,
+ TimeUnit.MILLISECONDS);
+ } else {
+ // For poll() is non blocking
+ event = queue.poll();
+ }
+
+ if (event != null) {
+ localBatchBuffer.add(event);
+ if (getMaxBatchSize() >= localBatchBuffer.size()) {
+ queue.drainTo(localBatchBuffer, getMaxBatchSize()
+ - localBatchBuffer.size());
+ }
+ } else {
+ // poll returned due to timeout, so resetting clock
+ nextDispatchDuration = lastDispatchTime
+ - System.currentTimeMillis()
+ + getMaxBatchInterval();
+
+ lastDispatchTime = System.currentTimeMillis();
+ }
+ } catch (InterruptedException e) {
+ logger.info(
+ "Caught exception in consumer thread. Mostly to abort loop",
+ e);
+ setDrain(true);
+ } catch (Throwable t) {
+ logger.error("Caught error during processing request.", t);
+ }
+
+ if (localBatchBuffer.size() > 0 && isToSpool) {
+ // Let spool to the file directly
+ if (isDestActive) {
+ logger.info("Switching to file spool. Queue=" + getName()
+ + ", dest=" + consumer.getName());
+ }
+ isDestActive = false;
+ // Just before stashing
+ lastDispatchTime = System.currentTimeMillis();
+ fileSpooler.stashLogs(localBatchBuffer);
+ localBatchBuffer.clear();
+ } else if (localBatchBuffer.size() > 0
+ && (isDrain()
+ || localBatchBuffer.size() >= getMaxBatchSize() || nextDispatchDuration <= 0)) {
+ if (fileSpoolerEnabled && !isDestActive) {
+ logger.info("Switching to writing to destination. Queue="
+ + getName() + ", dest=" + consumer.getName());
+ }
+ // Reset time just before sending the logs
+ lastDispatchTime = System.currentTimeMillis();
+ boolean ret = consumer.log(localBatchBuffer);
+ if (!ret) {
+ if (fileSpoolerEnabled) {
+ logger.info("Switching to file spool. Queue="
+ + getName() + ", dest=" + consumer.getName());
+ // Transient error. Stash and move on
+ fileSpooler.stashLogs(localBatchBuffer);
+ isDestActive = false;
+ } else {
+ // We need to drop this event
+ logFailedEvent(localBatchBuffer, null);
+ }
+ } else {
+ isDestActive = true;
+ }
+ localBatchBuffer.clear();
+ }
+
+ if (isDrain()) {
+ if (!queue.isEmpty() || localBatchBuffer.size() > 0) {
+ logger.info("Queue is not empty. Will retry. queue.size()="
+ + queue.size() + ", localBatchBuffer.size()="
+ + localBatchBuffer.size());
+ } else {
+ break;
+ }
+ }
+ }
+
+ logger.info("Exiting consumerThread. Queue=" + getName() + ", dest="
+ + consumer.getName());
+ try {
+ // Call stop on the consumer
+ consumer.stop();
+ if (fileSpoolerEnabled) {
+ fileSpooler.stop();
+ }
+ } catch (Throwable t) {
+ logger.error("Error while calling stop on consumer.", t);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileSpool.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileSpool.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileSpool.java
new file mode 100644
index 0000000..66d1573
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileSpool.java
@@ -0,0 +1,884 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ranger.audit.queue;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileFilter;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Date;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedTransferQueue;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.ranger.audit.model.AuditEventBase;
+import org.apache.ranger.audit.provider.AuditProvider;
+import org.apache.ranger.audit.provider.MiscUtil;
+
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+
+/**
+ * This class temporarily stores logs in file system if the destination is
+ * overloaded or down
+ */
+public class AuditFileSpool implements Runnable {
+ private static final Log logger = LogFactory.getLog(AuditFileSpool.class);
+
+ // Lifecycle of a spool file: being written, queued for replay,
+ // partially replayed, or fully delivered to the destination.
+ public enum SPOOL_FILE_STATUS {
+ pending, write_inprogress, read_inprogress, done
+ }
+
+ // Property suffixes; the effective key is "<propPrefix>.<suffix>" (see init()).
+ public static final String PROP_FILE_SPOOL_LOCAL_DIR = "filespool.dir";
+ public static final String PROP_FILE_SPOOL_LOCAL_FILE_NAME = "filespool.filename.format";
+ public static final String PROP_FILE_SPOOL_ARCHIVE_DIR = "filespool.archive.dir";
+ public static final String PROP_FILE_SPOOL_ARCHIVE_MAX_FILES_COUNT = "filespool.archive.max.files";
+ public static final String PROP_FILE_SPOOL_FILENAME_PREFIX = "filespool.file.prefix";
+ public static final String PROP_FILE_SPOOL_FILE_ROLLOVER = "filespool.file.rollover.sec";
+ public static final String PROP_FILE_SPOOL_INDEX_FILE = "filespool.index.filename";
+ // public static final String PROP_FILE_SPOOL_INDEX_DONE_FILE =
+ // "filespool.index.done_filename";
+ public static final String PROP_FILE_SPOOL_DEST_RETRY_MS = "filespool.destination.retry.ms";
+
+ // queueProvider feeds events in; consumerProvider is the real destination.
+ AuditProvider queueProvider = null;
+ AuditProvider consumerProvider = null;
+
+ // Files waiting to be replayed to the destination by the writer thread.
+ BlockingQueue<AuditIndexRecord> indexQueue = new LinkedTransferQueue<AuditIndexRecord>();
+
+ // Folder and File attributes
+ File logFolder = null;
+ String logFileNameFormat = null;
+ File archiveFolder = null;
+ String fileNamePrefix = null;
+ String indexFileName = null;
+ File indexFile = null;
+ String indexDoneFileName = null;
+ File indexDoneFile = null;
+ int retryDestinationMS = 30 * 1000; // Default 30 seconds
+ int fileRolloverSec = 24 * 60 * 60; // In seconds
+ int maxArchiveFiles = 100;
+
+ int errorLogIntervalMS = 30 * 1000; // Every 30 seconds
+ long lastErrorLogMS = 0;
+
+ // In-memory mirror of the index file; persisted via saveIndexFile().
+ List<AuditIndexRecord> indexRecords = new ArrayList<AuditIndexRecord>();
+
+ boolean isPending = false;
+ long lastAttemptTime = 0;
+ boolean initDone = false;
+
+ // Current open spool file (writer side) and the file being replayed (reader side).
+ PrintWriter logWriter = null;
+ AuditIndexRecord currentWriterIndexRecord = null;
+ AuditIndexRecord currentConsumerIndexRecord = null;
+
+ BufferedReader logReader = null;
+
+ Thread destinationThread = null;
+
+ boolean isWriting = true;
+ boolean isDrain = false;
+ boolean isDestDown = true;
+
+ private static Gson gson = null;
+
+ // Wires the spooler between the source queue and the destination consumer;
+ // call init() and start() before use.
+ public AuditFileSpool(AuditProvider queueProvider,
+ AuditProvider consumerProvider) {
+ this.queueProvider = queueProvider;
+ this.consumerProvider = consumerProvider;
+ }
+
+ // Convenience overload: initialize with the default property prefix
+ // ("xasecure.audit.filespool" — see init(Properties, String)).
+ public void init(Properties prop) {
+ init(prop, null);
+ }
+
+ /**
+ * Reads configuration, creates the spool/archive folders and the index
+ * files, then reloads any outstanding index records from a previous run so
+ * unfinished spool files are re-queued for delivery. Safe to call only
+ * once; subsequent calls are rejected. Sets initDone=true only on full
+ * success.
+ */
+ public void init(Properties props, String basePropertyName) {
+ if (initDone) {
+ logger.error("init() called more than once. queueProvider="
+ + queueProvider.getName() + ", consumerProvider="
+ + consumerProvider.getName());
+ return;
+ }
+ String propPrefix = "xasecure.audit.filespool";
+ if (basePropertyName != null) {
+ propPrefix = basePropertyName;
+ }
+
+ try {
+ gson = new GsonBuilder().setDateFormat("yyyy-MM-dd HH:mm:ss.SSS")
+ .create();
+
+ // Initial folder and file properties
+ String logFolderProp = MiscUtil.getStringProperty(props, propPrefix
+ + "." + PROP_FILE_SPOOL_LOCAL_DIR);
+ // FIX: use propPrefix (not basePropertyName) so the lookup works when
+ // basePropertyName is null (the init(Properties) overload); previously
+ // the key became "null.filespool.filename.format" and the configured
+ // format was silently ignored.
+ logFileNameFormat = MiscUtil.getStringProperty(props,
+ propPrefix + "." + PROP_FILE_SPOOL_LOCAL_FILE_NAME);
+ String archiveFolderProp = MiscUtil.getStringProperty(props,
+ propPrefix + "." + PROP_FILE_SPOOL_ARCHIVE_DIR);
+ fileNamePrefix = MiscUtil.getStringProperty(props, propPrefix + "."
+ + PROP_FILE_SPOOL_FILENAME_PREFIX);
+ indexFileName = MiscUtil.getStringProperty(props, propPrefix + "."
+ + PROP_FILE_SPOOL_INDEX_FILE);
+ retryDestinationMS = MiscUtil.getIntProperty(props, propPrefix
+ + "." + PROP_FILE_SPOOL_DEST_RETRY_MS, retryDestinationMS);
+ fileRolloverSec = MiscUtil.getIntProperty(props, propPrefix + "."
+ + PROP_FILE_SPOOL_FILE_ROLLOVER, fileRolloverSec);
+ maxArchiveFiles = MiscUtil.getIntProperty(props, propPrefix + "."
+ + PROP_FILE_SPOOL_ARCHIVE_MAX_FILES_COUNT, maxArchiveFiles);
+
+ logger.info("retryDestinationMS=" + retryDestinationMS
+ + ", queueName=" + queueProvider.getName());
+ logger.info("fileRolloverSec=" + fileRolloverSec + ", queueName="
+ + queueProvider.getName());
+ logger.info("maxArchiveFiles=" + maxArchiveFiles + ", queueName="
+ + queueProvider.getName());
+
+ if (logFolderProp == null || logFolderProp.isEmpty()) {
+ logger.error("Audit spool folder is not configured. Please set "
+ + propPrefix
+ + "."
+ + PROP_FILE_SPOOL_LOCAL_DIR
+ + ". queueName=" + queueProvider.getName());
+ return;
+ }
+ logFolder = new File(logFolderProp);
+ if (!logFolder.isDirectory()) {
+ logFolder.mkdirs();
+ if (!logFolder.isDirectory()) {
+ logger.error("File Spool folder not found and can't be created. folder="
+ + logFolder.getAbsolutePath()
+ + ", queueName="
+ + queueProvider.getName());
+ return;
+ }
+ }
+ logger.info("logFolder=" + logFolder + ", queueName="
+ + queueProvider.getName());
+
+ if (logFileNameFormat == null || logFileNameFormat.isEmpty()) {
+ logFileNameFormat = "spool_" + "%app-type%" + "_"
+ + "%time:yyyyMMdd-HHmm.ss%.log";
+ }
+ logger.info("logFileNameFormat=" + logFileNameFormat
+ + ", queueName=" + queueProvider.getName());
+
+ if (archiveFolderProp == null || archiveFolderProp.isEmpty()) {
+ archiveFolder = new File(logFolder, "archive");
+ } else {
+ archiveFolder = new File(archiveFolderProp);
+ }
+ if (!archiveFolder.isDirectory()) {
+ archiveFolder.mkdirs();
+ if (!archiveFolder.isDirectory()) {
+ logger.error("File Spool archive folder not found and can't be created. folder="
+ + archiveFolder.getAbsolutePath()
+ + ", queueName="
+ + queueProvider.getName());
+ return;
+ }
+ }
+ logger.info("archiveFolder=" + archiveFolder + ", queueName="
+ + queueProvider.getName());
+
+ if (indexFileName == null || indexFileName.isEmpty()) {
+ if (fileNamePrefix == null || fileNamePrefix.isEmpty()) {
+ fileNamePrefix = queueProvider.getName() + "_"
+ + consumerProvider.getName();
+ }
+ indexFileName = "index_" + fileNamePrefix + ".json";
+ }
+
+ indexFile = new File(logFolder, indexFileName);
+ if (!indexFile.exists()) {
+ indexFile.createNewFile();
+ }
+ logger.info("indexFile=" + indexFile + ", queueName="
+ + queueProvider.getName());
+
+ int lastDot = indexFileName.lastIndexOf('.');
+ indexDoneFileName = indexFileName.substring(0, lastDot)
+ + "_closed.json";
+ indexDoneFile = new File(logFolder, indexDoneFileName);
+ if (!indexDoneFile.exists()) {
+ indexDoneFile.createNewFile();
+ }
+ logger.info("indexDoneFile=" + indexDoneFile + ", queueName="
+ + queueProvider.getName());
+
+ // Load index file
+ loadIndexFile();
+ for (AuditIndexRecord auditIndexRecord : indexRecords) {
+ if (!auditIndexRecord.status.equals(SPOOL_FILE_STATUS.done)) {
+ isPending = true;
+ }
+ if (auditIndexRecord.status
+ .equals(SPOOL_FILE_STATUS.write_inprogress)) {
+ currentWriterIndexRecord = auditIndexRecord;
+ logger.info("currentWriterIndexRecord="
+ + currentWriterIndexRecord.filePath
+ + ", queueName=" + queueProvider.getName());
+ }
+ if (auditIndexRecord.status
+ .equals(SPOOL_FILE_STATUS.read_inprogress)) {
+ indexQueue.add(auditIndexRecord);
+ }
+ }
+ printIndex();
+ // One more loop to add the rest of the pending records in reverse
+ // order
+ for (int i = 0; i < indexRecords.size(); i++) {
+ AuditIndexRecord auditIndexRecord = indexRecords.get(i);
+ if (auditIndexRecord.status.equals(SPOOL_FILE_STATUS.pending)) {
+ File consumerFile = new File(auditIndexRecord.filePath);
+ if (!consumerFile.exists()) {
+ logger.error("INIT: Consumer file="
+ + consumerFile.getPath() + " not found.");
+ // NOTE(review): System.exit() in a library init kills the
+ // entire host JVM on a missing spool file — consider
+ // degrading gracefully instead. Left as-is intentionally.
+ System.exit(1);
+ }
+ indexQueue.add(auditIndexRecord);
+ }
+ }
+
+ } catch (Throwable t) {
+ logger.fatal("Error initializing File Spooler. queue="
+ + queueProvider.getName(), t);
+ return;
+ }
+ initDone = true;
+ }
+
+ /**
+ * Starts the background destination-writer thread (this class's run())
+ * that replays outstanding spool files and updates their status
+ * accordingly. No-op if init() did not complete successfully.
+ */
+ public void start() {
+ if (!initDone) {
+ logger.error("Cannot start Audit File Spooler. Initilization not done yet. queueName="
+ + queueProvider.getName());
+ return;
+ }
+
+ logger.info("Starting writerThread, queueName="
+ + queueProvider.getName() + ", consumer="
+ + consumerProvider.getName());
+
+ // Let's start the thread to read
+ // Daemon thread so the spooler never blocks JVM shutdown.
+ destinationThread = new Thread(this, queueProvider.getName()
+ + "_destWriter");
+ destinationThread.setDaemon(true);
+ destinationThread.start();
+ }
+
+ /**
+ * Signals drain mode, flushes buffered output, waits briefly (up to ~3s)
+ * for any in-flight write to finish, closes the open spool file, and
+ * interrupts the destination-writer thread.
+ */
+ public void stop() {
+ if (!initDone) {
+ logger.error("Cannot stop Audit File Spooler. Initilization not done. queueName="
+ + queueProvider.getName());
+ return;
+ }
+ logger.info("Stop called, queueName=" + queueProvider.getName()
+ + ", consumer=" + consumerProvider.getName());
+
+ isDrain = true;
+ flush();
+
+ PrintWriter out = getOpenLogFileStream();
+ if (out != null) {
+ // If write is still going on, then let's give it enough time to
+ // complete
+ for (int i = 0; i < 3; i++) {
+ if (isWriting) {
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ continue;
+ }
+ try {
+ logger.info("Closing open file, queueName="
+ + queueProvider.getName() + ", consumer="
+ + consumerProvider.getName());
+
+ // NOTE(review): there is no break after close, so the
+ // stream may be closed again on later iterations;
+ // PrintWriter.close() is idempotent, so this is harmless.
+ out.flush();
+ out.close();
+ } catch (Throwable t) {
+ logger.debug("Error closing spool out file.", t);
+ }
+ }
+ }
+ try {
+ if (destinationThread != null) {
+ destinationThread.interrupt();
+ }
+ destinationThread = null;
+ } catch (Throwable e) {
+ // ignore
+ }
+ }
+
+ /**
+ * Flushes the currently open spool file, if any. No-op when init() did
+ * not complete.
+ */
+ public void flush() {
+ if (!initDone) {
+ logger.error("Cannot flush Audit File Spooler. Initilization not done. queueName="
+ + queueProvider.getName());
+ return;
+ }
+ PrintWriter out = getOpenLogFileStream();
+ if (out != null) {
+ out.flush();
+ }
+ }
+
+ /**
+ * If any files are still not processed. Also, if the destination is not
+ * reachable
+ *
+ * @return true when spooled events are still awaiting delivery; always
+ * false when the spooler was never initialized
+ */
+ public boolean isPending() {
+ if (!initDone) {
+ logError("isPending(): File Spooler not initialized. queueName="
+ + queueProvider.getName());
+ return false;
+ }
+
+ return isPending;
+ }
+
+ /**
+ * Milliseconds from last attempt time
+ *
+ * @return 0 if no delivery attempt has been made yet
+ */
+ public long getLastAttemptTimeDelta() {
+ if (lastAttemptTime == 0) {
+ return 0;
+ }
+ return System.currentTimeMillis() - lastAttemptTime;
+ }
+
+ /**
+ * Serializes one event to JSON and appends it to the current spool file.
+ * Rejected (with an error log) once stop() has set drain mode.
+ */
+ synchronized public void stashLogs(AuditEventBase event) {
+ if (isDrain) {
+ // Stop has been called, so this method shouldn't be called
+ logger.error("stashLogs() is called after stop is called. event="
+ + event);
+ return;
+ }
+ try {
+ isWriting = true;
+ PrintWriter logOut = getLogFileStream();
+ // Convert event to json
+ String jsonStr = MiscUtil.stringify(event);
+ logOut.println(jsonStr);
+ isPending = true;
+ } catch (Exception ex) {
+ logger.error("Error writing to file. event=" + event, ex);
+ } finally {
+ isWriting = false;
+ }
+
+ }
+
+ // Batch variant: stashes each event, then flushes once at the end.
+ synchronized public void stashLogs(Collection<AuditEventBase> events) {
+ for (AuditEventBase event : events) {
+ stashLogs(event);
+ }
+ flush();
+ }
+
+ /**
+ * Appends an already-serialized (JSON string) event to the spool file.
+ * Unlike stashLogs(AuditEventBase), this does NOT set isPending —
+ * presumably the caller manages pending state; verify against callers.
+ */
+ synchronized public void stashLogsString(String event) {
+ if (isDrain) {
+ // Stop has been called, so this method shouldn't be called
+ logger.error("stashLogs() is called after stop is called. event="
+ + event);
+ return;
+ }
+ try {
+ isWriting = true;
+ PrintWriter logOut = getLogFileStream();
+ logOut.println(event);
+ } catch (Exception ex) {
+ logger.error("Error writing to file. event=" + event, ex);
+ } finally {
+ isWriting = false;
+ }
+
+ }
+
+ // Batch variant of stashLogsString with a single trailing flush.
+ synchronized public void stashLogsString(Collection<String> events) {
+ for (String event : events) {
+ stashLogsString(event);
+ }
+ flush();
+ }
+
+ /**
+ * Returns the currently open output stream. If there is no currently open
+ * output file, then it will return null. Never opens a new file (contrast
+ * with getLogFileStream()).
+ *
+ * @return the open PrintWriter, or null when no spool file is open
+ */
+ synchronized private PrintWriter getOpenLogFileStream() {
+ return logWriter;
+ }
+
+ /**
+ * Returns a writable stream for the active spool file, rolling over or
+ * creating/reopening a file as needed. New files get a fresh
+ * write_inprogress index record; a restart reopens the previous
+ * write_inprogress file in append mode.
+ *
+ * @return open PrintWriter for the active spool file
+ * @throws Exception on file creation/open failure
+ */
+ synchronized private PrintWriter getLogFileStream() throws Exception {
+ closeFileIfNeeded();
+
+ // Either there are no open log file or the previous one has been rolled
+ // over
+ if (currentWriterIndexRecord == null) {
+ Date currentTime = new Date();
+ // Create a new file
+ String fileName = MiscUtil.replaceTokens(logFileNameFormat,
+ currentTime.getTime());
+ String newFileName = fileName;
+ File outLogFile = null;
+ int i = 0;
+ // Probe for a name colliding neither in the spool nor the archive
+ // folder, appending ".1", ".2", ... before the extension.
+ while (true) {
+ outLogFile = new File(logFolder, newFileName);
+ File archiveLogFile = new File(archiveFolder, newFileName);
+ if (!outLogFile.exists() && !archiveLogFile.exists()) {
+ break;
+ }
+ i++;
+ int lastDot = fileName.lastIndexOf('.');
+ String baseName = fileName.substring(0, lastDot);
+ String extension = fileName.substring(lastDot);
+ newFileName = baseName + "." + i + extension;
+ }
+ fileName = newFileName;
+ logger.info("Creating new file. queueName="
+ + queueProvider.getName() + ", fileName=" + fileName);
+ // Open the file
+ logWriter = new PrintWriter(new BufferedWriter(new FileWriter(
+ outLogFile)));
+
+ AuditIndexRecord tmpIndexRecord = new AuditIndexRecord();
+
+ tmpIndexRecord.id = MiscUtil.generateUniqueId();
+ tmpIndexRecord.filePath = outLogFile.getPath();
+ tmpIndexRecord.status = SPOOL_FILE_STATUS.write_inprogress;
+ tmpIndexRecord.fileCreateTime = currentTime;
+ tmpIndexRecord.lastAttempt = true;
+ currentWriterIndexRecord = tmpIndexRecord;
+ indexRecords.add(currentWriterIndexRecord);
+ saveIndexFile();
+
+ } else {
+ if (logWriter == null) {
+ // This means the process just started. We need to open the file
+ // in append mode.
+ logger.info("Opening existing file for append. queueName="
+ + queueProvider.getName() + ", fileName="
+ + currentWriterIndexRecord.filePath);
+ logWriter = new PrintWriter(new BufferedWriter(new FileWriter(
+ currentWriterIndexRecord.filePath, true)));
+ }
+ }
+ return logWriter;
+ }
+
+ /**
+ * Rolls the active spool file when it is the only outstanding file or
+ * when its age exceeds fileRolloverSec. Rolling marks the record pending,
+ * persists the index, and hands the file to the replay queue.
+ *
+ * @throws FileNotFoundException if the index file cannot be rewritten
+ * @throws IOException on index persistence failure
+ */
+ synchronized private void closeFileIfNeeded() throws FileNotFoundException,
+ IOException {
+ // Is there file open to write or there are no pending file, then close
+ // the active file
+ if (currentWriterIndexRecord != null) {
+ // Check whether the file needs to rolled
+ boolean closeFile = false;
+ if (indexRecords.size() == 1) {
+ closeFile = true;
+ logger.info("Closing file. Only one open file. queueName="
+ + queueProvider.getName() + ", fileName="
+ + currentWriterIndexRecord.filePath);
+ } else if (System.currentTimeMillis()
+ - currentWriterIndexRecord.fileCreateTime.getTime() > fileRolloverSec * 1000) {
+ closeFile = true;
+ logger.info("Closing file. Rolling over. queueName="
+ + queueProvider.getName() + ", fileName="
+ + currentWriterIndexRecord.filePath);
+ }
+ if (closeFile) {
+ // Roll the file
+ if (logWriter != null) {
+ logWriter.flush();
+ logWriter.close();
+ logWriter = null;
+ }
+ currentWriterIndexRecord.status = SPOOL_FILE_STATUS.pending;
+ currentWriterIndexRecord.writeCompleteTime = new Date();
+ saveIndexFile();
+ logger.info("Adding file to queue. queueName="
+ + queueProvider.getName() + ", fileName="
+ + currentWriterIndexRecord.filePath);
+ indexQueue.add(currentWriterIndexRecord);
+ currentWriterIndexRecord = null;
+ }
+ }
+ }
+
+ /**
+ * Load the index file: clears indexRecords and repopulates it with one
+ * record per non-empty, non-comment ("#") JSON line.
+ *
+ * @throws IOException if the index file cannot be read
+ */
+ void loadIndexFile() throws IOException {
+ logger.info("Loading index file. fileName=" + indexFile.getPath());
+ BufferedReader br = new BufferedReader(new FileReader(indexFile));
+ // FIX: close the reader even when gson.fromJson() throws on a
+ // corrupt line; previously an exception leaked the file handle.
+ try {
+ indexRecords.clear();
+ String line;
+ while ((line = br.readLine()) != null) {
+ if (!line.isEmpty() && !line.startsWith("#")) {
+ AuditIndexRecord record = gson.fromJson(line,
+ AuditIndexRecord.class);
+ indexRecords.add(record);
+ }
+ }
+ } finally {
+ br.close();
+ }
+ }
+
+ // Debug aid: dumps every in-memory index record and whether its backing
+ // spool file still exists on disk.
+ synchronized void printIndex() {
+ logger.info("INDEX printIndex() ==== START");
+ Iterator<AuditIndexRecord> iter = indexRecords.iterator();
+ while (iter.hasNext()) {
+ AuditIndexRecord record = iter.next();
+ logger.info("INDEX=" + record + ", isFileExist="
+ + (new File(record.filePath).exists()));
+ }
+ logger.info("INDEX printIndex() ==== END");
+ }
+
+ /**
+ * Removes the record (matched by id) from the live index, appends it to
+ * the "done" index file, archives its log file, and rewrites the index.
+ *
+ * @throws FileNotFoundException if the index/done file cannot be opened
+ * @throws IOException on write failure
+ */
+ synchronized void removeIndexRecord(AuditIndexRecord indexRecord)
+ throws FileNotFoundException, IOException {
+ Iterator<AuditIndexRecord> iter = indexRecords.iterator();
+ while (iter.hasNext()) {
+ AuditIndexRecord record = iter.next();
+ if (record.id.equals(indexRecord.id)) {
+ logger.info("Removing file from index. file=" + record.filePath
+ + ", queueName=" + queueProvider.getName()
+ + ", consumer=" + consumerProvider.getName());
+
+ iter.remove();
+ appendToDoneFile(record);
+ }
+ }
+ saveIndexFile();
+ }
+
+ // Rewrites the entire index file from the in-memory indexRecords list,
+ // one JSON object per line (PrintWriter(File) truncates the file).
+ synchronized void saveIndexFile() throws FileNotFoundException, IOException {
+ PrintWriter out = new PrintWriter(indexFile);
+ for (AuditIndexRecord auditIndexRecord : indexRecords) {
+ out.println(gson.toJson(auditIndexRecord));
+ }
+ out.close();
+ // printIndex();
+
+ }
+
+ /**
+ * Appends the finished record to the "done" index file, moves its log
+ * file into the archive folder, and prunes the oldest archived ".log"
+ * files (in done-file order) down to maxArchiveFiles.
+ *
+ * @throws FileNotFoundException if the done file cannot be opened
+ * @throws IOException on write failure
+ */
+ void appendToDoneFile(AuditIndexRecord indexRecord)
+ throws FileNotFoundException, IOException {
+ logger.info("Moving to done file. " + indexRecord.filePath
+ + ", queueName=" + queueProvider.getName() + ", consumer="
+ + consumerProvider.getName());
+ String line = gson.toJson(indexRecord);
+ PrintWriter out = new PrintWriter(new BufferedWriter(new FileWriter(
+ indexDoneFile, true)));
+ out.println(line);
+ out.flush();
+ out.close();
+
+ // Move to archive folder
+ File logFile = null;
+ File archiveFile = null;
+ try {
+ logFile = new File(indexRecord.filePath);
+ String fileName = logFile.getName();
+ archiveFile = new File(archiveFolder, fileName);
+ logger.info("Moving logFile " + logFile + " to " + archiveFile);
+ logFile.renameTo(archiveFile);
+ } catch (Throwable t) {
+ logger.error("Error moving log file to archive folder. logFile="
+ + logFile + ", archiveFile=" + archiveFile, t);
+ }
+
+ archiveFile = null;
+ try {
+ // Remove old files
+ File[] logFiles = archiveFolder.listFiles(new FileFilter() {
+ public boolean accept(File pathname) {
+ return pathname.getName().toLowerCase().endsWith(".log");
+ }
+ });
+
+ // FIX: listFiles() returns null on an I/O error or if the folder
+ // vanished; guard against NPE before checking the length.
+ if (logFiles != null && logFiles.length > maxArchiveFiles) {
+ int filesToDelete = logFiles.length - maxArchiveFiles;
+ BufferedReader br = new BufferedReader(new FileReader(
+ indexDoneFile));
+ try {
+ int filesDeletedCount = 0;
+ while ((line = br.readLine()) != null) {
+ if (!line.isEmpty() && !line.startsWith("#")) {
+ AuditIndexRecord record = gson.fromJson(line,
+ AuditIndexRecord.class);
+ logFile = new File(record.filePath);
+ String fileName = logFile.getName();
+ archiveFile = new File(archiveFolder, fileName);
+ if (archiveFile.exists()) {
+ logger.info("Deleting archive file "
+ + archiveFile);
+ boolean ret = archiveFile.delete();
+ if (!ret) {
+ logger.error("Error deleting archive file. archiveFile="
+ + archiveFile);
+ }
+ filesDeletedCount++;
+ if (filesDeletedCount >= filesToDelete) {
+ logger.info("Deleted " + filesDeletedCount
+ + " files");
+ break;
+ }
+ }
+ }
+ }
+ } finally {
+ br.close();
+ }
+ }
+ } catch (Throwable t) {
+ logger.error("Error deleting older archive file. archiveFile="
+ + archiveFile, t);
+ }
+
+ }
+
+ // Rate-limited error logging: emits at most one message per
+ // errorLogIntervalMS to avoid flooding the log while the destination
+ // stays down.
+ void logError(String msg) {
+ long currTimeMS = System.currentTimeMillis();
+ if (currTimeMS - lastErrorLogMS > errorLogIntervalMS) {
+ logger.error(msg);
+ lastErrorLogMS = currTimeMS;
+ }
+ }
+
+ // One entry of the spool index: tracks a single spool file, the replay
+ // position within it (linePosition), its lifecycle status, and
+ // delivery-attempt bookkeeping. Serialized to/from JSON via Gson.
+ class AuditIndexRecord {
+ String id;
+ String filePath;
+ int linePosition = 0;
+ SPOOL_FILE_STATUS status = SPOOL_FILE_STATUS.write_inprogress;
+ Date fileCreateTime;
+ Date writeCompleteTime;
+ Date doneCompleteTime;
+ Date lastSuccessTime;
+ Date lastFailedTime;
+ int failedAttemptCount = 0;
+ boolean lastAttempt = false;
+
+ @Override
+ public String toString() {
+ return "AuditIndexRecord [id=" + id + ", filePath=" + filePath
+ + ", linePosition=" + linePosition + ", status=" + status
+ + ", fileCreateTime=" + fileCreateTime
+ + ", writeCompleteTime=" + writeCompleteTime
+ + ", doneCompleteTime=" + doneCompleteTime
+ + ", lastSuccessTime=" + lastSuccessTime
+ + ", lastFailedTime=" + lastFailedTime
+ + ", failedAttemptCount=" + failedAttemptCount
+ + ", lastAttempt=" + lastAttempt + "]";
+ }
+
+ }
+
+ // NOTE(review): currently unused within this class — possibly reserved
+ // for future per-attempt tracking.
+ class AuditFileSpoolAttempt {
+ Date attemptTime;
+ String status;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see java.lang.Runnable#run()
+ */
+ // Destination-writer loop: pulls one index record at a time from
+ // indexQueue, replays its file to the consumer in batches of
+ // getMaxBatchSize() lines, and retires the record when fully sent.
+ // Retries the same record every retryDestinationMS while the
+ // destination is down. Exits only when isDrain is set.
+ @Override
+ public void run() {
+ while (true) {
+ try {
+ // Let's pause between each iteration
+ if (currentConsumerIndexRecord == null) {
+ currentConsumerIndexRecord = indexQueue.poll(
+ retryDestinationMS, TimeUnit.MILLISECONDS);
+ } else {
+ // Still holding a failed record: back off before retrying it.
+ Thread.sleep(retryDestinationMS);
+ }
+
+ if (isDrain) {
+ // Need to exit
+ break;
+ }
+ if (currentConsumerIndexRecord == null) {
+ closeFileIfNeeded();
+ continue;
+ }
+
+ boolean isRemoveIndex = false;
+ File consumerFile = new File(
+ currentConsumerIndexRecord.filePath);
+ if (!consumerFile.exists()) {
+ logger.error("Consumer file=" + consumerFile.getPath()
+ + " not found.");
+ printIndex();
+ isRemoveIndex = true;
+ } else {
+ // Let's open the file to write
+ BufferedReader br = new BufferedReader(new FileReader(
+ currentConsumerIndexRecord.filePath));
+ try {
+ // Resume from the last persisted line position.
+ int startLine = currentConsumerIndexRecord.linePosition;
+ String line;
+ int currLine = 0;
+ boolean isResumed = false;
+ List<String> lines = new ArrayList<String>();
+ while ((line = br.readLine()) != null) {
+ currLine++;
+ if (currLine < startLine) {
+ continue;
+ }
+ lines.add(line);
+ if (lines.size() == queueProvider.getMaxBatchSize()) {
+ boolean ret = sendEvent(lines,
+ currentConsumerIndexRecord, currLine);
+ if (!ret) {
+ // Abort the file; the catch below records the
+ // failure and the record is retried later.
+ throw new Exception("Destination down");
+ } else {
+ if (!isResumed) {
+ logger.info("Started writing to destination. file="
+ + currentConsumerIndexRecord.filePath
+ + ", queueName="
+ + queueProvider.getName()
+ + ", consumer="
+ + consumerProvider.getName());
+ }
+ }
+ lines.clear();
+ }
+ }
+ // Send the final partial batch, if any.
+ if (lines.size() > 0) {
+ boolean ret = sendEvent(lines,
+ currentConsumerIndexRecord, currLine);
+ if (!ret) {
+ throw new Exception("Destination down");
+ } else {
+ if (!isResumed) {
+ logger.info("Started writing to destination. file="
+ + currentConsumerIndexRecord.filePath
+ + ", queueName="
+ + queueProvider.getName()
+ + ", consumer="
+ + consumerProvider.getName());
+ }
+ }
+ lines.clear();
+ }
+ logger.info("Done reading file. file="
+ + currentConsumerIndexRecord.filePath
+ + ", queueName=" + queueProvider.getName()
+ + ", consumer=" + consumerProvider.getName());
+ // The entire file is read
+ currentConsumerIndexRecord.status = SPOOL_FILE_STATUS.done;
+ currentConsumerIndexRecord.doneCompleteTime = new Date();
+ currentConsumerIndexRecord.lastAttempt = true;
+
+ isRemoveIndex = true;
+ } catch (Exception ex) {
+ isDestDown = true;
+ logError("Destination down. queueName="
+ + queueProvider.getName() + ", consumer="
+ + consumerProvider.getName());
+ lastAttemptTime = System.currentTimeMillis();
+ // Update the index file
+ currentConsumerIndexRecord.lastFailedTime = new Date();
+ currentConsumerIndexRecord.failedAttemptCount++;
+ currentConsumerIndexRecord.lastAttempt = false;
+ saveIndexFile();
+ } finally {
+ br.close();
+ }
+ }
+ if (isRemoveIndex) {
+ // Remove this entry from index
+ removeIndexRecord(currentConsumerIndexRecord);
+ currentConsumerIndexRecord = null;
+ closeFileIfNeeded();
+ }
+ } catch (Throwable t) {
+ logger.error("Exception in destination writing thread.", t);
+ }
+ }
+ logger.info("Exiting file spooler. provider=" + queueProvider.getName()
+ + ", consumer=" + consumerProvider.getName());
+ }
+
+ /**
+ * Sends one batch of JSON lines to the destination. On success updates
+ * the index record's replay position/status and persists the index; on
+ * failure logs (rate-limited) and returns false so the caller can retry.
+ *
+ * @param lines batch of serialized events
+ * @param indexRecord index record being replayed
+ * @param currLine 1-based line number of the last line in this batch
+ * @return true if the destination accepted the batch
+ */
+ private boolean sendEvent(List<String> lines, AuditIndexRecord indexRecord,
+ int currLine) {
+ boolean ret = true;
+ try {
+ ret = consumerProvider.logJSON(lines);
+ if (!ret) {
+ // Need to log error after fixed interval
+ logError("Error sending logs to consumer. provider="
+ + queueProvider.getName() + ", consumer="
+ + consumerProvider.getName());
+ } else {
+ // Update index and save
+ indexRecord.linePosition = currLine;
+ indexRecord.status = SPOOL_FILE_STATUS.read_inprogress;
+ indexRecord.lastSuccessTime = new Date();
+ indexRecord.lastAttempt = true;
+ saveIndexFile();
+
+ if (isDestDown) {
+ isDestDown = false;
+ logger.info("Destination up now. " + indexRecord.filePath
+ + ", queueName=" + queueProvider.getName()
+ + ", consumer=" + consumerProvider.getName());
+ }
+ }
+ } catch (Throwable t) {
+ // NOTE(review): on an exception ret stays true, so a throwing
+ // consumer is treated as success and the batch may be lost —
+ // confirm whether this is intended.
+ logger.error("Error while sending logs to consumer. provider="
+ + queueProvider.getName() + ", consumer="
+ + consumerProvider.getName() + ", log=" + lines, t);
+ }
+
+ return ret;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditSummaryQueue.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditSummaryQueue.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditSummaryQueue.java
new file mode 100644
index 0000000..e102d8b
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditSummaryQueue.java
@@ -0,0 +1,255 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ranger.audit.queue;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.LinkedTransferQueue;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.ranger.audit.model.AuditEventBase;
+import org.apache.ranger.audit.provider.AuditProvider;
+import org.apache.ranger.audit.provider.BaseAuditProvider;
+import org.apache.ranger.audit.provider.MiscUtil;
+
+/**
+ * This is a non-blocking queue with no limit on capacity.
+ * Aggregates identical events (same event key) over a time window and
+ * forwards a single summarized event with a count to the downstream
+ * consumer.
+ */
+public class AuditSummaryQueue extends BaseAuditProvider implements Runnable {
+ private static final Log logger = LogFactory
+ .getLog(AuditSummaryQueue.class);
+
+ public static final String PROP_SUMMARY_INTERVAL = "summary.interval.ms";
+
+ LinkedTransferQueue<AuditEventBase> queue = new LinkedTransferQueue<AuditEventBase>();
+ Thread consumerThread = null;
+
+ // Used only to give each consumer thread a unique name suffix.
+ static int threadCount = 0;
+ static final String DEFAULT_NAME = "summary";
+
+ // Upper bound on how many events one loop iteration drains from the queue.
+ private static final int MAX_DRAIN = 100000;
+
+ // Summarization window in milliseconds; overridable via PROP_SUMMARY_INTERVAL.
+ private int maxSummaryInterval = 5000;
+
+ // Accumulates per-event-key summaries between dispatches.
+ HashMap<String, AuditSummary> summaryMap = new HashMap<String, AuditSummary>();
+
+ public AuditSummaryQueue() {
+ setName(DEFAULT_NAME);
+ }
+
+ public AuditSummaryQueue(AuditProvider consumer) {
+ super(consumer);
+ setName(DEFAULT_NAME);
+ }
+
+ // Reads the summarization interval from "<propPrefix>.summary.interval.ms"
+ // on top of the base-class initialization.
+ @Override
+ public void init(Properties props, String propPrefix) {
+ super.init(props, propPrefix);
+ maxSummaryInterval = MiscUtil.getIntProperty(props, propPrefix + "."
+ + PROP_SUMMARY_INTERVAL, maxSummaryInterval);
+ logger.info("maxSummaryInterval=" + maxSummaryInterval + ", name="
+ + getName());
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger.
+ * audit.model.AuditEventBase)
+ */
+ // Enqueues the event without blocking; returns false when the queue is
+ // at capacity. NOTE(review): the size check and add are not atomic, so
+ // concurrent callers can briefly overshoot getMaxQueueSize() — confirm
+ // this soft limit is acceptable.
+ @Override
+ public boolean log(AuditEventBase event) {
+ // Add to the queue and return ASAP
+ if (queue.size() >= getMaxQueueSize()) {
+ return false;
+ }
+ queue.add(event);
+ addLifeTimeInLogCount(1);
+ return true;
+ }
+
+ // Batch variant: stops at the first rejected event and reports failure;
+ // earlier events in the collection remain enqueued.
+ @Override
+ public boolean log(Collection<AuditEventBase> events) {
+ boolean ret = true;
+ for (AuditEventBase event : events) {
+ ret = log(event);
+ if (!ret) {
+ break;
+ }
+ }
+ return ret;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#start()
+ */
+ // Starts the downstream consumer first, then the daemon summarization
+ // thread running this class's run() loop.
+ @Override
+ public void start() {
+ if (consumer != null) {
+ consumer.start();
+ }
+
+ consumerThread = new Thread(this, this.getClass().getName()
+ + (threadCount++));
+ consumerThread.setDaemon(true);
+ consumerThread.start();
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#stop()
+ */
+ // Signals drain mode and interrupts the summarization thread; the run()
+ // loop finishes flushing pending summaries before exiting.
+ @Override
+ public void stop() {
+ setDrain(true);
+ try {
+ if (consumerThread != null) {
+ consumerThread.interrupt();
+ }
+ consumerThread = null;
+ } catch (Throwable t) {
+ // ignore any exception
+ }
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#isFlushPending()
+ */
+ // Pending whenever our own queue has events, or the downstream consumer
+ // still has unflushed work.
+ @Override
+ public boolean isFlushPending() {
+ if (queue.isEmpty()) {
+ return consumer.isFlushPending();
+ }
+ return true;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see java.lang.Runnable#run()
+ */
+ // Summarization loop: drains events, folds them into summaryMap keyed by
+ // event key, and every maxSummaryInterval ms (or on drain) emits one
+ // summarized event per key to the consumer. Exits when draining and both
+ // the map and the queue are empty.
+ @Override
+ public void run() {
+ long lastDispatchTime = System.currentTimeMillis();
+
+ while (true) {
+ // Time to next dispatch
+ long nextDispatchDuration = lastDispatchTime
+ - System.currentTimeMillis() + maxSummaryInterval;
+
+ Collection<AuditEventBase> eventList = new ArrayList<AuditEventBase>();
+
+ try {
+ AuditEventBase event = null;
+ if (!isDrain() && nextDispatchDuration > 0) {
+ // Block at most until the next dispatch is due.
+ event = queue.poll(nextDispatchDuration,
+ TimeUnit.MILLISECONDS);
+ } else {
+ // For poll() is non blocking
+ event = queue.poll();
+ }
+
+ if (event != null) {
+ eventList.add(event);
+ queue.drainTo(eventList, MAX_DRAIN - 1);
+ } else {
+ // poll returned due to timeout, so resetting the clock
+ nextDispatchDuration = lastDispatchTime
+ - System.currentTimeMillis() + maxSummaryInterval;
+ lastDispatchTime = System.currentTimeMillis();
+ }
+ } catch (InterruptedException e) {
+ logger.info(
+ "Caught exception in consumer thread. Mostly to about loop",
+ e);
+ } catch (Throwable t) {
+ logger.error("Caught error during processing request.", t);
+ }
+
+ for (AuditEventBase event : eventList) {
+ // Add to hash map
+ // First occurrence creates the summary; later ones bump the
+ // count and extend the end time.
+ String key = event.getEventKey();
+ AuditSummary auditSummary = summaryMap.get(key);
+ if (auditSummary == null) {
+ auditSummary = new AuditSummary();
+ auditSummary.event = event;
+ auditSummary.startTime = event.getEventTime();
+ auditSummary.endTime = event.getEventTime();
+ auditSummary.count = 1;
+ summaryMap.put(key, auditSummary);
+ } else {
+ auditSummary.endTime = event.getEventTime();
+ auditSummary.count++;
+ }
+ }
+
+ if (isDrain() || nextDispatchDuration <= 0) {
+ // Window elapsed (or draining): emit one aggregated event per key.
+ for (Map.Entry<String, AuditSummary> entry : summaryMap
+ .entrySet()) {
+ AuditSummary auditSummary = entry.getValue();
+ auditSummary.event.setEventCount(auditSummary.count);
+ long timeDiff = auditSummary.endTime.getTime()
+ - auditSummary.startTime.getTime();
+ timeDiff = timeDiff > 0 ? timeDiff : 1;
+ auditSummary.event.setEventDurationMS(timeDiff);
+
+ // Reset time just before sending the logs
+ lastDispatchTime = System.currentTimeMillis();
+ boolean ret = consumer.log(auditSummary.event);
+ if (!ret) {
+ // We need to drop this event
+ logFailedEvent(auditSummary.event, null);
+ }
+ }
+ summaryMap.clear();
+ }
+
+ if (isDrain() && summaryMap.isEmpty() && queue.isEmpty()) {
+ break;
+ }
+ }
+
+ try {
+ // Call stop on the consumer
+ consumer.stop();
+ } catch (Throwable t) {
+ logger.error("Error while calling stop on consumer.", t);
+ }
+ }
+
+ /**
+ * Aggregation bucket for events sharing the same event key: tracks the
+ * first/last occurrence time and the occurrence count, keyed off one
+ * representative event that is ultimately sent downstream.
+ */
+ class AuditSummary {
+ Date startTime = null; // time of the first aggregated occurrence
+ Date endTime = null; // time of the most recent occurrence
+ int count = 0; // number of occurrences aggregated so far
+ AuditEventBase event; // representative event dispatched downstream
+ }
+}
[03/12] incubator-ranger git commit: RANGER-276 Add support for
aggregating audit logs at source
Posted by bo...@apache.org.
RANGER-276 Add support for aggregating audit logs at source
Project: http://git-wip-us.apache.org/repos/asf/incubator-ranger/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ranger/commit/236f1ba6
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ranger/tree/236f1ba6
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ranger/diff/236f1ba6
Branch: refs/heads/master
Commit: 236f1ba67287725544c5a2d07a688d0894de46c3
Parents: 3adafa4
Author: Don Bosco Durai <bo...@apache.org>
Authored: Mon Apr 20 10:23:47 2015 -0700
Committer: Don Bosco Durai <bo...@apache.org>
Committed: Mon Apr 20 10:23:47 2015 -0700
----------------------------------------------------------------------
.../audit/destination/AuditDestination.java | 71 ++
.../audit/destination/FileAuditDestination.java | 231 +++++
.../audit/destination/HDFSAuditDestination.java | 244 +++++
.../ranger/audit/model/AuditEventBase.java | 9 +-
.../ranger/audit/model/AuthzAuditEvent.java | 247 +++---
.../ranger/audit/provider/AuditAsyncQueue.java | 167 ----
.../audit/provider/AuditBatchProcessor.java | 327 -------
.../ranger/audit/provider/AuditDestination.java | 70 --
.../ranger/audit/provider/AuditFileSpool.java | 875 ------------------
.../audit/provider/AuditProviderFactory.java | 64 +-
.../audit/provider/BaseAuditProvider.java | 1 +
.../audit/provider/BufferedAuditProvider.java | 36 +-
.../ranger/audit/provider/DbAuditProvider.java | 17 +-
.../audit/provider/FileAuditDestination.java | 230 -----
.../audit/provider/HDFSAuditDestination.java | 243 -----
.../audit/provider/Log4jAuditProvider.java | 1 +
.../audit/provider/MultiDestAuditProvider.java | 2 +
.../ranger/audit/queue/AuditAsyncQueue.java | 174 ++++
.../ranger/audit/queue/AuditBatchQueue.java | 346 ++++++++
.../ranger/audit/queue/AuditFileSpool.java | 884 +++++++++++++++++++
.../ranger/audit/queue/AuditSummaryQueue.java | 255 ++++++
.../apache/ranger/audit/TestAuditProcessor.java | 786 -----------------
.../org/apache/ranger/audit/TestAuditQueue.java | 704 +++++++++++++++
.../org/apache/ranger/audit/TestConsumer.java | 248 ++++++
24 files changed, 3390 insertions(+), 2842 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/destination/AuditDestination.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/AuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/AuditDestination.java
new file mode 100644
index 0000000..25c0220
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/destination/AuditDestination.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ranger.audit.destination;
+
+import java.util.Properties;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.ranger.audit.provider.BaseAuditProvider;
+
+/**
+ * Base class for audit log destinations. This class needs to be extended by
+ * anyone who wants to build a custom destination; subclasses implement the
+ * actual delivery (file, HDFS, DB, ...).
+ */
+public abstract class AuditDestination extends BaseAuditProvider {
+ private static final Log logger = LogFactory.getLog(AuditDestination.class);
+
+ public AuditDestination() {
+ logger.info("AuditDestination() enter");
+ }
+
+ /*
+ * Delegates property loading to the base provider; subclasses override
+ * this to read destination-specific properties.
+ *
+ * @see
+ * org.apache.ranger.audit.provider.AuditProvider#init(java.util.Properties,
+ * java.lang.String)
+ */
+ @Override
+ public void init(Properties prop, String basePropertyName) {
+ super.init(prop, basePropertyName);
+ }
+
+ /*
+ * Destinations report nothing pending by default; buffering subclasses
+ * can override.
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#isFlushPending()
+ */
+ @Override
+ public boolean isFlushPending() {
+ return false;
+ }
+
+ /*
+ * No-op by default; subclasses that buffer output override this.
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#flush()
+ */
+ @Override
+ public void flush() {
+
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/destination/FileAuditDestination.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/FileAuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/FileAuditDestination.java
new file mode 100644
index 0000000..1ccfd5f
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/destination/FileAuditDestination.java
@@ -0,0 +1,231 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ranger.audit.destination;
+
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Date;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.ranger.audit.model.AuditEventBase;
+import org.apache.ranger.audit.provider.MiscUtil;
+
+/**
+ * Audit destination that writes audit logs to a local file, creating (or
+ * appending to) a file under the configured folder and rolling it over on a
+ * configurable interval.
+ */
+public class FileAuditDestination extends AuditDestination {
+ private static final Log logger = LogFactory
+ .getLog(FileAuditDestination.class);
+
+ // Property suffixes, resolved relative to this destination's property prefix
+ public static final String PROP_FILE_LOCAL_DIR = "dir";
+ public static final String PROP_FILE_LOCAL_FILE_NAME_FORMAT = "filename.format";
+ public static final String PROP_FILE_FILE_ROLLOVER = "file.rollover.sec";
+
+ String baseFolder = null;
+ String fileFormat = null;
+ int fileRolloverSec = 24 * 60 * 60; // In seconds
+ private String logFileNameFormat; // token format, e.g. %app-type%_ranger_audit.log
+
+ boolean initDone = false; // true only after init() validated the folder
+
+ private File logFolder; // destination folder, created if missing
+ PrintWriter logWriter = null; // writer for the currently open log file, if any
+
+ private Date fileCreateTime = null; // when the current file was opened; drives rollover
+
+ private String currentFileName; // path of the current file (for log messages)
+
+ private boolean isStopped = false; // set by stop(); log() refuses new work afterwards
+
+ /*
+ * Reads the folder, file-name format and rollover properties; sets
+ * initDone only when the destination folder exists or could be created.
+ */
+ @Override
+ public void init(Properties prop, String propPrefix) {
+ super.init(prop, propPrefix);
+
+ // Initialize properties for this class
+ // Initial folder and file properties
+ String logFolderProp = MiscUtil.getStringProperty(props, propPrefix
+ + "." + PROP_FILE_LOCAL_DIR);
+ logFileNameFormat = MiscUtil.getStringProperty(props, propPrefix + "."
+ + PROP_FILE_LOCAL_FILE_NAME_FORMAT);
+ fileRolloverSec = MiscUtil.getIntProperty(props, propPrefix + "."
+ + PROP_FILE_FILE_ROLLOVER, fileRolloverSec);
+
+ if (logFolderProp == null || logFolderProp.isEmpty()) {
+ logger.error("File destination folder is not configured. Please set "
+ + propPrefix
+ + "."
+ + PROP_FILE_LOCAL_DIR
+ + ". name="
+ + getName());
+ return;
+ }
+ logFolder = new File(logFolderProp);
+ if (!logFolder.isDirectory()) {
+ logFolder.mkdirs();
+ if (!logFolder.isDirectory()) {
+ logger.error("FileDestination folder not found and can't be created. folder="
+ + logFolder.getAbsolutePath() + ", name=" + getName());
+ return;
+ }
+ }
+ logger.info("logFolder=" + logFolder + ", name=" + getName());
+
+ if (logFileNameFormat == null || logFileNameFormat.isEmpty()) {
+ // Default file-name format when none is configured
+ logFileNameFormat = "%app-type%_ranger_audit.log";
+ }
+
+ logger.info("logFileNameFormat=" + logFileNameFormat + ", destName="
+ + getName());
+
+ initDone = true;
+ }
+
+ /*
+ * Writes each pre-serialized JSON event as one line to the current log
+ * file and flushes. Returns false (after logging) on any write failure.
+ */
+ @Override
+ public boolean logJSON(Collection<String> events) {
+ try {
+ PrintWriter out = getLogFileStream();
+ for (String event : events) {
+ out.println(event);
+ }
+ out.flush();
+ } catch (Throwable t) {
+ logError("Error writing to log file.", t);
+ return false;
+ }
+ return true;
+ }
+
+ /*
+ * Serializes each event to JSON and delegates to logJSON(). Events that
+ * fail to serialize are logged and skipped rather than failing the batch.
+ *
+ * @see
+ * org.apache.ranger.audit.provider.AuditProvider#log(java.util.Collection)
+ */
+ @Override
+ synchronized public boolean log(Collection<AuditEventBase> events) {
+ if (isStopped) {
+ logError("log() called after stop was requested. name=" + getName());
+ return false;
+ }
+ List<String> jsonList = new ArrayList<String>();
+ for (AuditEventBase event : events) {
+ try {
+ jsonList.add(MiscUtil.stringify(event));
+ } catch (Throwable t) {
+ logger.error("Error converting to JSON. event=" + event);
+ }
+ }
+ return logJSON(jsonList);
+
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#start()
+ */
+ @Override
+ public void start() {
+ // Nothing to do here. We will open the file when the first log request
+ // comes
+ }
+
+ // Flushes and closes the current writer, if any.
+ // NOTE(review): isStopped is only set when a writer was open, so calling
+ // stop() before the first write leaves the destination still accepting
+ // log() calls - confirm whether that is intended.
+ @Override
+ synchronized public void stop() {
+ if (logWriter != null) {
+ logWriter.flush();
+ logWriter.close();
+ logWriter = null;
+ isStopped = true;
+ }
+ }
+
+ // Helper methods in this class
+
+ /*
+ * Returns the writer for the current log file, rolling over first if the
+ * rollover interval has elapsed. If the target file name already exists,
+ * the existing file is renamed to the next free "name.N.ext" slot before
+ * a fresh file is created; if the rename fails, output is appended.
+ * NOTE(review): the rename logic assumes the file name contains a '.' -
+ * a format without an extension makes lastIndexOf('.') return -1 and
+ * substring() throw. Confirm formats are always "*.log"-like.
+ */
+ synchronized private PrintWriter getLogFileStream() throws Exception {
+ closeFileIfNeeded();
+
+ // Either there are no open log file or the previous one has been rolled
+ // over
+ if (logWriter == null) {
+ Date currentTime = new Date();
+ // Create a new file
+ String fileName = MiscUtil.replaceTokens(logFileNameFormat,
+ currentTime.getTime());
+ File outLogFile = new File(logFolder, fileName);
+ if (outLogFile.exists()) {
+ // Let's try to get the next available file
+ int i = 0;
+ while (true) {
+ i++;
+ int lastDot = fileName.lastIndexOf('.');
+ String baseName = fileName.substring(0, lastDot);
+ String extension = fileName.substring(lastDot);
+ String newFileName = baseName + "." + i + extension;
+ File newLogFile = new File(logFolder, newFileName);
+ if (!newLogFile.exists()) {
+ // Move the file
+ if (!outLogFile.renameTo(newLogFile)) {
+ logger.error("Error renameing file. " + outLogFile
+ + " to " + newLogFile);
+ }
+ break;
+ }
+ }
+ }
+ if (!outLogFile.exists()) {
+ logger.info("Creating new file. destName=" + getName()
+ + ", fileName=" + fileName);
+ // Open the file
+ logWriter = new PrintWriter(new BufferedWriter(new FileWriter(
+ outLogFile)));
+ } else {
+ // Rename above failed; fall back to appending to the old file
+ logWriter = new PrintWriter(new BufferedWriter(new FileWriter(
+ outLogFile, true)));
+ }
+ fileCreateTime = new Date();
+ currentFileName = outLogFile.getPath();
+ }
+ return logWriter;
+ }
+
+ /*
+ * Closes the current writer once it has been open for longer than
+ * fileRolloverSec, so the next getLogFileStream() call opens a new file.
+ * NOTE(review): fileRolloverSec * 1000 is int arithmetic - it overflows
+ * for rollover intervals beyond ~24 days.
+ */
+ private void closeFileIfNeeded() throws FileNotFoundException, IOException {
+ if (logWriter == null) {
+ return;
+ }
+ if (System.currentTimeMillis() - fileCreateTime.getTime() > fileRolloverSec * 1000) {
+ logger.info("Closing file. Rolling over. name=" + getName()
+ + ", fileName=" + currentFileName);
+ logWriter.flush();
+ logWriter.close();
+ logWriter = null;
+ currentFileName = null;
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/destination/HDFSAuditDestination.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/HDFSAuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/HDFSAuditDestination.java
new file mode 100644
index 0000000..706eb8e
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/destination/HDFSAuditDestination.java
@@ -0,0 +1,244 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ranger.audit.destination;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Date;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.ranger.audit.model.AuditEventBase;
+import org.apache.ranger.audit.provider.MiscUtil;
+
+/**
+ * Audit destination that writes audit logs to a path resolved through the
+ * Hadoop FileSystem API (typically HDFS), creating a new file per rollover
+ * interval under a date/app-type-tokenized sub-folder.
+ */
+public class HDFSAuditDestination extends AuditDestination {
+ private static final Log logger = LogFactory
+ .getLog(HDFSAuditDestination.class);
+
+ // Property suffixes, resolved relative to this destination's property prefix
+ public static final String PROP_HDFS_DIR = "dir";
+ public static final String PROP_HDFS_SUBDIR = "subdir";
+ public static final String PROP_HDFS_FILE_NAME_FORMAT = "filename.format";
+ public static final String PROP_HDFS_ROLLOVER = "file.rollover.sec";
+
+ String baseFolder = null;
+ String fileFormat = null;
+ int fileRolloverSec = 24 * 60 * 60; // In seconds
+ private String logFileNameFormat; // token format, e.g. %app-type%_ranger_audit_%hostname%.log
+
+ boolean initDone = false; // true only after init() validated configuration
+
+ private String logFolder; // tokenized folder = dir + "/" + subdir
+ PrintWriter logWriter = null; // writer over the currently open HDFS stream, if any
+
+ private Date fileCreateTime = null; // when the current file was opened; drives rollover
+
+ private String currentFileName; // full path of the current file (for log messages)
+
+ private boolean isStopped = false; // set by stop(); log() refuses new work afterwards
+
+ /*
+ * Reads the base dir, sub-dir, file-name format and rollover properties,
+ * applying defaults for sub-dir and file-name format; sets initDone only
+ * when the base dir is configured.
+ */
+ @Override
+ public void init(Properties prop, String propPrefix) {
+ super.init(prop, propPrefix);
+
+ // Initialize properties for this class
+ // Initial folder and file properties
+ String logFolderProp = MiscUtil.getStringProperty(props, propPrefix
+ + "." + PROP_HDFS_DIR);
+ String logSubFolder = MiscUtil.getStringProperty(props, propPrefix
+ + "." + PROP_HDFS_SUBDIR);
+ if (logSubFolder == null || logSubFolder.isEmpty()) {
+ // Default: one sub-folder per app type and day
+ logSubFolder = "%app-type%/%time:yyyyMMdd%";
+ }
+
+ logFileNameFormat = MiscUtil.getStringProperty(props, propPrefix + "."
+ + PROP_HDFS_FILE_NAME_FORMAT);
+ fileRolloverSec = MiscUtil.getIntProperty(props, propPrefix + "."
+ + PROP_HDFS_ROLLOVER, fileRolloverSec);
+
+ if (logFileNameFormat == null || logFileNameFormat.isEmpty()) {
+ logFileNameFormat = "%app-type%_ranger_audit_%hostname%" + ".log";
+ }
+
+ if (logFolderProp == null || logFolderProp.isEmpty()) {
+ logger.fatal("File destination folder is not configured. Please set "
+ + propPrefix + "." + PROP_HDFS_DIR + ". name=" + getName());
+ return;
+ }
+
+ logFolder = logFolderProp + "/" + logSubFolder;
+ logger.info("logFolder=" + logFolder + ", destName=" + getName());
+ logger.info("logFileNameFormat=" + logFileNameFormat + ", destName="
+ + getName());
+
+ initDone = true;
+ }
+
+ /*
+ * Writes each pre-serialized JSON event as one line to the current log
+ * file and flushes. Returns false (after logging) on any write failure.
+ */
+ @Override
+ public boolean logJSON(Collection<String> events) {
+ try {
+ PrintWriter out = getLogFileStream();
+ for (String event : events) {
+ out.println(event);
+ }
+ out.flush();
+ } catch (Throwable t) {
+ logError("Error writing to log file.", t);
+ return false;
+ }
+ return true;
+ }
+
+ /*
+ * Serializes each event to JSON and delegates to logJSON(). Events that
+ * fail to serialize are logged and skipped rather than failing the batch.
+ *
+ * @see
+ * org.apache.ranger.audit.provider.AuditProvider#log(java.util.Collection)
+ */
+ @Override
+ synchronized public boolean log(Collection<AuditEventBase> events) {
+ if (isStopped) {
+ logError("log() called after stop was requested. name=" + getName());
+ return false;
+ }
+ List<String> jsonList = new ArrayList<String>();
+ for (AuditEventBase event : events) {
+ try {
+ jsonList.add(MiscUtil.stringify(event));
+ } catch (Throwable t) {
+ logger.error("Error converting to JSON. event=" + event);
+ }
+ }
+ return logJSON(jsonList);
+
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#start()
+ */
+ @Override
+ public void start() {
+ // Nothing to do here. We will open the file when the first log request
+ // comes
+ }
+
+ // Flushes and closes the current writer, if any.
+ // NOTE(review): isStopped is only set when a writer was open, so calling
+ // stop() before the first write leaves the destination still accepting
+ // log() calls - confirm whether that is intended.
+ @Override
+ synchronized public void stop() {
+ try {
+ if (logWriter != null) {
+ logWriter.flush();
+ logWriter.close();
+ logWriter = null;
+ isStopped = true;
+ }
+ } catch (Throwable t) {
+ logger.error("Error closing HDFS file.", t);
+ }
+ }
+
+ // Helper methods in this class
+
+ /*
+ * Returns the writer for the current log file, rolling over first if the
+ * rollover interval has elapsed. Unlike the local-file destination, an
+ * existing HDFS file is never renamed or appended to - the next free
+ * "name.N.ext" slot is chosen and a fresh file is created there.
+ * NOTE(review): the slot logic assumes the path contains a '.'; the
+ * default format ends in ".log", but a configured format without an
+ * extension would make lastIndexOf('.') return -1 and substring() throw.
+ */
+ synchronized private PrintWriter getLogFileStream() throws Throwable {
+ closeFileIfNeeded();
+
+ // Either there are no open log file or the previous one has been rolled
+ // over
+ if (logWriter == null) {
+ Date currentTime = new Date();
+ // Create a new file
+ String fileName = MiscUtil.replaceTokens(logFileNameFormat,
+ currentTime.getTime());
+ String parentFolder = MiscUtil.replaceTokens(logFolder,
+ currentTime.getTime());
+ Configuration conf = new Configuration();
+
+ String fullPath = parentFolder
+ + org.apache.hadoop.fs.Path.SEPARATOR + fileName;
+ String defaultPath = fullPath;
+ URI uri = URI.create(fullPath);
+ FileSystem fileSystem = FileSystem.get(uri, conf);
+
+ Path hdfPath = new Path(fullPath);
+ logger.info("Checking whether log file exists. hdfPath=" + fullPath);
+ int i = 0;
+ while (fileSystem.exists(hdfPath)) {
+ i++;
+ int lastDot = defaultPath.lastIndexOf('.');
+ String baseName = defaultPath.substring(0, lastDot);
+ String extension = defaultPath.substring(lastDot);
+ fullPath = baseName + "." + i + extension;
+ hdfPath = new Path(fullPath);
+ logger.info("Checking whether log file exists. hdfPath=" + fullPath);
+ }
+ logger.info("Log file doesn't exists. Will create and use it. hdfPath=" + fullPath);
+ // Create parent folders
+ createParents(hdfPath, fileSystem);
+
+ // Create the file to write
+ logger.info("Creating new log file. hdfPath=" + fullPath);
+ FSDataOutputStream ostream = fileSystem.create(hdfPath);
+ logWriter = new PrintWriter(ostream);
+ fileCreateTime = new Date();
+ currentFileName = fullPath;
+ }
+ return logWriter;
+ }
+
+ /*
+ * Creates any missing parent folders of the given log-file path.
+ */
+ private void createParents(Path pathLogfile, FileSystem fileSystem)
+ throws Throwable {
+ logger.info("Creating parent folder for " + pathLogfile);
+ Path parentPath = pathLogfile != null ? pathLogfile.getParent() : null;
+
+ if (parentPath != null && fileSystem != null
+ && !fileSystem.exists(parentPath)) {
+ fileSystem.mkdirs(parentPath);
+ }
+ }
+
+ /*
+ * Closes the current writer once it has been open for longer than
+ * fileRolloverSec, so the next getLogFileStream() call opens a new file.
+ * NOTE(review): fileRolloverSec * 1000 is int arithmetic - it overflows
+ * for rollover intervals beyond ~24 days.
+ */
+ private void closeFileIfNeeded() throws FileNotFoundException, IOException {
+ if (logWriter == null) {
+ return;
+ }
+ // TODO: Close the file on absolute time. Currently it is implemented as
+ // relative time
+ if (System.currentTimeMillis() - fileCreateTime.getTime() > fileRolloverSec * 1000) {
+ logger.info("Closing file. Rolling over. name=" + getName()
+ + ", fileName=" + currentFileName);
+ logWriter.flush();
+ logWriter.close();
+ logWriter = null;
+ currentFileName = null;
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditEventBase.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditEventBase.java b/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditEventBase.java
index a44e047..39a2578 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditEventBase.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditEventBase.java
@@ -19,6 +19,8 @@
package org.apache.ranger.audit.model;
+import java.util.Date;
+
import org.apache.ranger.audit.dao.DaoManager;
public abstract class AuditEventBase {
@@ -27,7 +29,12 @@ public abstract class AuditEventBase {
}
public abstract void persist(DaoManager daoManager);
-
+
+ public abstract String getEventKey();
+ public abstract Date getEventTime ();
+ public abstract void setEventCount(long frequencyCount);
+ public abstract void setEventDurationMS(long frequencyDurationMS);
+
protected String trim(String str, int len) {
String ret = str;
if (str != null) {
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/model/AuthzAuditEvent.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/model/AuthzAuditEvent.java b/agents-audit/src/main/java/org/apache/ranger/audit/model/AuthzAuditEvent.java
index af89f60..d648de3 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/model/AuthzAuditEvent.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/model/AuthzAuditEvent.java
@@ -24,87 +24,87 @@ import java.util.Date;
import org.apache.ranger.audit.dao.DaoManager;
import org.apache.ranger.audit.entity.AuthzAuditEventDbObj;
-import com.google.gson.Gson;
import com.google.gson.annotations.SerializedName;
-
public class AuthzAuditEvent extends AuditEventBase {
protected static String FIELD_SEPARATOR = ";";
- protected static final int MAX_ACTION_FIELD_SIZE = 1800 ;
- protected static final int MAX_REQUEST_DATA_FIELD_SIZE = 1800 ;
+ protected static final int MAX_ACTION_FIELD_SIZE = 1800;
+ protected static final int MAX_REQUEST_DATA_FIELD_SIZE = 1800;
@SerializedName("repoType")
- protected int repositoryType = 0;
+ protected int repositoryType = 0;
@SerializedName("repo")
protected String repositoryName = null;
@SerializedName("reqUser")
- protected String user = null;
+ protected String user = null;
@SerializedName("evtTime")
- protected Date eventTime = new Date();
+ protected Date eventTime = new Date();
@SerializedName("access")
- protected String accessType = null;
+ protected String accessType = null;
@SerializedName("resource")
- protected String resourcePath = null;
+ protected String resourcePath = null;
@SerializedName("resType")
- protected String resourceType = null;
+ protected String resourceType = null;
@SerializedName("action")
- protected String action = null;
+ protected String action = null;
@SerializedName("result")
- protected short accessResult = 0; // 0 - DENIED; 1 - ALLOWED; HTTP return code
+ protected short accessResult = 0; // 0 - DENIED; 1 - ALLOWED; HTTP return
+ // code
@SerializedName("agent")
- protected String agentId = null;
+ protected String agentId = null;
@SerializedName("policy")
- protected long policyId = 0;
+ protected long policyId = 0;
@SerializedName("reason")
- protected String resultReason = null;
+ protected String resultReason = null;
@SerializedName("enforcer")
- protected String aclEnforcer = null;
+ protected String aclEnforcer = null;
@SerializedName("sess")
- protected String sessionId = null;
+ protected String sessionId = null;
@SerializedName("cliType")
- protected String clientType = null;
+ protected String clientType = null;
@SerializedName("cliIP")
- protected String clientIP = null;
+ protected String clientIP = null;
@SerializedName("reqData")
- protected String requestData = null;
+ protected String requestData = null;
@SerializedName("agentHost")
- protected String agentHostname = null;
+ protected String agentHostname = null;
@SerializedName("logType")
- protected String logType = null;
+ protected String logType = null;
@SerializedName("id")
- protected String eventId = null;
+ protected String eventId = null;
/**
- * This to ensure order within a session. Order not guaranteed across processes and hosts
+ * This to ensure order within a session. Order not guaranteed across
+ * processes and hosts
*/
@SerializedName("seq_num")
protected long seqNum = 0;
- @SerializedName("freq_count")
- protected long frequencyCount = 1;
+ @SerializedName("event_count")
+ protected long eventCount = 1;
- @SerializedName("freq_dur_ms")
- protected long frequencyDurationMS = 0;
+ @SerializedName("event_dur_ms")
+ protected long eventDurationMS = 0;
public AuthzAuditEvent() {
super();
@@ -112,40 +112,29 @@ public class AuthzAuditEvent extends AuditEventBase {
this.repositoryType = 0;
}
- public AuthzAuditEvent(int repositoryType,
- String repositoryName,
- String user,
- Date eventTime,
- String accessType,
- String resourcePath,
- String resourceType,
- String action,
- short accessResult,
- String agentId,
- long policyId,
- String resultReason,
- String aclEnforcer,
- String sessionId,
- String clientType,
- String clientIP,
- String requestData) {
+ public AuthzAuditEvent(int repositoryType, String repositoryName,
+ String user, Date eventTime, String accessType,
+ String resourcePath, String resourceType, String action,
+ short accessResult, String agentId, long policyId,
+ String resultReason, String aclEnforcer, String sessionId,
+ String clientType, String clientIP, String requestData) {
this.repositoryType = repositoryType;
this.repositoryName = repositoryName;
- this.user = user;
- this.eventTime = eventTime;
- this.accessType = accessType;
- this.resourcePath = resourcePath;
- this.resourceType = resourceType;
- this.action = action;
- this.accessResult = accessResult;
- this.agentId = agentId;
- this.policyId = policyId;
- this.resultReason = resultReason;
- this.aclEnforcer = aclEnforcer;
- this.sessionId = sessionId;
- this.clientType = clientType;
- this.clientIP = clientIP;
- this.requestData = requestData;
+ this.user = user;
+ this.eventTime = eventTime;
+ this.accessType = accessType;
+ this.resourcePath = resourcePath;
+ this.resourceType = resourceType;
+ this.action = action;
+ this.accessResult = accessResult;
+ this.agentId = agentId;
+ this.policyId = policyId;
+ this.resultReason = resultReason;
+ this.aclEnforcer = aclEnforcer;
+ this.sessionId = sessionId;
+ this.clientType = clientType;
+ this.clientIP = clientIP;
+ this.requestData = requestData;
}
/**
@@ -156,7 +145,8 @@ public class AuthzAuditEvent extends AuditEventBase {
}
/**
- * @param repositoryType the repositoryType to set
+ * @param repositoryType
+ * the repositoryType to set
*/
public void setRepositoryType(int repositoryType) {
this.repositoryType = repositoryType;
@@ -170,7 +160,8 @@ public class AuthzAuditEvent extends AuditEventBase {
}
/**
- * @param repositoryName the repositoryName to set
+ * @param repositoryName
+ * the repositoryName to set
*/
public void setRepositoryName(String repositoryName) {
this.repositoryName = repositoryName;
@@ -184,7 +175,8 @@ public class AuthzAuditEvent extends AuditEventBase {
}
/**
- * @param user the user to set
+ * @param user
+ * the user to set
*/
public void setUser(String user) {
this.user = user;
@@ -198,7 +190,8 @@ public class AuthzAuditEvent extends AuditEventBase {
}
/**
- * @param timeStamp the timeStamp to set
+ * @param timeStamp
+ * the timeStamp to set
*/
public void setEventTime(Date eventTime) {
this.eventTime = eventTime;
@@ -212,7 +205,8 @@ public class AuthzAuditEvent extends AuditEventBase {
}
/**
- * @param accessType the accessType to set
+ * @param accessType
+ * the accessType to set
*/
public void setAccessType(String accessType) {
this.accessType = accessType;
@@ -226,7 +220,8 @@ public class AuthzAuditEvent extends AuditEventBase {
}
/**
- * @param resourcePath the resourcePath to set
+ * @param resourcePath
+ * the resourcePath to set
*/
public void setResourcePath(String resourcePath) {
this.resourcePath = resourcePath;
@@ -240,7 +235,8 @@ public class AuthzAuditEvent extends AuditEventBase {
}
/**
- * @param resourceType the resourceType to set
+ * @param resourceType
+ * the resourceType to set
*/
public void setResourceType(String resourceType) {
this.resourceType = resourceType;
@@ -250,11 +246,12 @@ public class AuthzAuditEvent extends AuditEventBase {
* @return the action
*/
public String getAction() {
- return trim(action, MAX_ACTION_FIELD_SIZE) ;
+ return trim(action, MAX_ACTION_FIELD_SIZE);
}
/**
- * @param action the action to set
+ * @param action
+ * the action to set
*/
public void setAction(String action) {
this.action = action;
@@ -268,7 +265,8 @@ public class AuthzAuditEvent extends AuditEventBase {
}
/**
- * @param accessResult the accessResult to set
+ * @param accessResult
+ * the accessResult to set
*/
public void setAccessResult(short accessResult) {
this.accessResult = accessResult;
@@ -282,7 +280,8 @@ public class AuthzAuditEvent extends AuditEventBase {
}
/**
- * @param agentId the agentId to set
+ * @param agentId
+ * the agentId to set
*/
public void setAgentId(String agentId) {
this.agentId = agentId;
@@ -296,7 +295,8 @@ public class AuthzAuditEvent extends AuditEventBase {
}
/**
- * @param policyId the policyId to set
+ * @param policyId
+ * the policyId to set
*/
public void setPolicyId(long policyId) {
this.policyId = policyId;
@@ -310,7 +310,8 @@ public class AuthzAuditEvent extends AuditEventBase {
}
/**
- * @param resultReason the resultReason to set
+ * @param resultReason
+ * the resultReason to set
*/
public void setResultReason(String resultReason) {
this.resultReason = resultReason;
@@ -324,7 +325,8 @@ public class AuthzAuditEvent extends AuditEventBase {
}
/**
- * @param aclEnforcer the aclEnforcer to set
+ * @param aclEnforcer
+ * the aclEnforcer to set
*/
public void setAclEnforcer(String aclEnforcer) {
this.aclEnforcer = aclEnforcer;
@@ -338,7 +340,8 @@ public class AuthzAuditEvent extends AuditEventBase {
}
/**
- * @param sessionId the sessionId to set
+ * @param sessionId
+ * the sessionId to set
*/
public void setSessionId(String sessionId) {
this.sessionId = sessionId;
@@ -352,7 +355,8 @@ public class AuthzAuditEvent extends AuditEventBase {
}
/**
- * @param clientType the clientType to set
+ * @param clientType
+ * the clientType to set
*/
public void setClientType(String clientType) {
this.clientType = clientType;
@@ -366,7 +370,8 @@ public class AuthzAuditEvent extends AuditEventBase {
}
/**
- * @param clientIP the clientIP to set
+ * @param clientIP
+ * the clientIP to set
*/
public void setClientIP(String clientIP) {
this.clientIP = clientIP;
@@ -376,11 +381,12 @@ public class AuthzAuditEvent extends AuditEventBase {
* @return the requestData
*/
public String getRequestData() {
- return trim(requestData, MAX_REQUEST_DATA_FIELD_SIZE) ;
+ return trim(requestData, MAX_REQUEST_DATA_FIELD_SIZE);
}
/**
- * @param requestData the requestData to set
+ * @param requestData
+ * the requestData to set
*/
public void setRequestData(String requestData) {
this.requestData = requestData;
@@ -410,8 +416,6 @@ public class AuthzAuditEvent extends AuditEventBase {
this.eventId = eventId;
}
-
-
public long getSeqNum() {
return seqNum;
}
@@ -420,20 +424,28 @@ public class AuthzAuditEvent extends AuditEventBase {
this.seqNum = seqNum;
}
- public long getFrequencyCount() {
- return frequencyCount;
+ public long getEventCount() {
+ return eventCount;
}
- public void setFrequencyCount(long frequencyCount) {
- this.frequencyCount = frequencyCount;
+ public void setEventCount(long frequencyCount) {
+ this.eventCount = frequencyCount;
}
- public long getFrequencyDurationMS() {
- return frequencyDurationMS;
+ public long getEventDurationMS() {
+ return eventDurationMS;
}
- public void setFrequencyDurationMS(long frequencyDurationMS) {
- this.frequencyDurationMS = frequencyDurationMS;
+ public void setEventDurationMS(long frequencyDurationMS) {
+ this.eventDurationMS = frequencyDurationMS;
+ }
+
+ @Override
+ public String getEventKey() {
+ String key = user + "^" + accessType + "^" + resourcePath + "^"
+ + resourceType + "^" + action + "^" + accessResult + "^"
+ + sessionId + "^" + clientIP;
+ return key;
}
@Override
@@ -448,35 +460,42 @@ public class AuthzAuditEvent extends AuditEventBase {
}
protected StringBuilder toString(StringBuilder sb) {
- sb.append("repositoryType=").append(repositoryType).append(FIELD_SEPARATOR)
- .append("repositoryName=").append(repositoryName).append(FIELD_SEPARATOR)
- .append("user=").append(user).append(FIELD_SEPARATOR)
- .append("eventTime=").append(eventTime).append(FIELD_SEPARATOR)
- .append("accessType=").append(accessType).append(FIELD_SEPARATOR)
- .append("resourcePath=").append(resourcePath).append(FIELD_SEPARATOR)
- .append("resourceType=").append(resourceType).append(FIELD_SEPARATOR)
- .append("action=").append(action).append(FIELD_SEPARATOR)
- .append("accessResult=").append(accessResult).append(FIELD_SEPARATOR)
- .append("agentId=").append(agentId).append(FIELD_SEPARATOR)
- .append("policyId=").append(policyId).append(FIELD_SEPARATOR)
- .append("resultReason=").append(resultReason).append(FIELD_SEPARATOR)
- .append("aclEnforcer=").append(aclEnforcer).append(FIELD_SEPARATOR)
- .append("sessionId=").append(sessionId).append(FIELD_SEPARATOR)
- .append("clientType=").append(clientType).append(FIELD_SEPARATOR)
- .append("clientIP=").append(clientIP).append(FIELD_SEPARATOR)
- .append("requestData=").append(requestData).append(FIELD_SEPARATOR)
- .append("agentHostname=").append(agentHostname).append(FIELD_SEPARATOR)
- .append("logType=").append(logType).append(FIELD_SEPARATOR)
- .append("eventId=").append(eventId).append(FIELD_SEPARATOR)
- .append("seq_num=").append(seqNum).append(FIELD_SEPARATOR)
- .append("freq_count=").append(frequencyCount).append(FIELD_SEPARATOR)
- .append("freq_dur_ms=").append(frequencyDurationMS).append(FIELD_SEPARATOR)
- ;
+ sb.append("repositoryType=").append(repositoryType)
+ .append(FIELD_SEPARATOR).append("repositoryName=")
+ .append(repositoryName).append(FIELD_SEPARATOR).append("user=")
+ .append(user).append(FIELD_SEPARATOR).append("eventTime=")
+ .append(eventTime).append(FIELD_SEPARATOR)
+ .append("accessType=").append(accessType)
+ .append(FIELD_SEPARATOR).append("resourcePath=")
+ .append(resourcePath).append(FIELD_SEPARATOR)
+ .append("resourceType=").append(resourceType)
+ .append(FIELD_SEPARATOR).append("action=").append(action)
+ .append(FIELD_SEPARATOR).append("accessResult=")
+ .append(accessResult).append(FIELD_SEPARATOR)
+ .append("agentId=").append(agentId).append(FIELD_SEPARATOR)
+ .append("policyId=").append(policyId).append(FIELD_SEPARATOR)
+ .append("resultReason=").append(resultReason)
+ .append(FIELD_SEPARATOR).append("aclEnforcer=")
+ .append(aclEnforcer).append(FIELD_SEPARATOR)
+ .append("sessionId=").append(sessionId).append(FIELD_SEPARATOR)
+ .append("clientType=").append(clientType)
+ .append(FIELD_SEPARATOR).append("clientIP=").append(clientIP)
+ .append(FIELD_SEPARATOR).append("requestData=")
+ .append(requestData).append(FIELD_SEPARATOR)
+ .append("agentHostname=").append(agentHostname)
+ .append(FIELD_SEPARATOR).append("logType=").append(logType)
+ .append(FIELD_SEPARATOR).append("eventId=").append(eventId)
+ .append(FIELD_SEPARATOR).append("seq_num=").append(seqNum)
+ .append(FIELD_SEPARATOR).append("event_count=")
+ .append(eventCount).append(FIELD_SEPARATOR)
+ .append("event_dur_ms=").append(eventDurationMS)
+ .append(FIELD_SEPARATOR);
return sb;
}
@Override
public void persist(DaoManager daoManager) {
- daoManager.getAuthzAuditEventDao().create(new AuthzAuditEventDbObj(this));
+ daoManager.getAuthzAuditEventDao().create(
+ new AuthzAuditEventDbObj(this));
}
}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditAsyncQueue.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditAsyncQueue.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditAsyncQueue.java
deleted file mode 100644
index 5553bcc..0000000
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditAsyncQueue.java
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.ranger.audit.provider;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.concurrent.LinkedTransferQueue;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.ranger.audit.model.AuditEventBase;
-
-/**
- * This is a non-blocking queue with no limit on capacity.
- */
-public class AuditAsyncQueue extends BaseAuditProvider implements Runnable {
- private static final Log logger = LogFactory.getLog(AuditAsyncQueue.class);
-
- LinkedTransferQueue<AuditEventBase> queue = new LinkedTransferQueue<AuditEventBase>();
- Thread consumerThread = null;
-
- static int threadCount = 0;
- static final String DEFAULT_NAME = "async";
-
- public AuditAsyncQueue() {
- setName(DEFAULT_NAME);
- }
-
- public AuditAsyncQueue(AuditProvider consumer) {
- super(consumer);
- setName(DEFAULT_NAME);
- }
-
- /*
- * (non-Javadoc)
- *
- * @see
- * org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger.
- * audit.model.AuditEventBase)
- */
- @Override
- public boolean log(AuditEventBase event) {
- // Add to the queue and return ASAP
- if (queue.size() >= getMaxQueueSize()) {
- return false;
- }
- queue.add(event);
- addLifeTimeInLogCount(1);
- return true;
- }
-
- @Override
- public boolean log(Collection<AuditEventBase> events) {
- for (AuditEventBase event : events) {
- log(event);
- }
- return true;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#start()
- */
- @Override
- public void start() {
- if(consumer != null) {
- consumer.start();
- }
-
- consumerThread = new Thread(this, this.getClass().getName()
- + (threadCount++));
- consumerThread.setDaemon(true);
- consumerThread.start();
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#stop()
- */
- @Override
- public void stop() {
- setDrain(true);
- try {
- consumerThread.interrupt();
- } catch (Throwable t) {
- // ignore any exception
- }
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#isFlushPending()
- */
- @Override
- public boolean isFlushPending() {
- if (queue.isEmpty()) {
- return consumer.isFlushPending();
- }
- return true;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Runnable#run()
- */
- @Override
- public void run() {
- while (true) {
- try {
- AuditEventBase event = null;
- if (!isDrain()) {
- // For Transfer queue take() is blocking
- event = queue.take();
- } else {
- // For Transfer queue poll() is non blocking
- event = queue.poll();
- }
- if (event != null) {
- Collection<AuditEventBase> eventList = new ArrayList<AuditEventBase>();
- eventList.add(event);
- // TODO: Put a limit. Hard coding to 1000 (use batch size
- // property)
- queue.drainTo(eventList, 1000 - 1);
- consumer.log(eventList);
- eventList.clear();
- }
- } catch (InterruptedException e) {
- logger.info(
- "Caught exception in consumer thread. Mostly to about loop",
- e);
- } catch (Throwable t) {
- logger.error("Caught error during processing request.", t);
- }
- if (isDrain() && queue.isEmpty()) {
- break;
- }
- }
- try {
- // Call stop on the consumer
- consumer.stop();
- } catch (Throwable t) {
- logger.error("Error while calling stop on consumer.", t);
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditBatchProcessor.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditBatchProcessor.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditBatchProcessor.java
deleted file mode 100644
index 58d122a..0000000
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditBatchProcessor.java
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.ranger.audit.provider;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Properties;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.ranger.audit.model.AuditEventBase;
-
-public class AuditBatchProcessor extends BaseAuditProvider implements Runnable {
- private static final Log logger = LogFactory
- .getLog(AuditBatchProcessor.class);
-
- private BlockingQueue<AuditEventBase> queue = null;
- private Collection<AuditEventBase> localBatchBuffer = new ArrayList<AuditEventBase>();
-
- Thread consumerThread = null;
- static int threadCount = 0;
-
- public AuditBatchProcessor() {
- }
-
- public AuditBatchProcessor(AuditProvider consumer) {
- super(consumer);
- }
-
- /*
- * (non-Javadoc)
- *
- * @see
- * org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger.
- * audit.model.AuditEventBase)
- */
- @Override
- public boolean log(AuditEventBase event) {
- // Add to batchQueue. Block if full
- queue.add(event);
- addLifeTimeInLogCount(1);
- return true;
- }
-
- @Override
- public boolean log(Collection<AuditEventBase> events) {
- for (AuditEventBase event : events) {
- log(event);
- }
- return true;
- }
-
- @Override
- public void init(Properties prop, String basePropertyName) {
- String propPrefix = "xasecure.audit.batch";
- if (basePropertyName != null) {
- propPrefix = basePropertyName;
- }
-
- super.init(prop, propPrefix);
-
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#start()
- */
- @Override
- synchronized public void start() {
- if (consumerThread != null) {
- logger.error("Provider is already started. name=" + getName());
- return;
- }
- logger.info("Creating ArrayBlockingQueue with maxSize="
- + getMaxQueueSize());
- queue = new ArrayBlockingQueue<AuditEventBase>(getMaxQueueSize());
-
- // Start the consumer first
- consumer.start();
-
- // Then the FileSpooler
- if (fileSpoolerEnabled) {
- fileSpooler.start();
- }
-
- // Finally the queue listener
- consumerThread = new Thread(this, this.getClass().getName()
- + (threadCount++));
- consumerThread.setDaemon(true);
- consumerThread.start();
-
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#stop()
- */
- @Override
- public void stop() {
- setDrain(true);
- flush();
- try {
- consumerThread.interrupt();
- } catch (Throwable t) {
- // ignore any exception
- }
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#waitToComplete()
- */
- @Override
- public void waitToComplete() {
- int defaultTimeOut = -1;
- waitToComplete(defaultTimeOut);
- consumer.waitToComplete(defaultTimeOut);
- }
-
- @Override
- public void waitToComplete(long timeout) {
- setDrain(true);
- flush();
- long sleepTime = 1000;
- long startTime = System.currentTimeMillis();
- int prevQueueSize = -1;
- int staticLoopCount = 0;
- while ((queue.size() > 0 || localBatchBuffer.size() > 0)) {
- if (prevQueueSize == queue.size()) {
- logger.error("Queue size is not changing. " + getName()
- + ".size=" + queue.size());
- staticLoopCount++;
- if (staticLoopCount > 5) {
- logger.error("Aborting writing to consumer. Some logs will be discarded."
- + getName() + ".size=" + queue.size());
- }
- } else {
- staticLoopCount = 0;
- }
- consumerThread.interrupt();
- try {
- Thread.sleep(sleepTime);
- if (timeout > 0
- && (System.currentTimeMillis() - startTime > timeout)) {
- break;
- }
- } catch (InterruptedException e) {
- break;
- }
- }
- consumer.waitToComplete(timeout);
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#isFlushPending()
- */
- @Override
- public boolean isFlushPending() {
- if (queue.isEmpty()) {
- return consumer.isFlushPending();
- }
- return true;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#flush()
- */
- @Override
- public void flush() {
- if (fileSpoolerEnabled) {
- fileSpooler.flush();
- }
- consumer.flush();
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Runnable#run()
- */
- @Override
- public void run() {
- long lastDispatchTime = System.currentTimeMillis();
- boolean isDestActive = true;
- while (true) {
- // Time to next dispatch
- long nextDispatchDuration = lastDispatchTime
- - System.currentTimeMillis() + getMaxBatchInterval();
-
- boolean isToSpool = false;
- boolean fileSpoolDrain = false;
- try {
- if (fileSpoolerEnabled && fileSpooler.isPending()) {
- int percentUsed = (getMaxQueueSize() - queue.size()) * 100
- / getMaxQueueSize();
- long lastAttemptDelta = fileSpooler
- .getLastAttemptTimeDelta();
-
- fileSpoolDrain = lastAttemptDelta > fileSpoolMaxWaitTime;
- // If we should even read from queue?
- if (!isDrain() && !fileSpoolDrain
- && percentUsed < fileSpoolDrainThresholdPercent) {
- // Since some files are still under progress and it is
- // not in drain mode, lets wait and retry
- if (nextDispatchDuration > 0) {
- Thread.sleep(nextDispatchDuration);
- }
- continue;
- }
- isToSpool = true;
- }
-
- AuditEventBase event = null;
-
- if (!isToSpool && !isDrain() && !fileSpoolDrain
- && nextDispatchDuration > 0) {
- event = queue.poll(nextDispatchDuration,
- TimeUnit.MILLISECONDS);
-
- } else {
- // For poll() is non blocking
- event = queue.poll();
- }
- if (event != null) {
- localBatchBuffer.add(event);
- if (getMaxBatchSize() >= localBatchBuffer.size()) {
- queue.drainTo(localBatchBuffer, getMaxBatchSize()
- - localBatchBuffer.size());
- }
- }
- } catch (InterruptedException e) {
- logger.info(
- "Caught exception in consumer thread. Mostly to abort loop",
- e);
- } catch (Throwable t) {
- logger.error("Caught error during processing request.", t);
- }
-
- if (localBatchBuffer.size() > 0 && isToSpool) {
- // Let spool to the file directly
- if (isDestActive) {
- logger.info("Switching to file spool. Queue=" + getName()
- + ", dest=" + consumer.getName());
- }
- isDestActive = false;
- fileSpooler.stashLogs(localBatchBuffer);
- localBatchBuffer.clear();
- // Reset all variables
- lastDispatchTime = System.currentTimeMillis();
- } else if (localBatchBuffer.size() > 0
- && (isDrain()
- || localBatchBuffer.size() >= getMaxBatchSize() || nextDispatchDuration <= 0)) {
- if (fileSpoolerEnabled && !isDestActive) {
- logger.info("Switching to writing to destination. Queue="
- + getName() + ", dest=" + consumer.getName());
- }
- boolean ret = consumer.log(localBatchBuffer);
- if (!ret) {
- if (fileSpoolerEnabled) {
- logger.info("Switching to file spool. Queue="
- + getName() + ", dest=" + consumer.getName());
- // Transient error. Stash and move on
- fileSpooler.stashLogs(localBatchBuffer);
- isDestActive = false;
- } else {
- // We need to drop this event
- logFailedEvent(localBatchBuffer, null);
- }
- } else {
- isDestActive = true;
- }
- localBatchBuffer.clear();
- // Reset all variables
- lastDispatchTime = System.currentTimeMillis();
- }
-
- if (isDrain()) {
- if (!queue.isEmpty() || localBatchBuffer.size() > 0) {
- logger.info("Queue is not empty. Will retry. queue.size)="
- + queue.size() + ", localBatchBuffer.size()="
- + localBatchBuffer.size());
- } else {
- break;
- }
- }
- }
-
- logger.info("Exiting consumerThread. Queue=" + getName() + ", dest="
- + consumer.getName());
- try {
- // Call stop on the consumer
- consumer.stop();
- if (fileSpoolerEnabled) {
- fileSpooler.stop();
- }
- } catch (Throwable t) {
- logger.error("Error while calling stop on consumer.", t);
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditDestination.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditDestination.java
deleted file mode 100644
index 11c32ca..0000000
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditDestination.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.ranger.audit.provider;
-
-import java.util.Properties;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-/**
- * This class needs to be extended by anyone who wants to build custom
- * destination
- */
-public abstract class AuditDestination extends BaseAuditProvider {
- private static final Log logger = LogFactory.getLog(AuditDestination.class);
-
- public AuditDestination() {
- logger.info("AuditDestination() enter");
- }
-
- /*
- * (non-Javadoc)
- *
- * @see
- * org.apache.ranger.audit.provider.AuditProvider#init(java.util.Properties,
- * java.lang.String)
- */
- @Override
- public void init(Properties prop, String basePropertyName) {
- super.init(prop, basePropertyName);
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#isFlushPending()
- */
- @Override
- public boolean isFlushPending() {
- return false;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#flush()
- */
- @Override
- public void flush() {
-
- }
-
-}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditFileSpool.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditFileSpool.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditFileSpool.java
deleted file mode 100644
index 8b006de..0000000
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditFileSpool.java
+++ /dev/null
@@ -1,875 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.ranger.audit.provider;
-
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.File;
-import java.io.FileFilter;
-import java.io.FileNotFoundException;
-import java.io.FileReader;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Date;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Properties;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedTransferQueue;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.ranger.audit.model.AuditEventBase;
-
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
-
-/**
- * This class temporarily stores logs in file system if the destination is
- * overloaded or down
- */
-public class AuditFileSpool implements Runnable {
- private static final Log logger = LogFactory.getLog(AuditFileSpool.class);
-
- public enum SPOOL_FILE_STATUS {
- pending, write_inprogress, read_inprogress, done
- }
-
- public static final String PROP_FILE_SPOOL_LOCAL_DIR = "filespool.dir";
- public static final String PROP_FILE_SPOOL_LOCAL_FILE_NAME = "filespool.filename.format";
- public static final String PROP_FILE_SPOOL_ARCHIVE_DIR = "filespool.archive.dir";
- public static final String PROP_FILE_SPOOL_ARCHIVE_MAX_FILES_COUNT = "filespool.archive.max.files";
- public static final String PROP_FILE_SPOOL_FILENAME_PREFIX = "filespool.file.prefix";
- public static final String PROP_FILE_SPOOL_FILE_ROLLOVER = "filespool.file.rollover.sec";
- public static final String PROP_FILE_SPOOL_INDEX_FILE = "filespool.index.filename";
- // public static final String PROP_FILE_SPOOL_INDEX_DONE_FILE =
- // "filespool.index.done_filename";
- public static final String PROP_FILE_SPOOL_DEST_RETRY_MS = "filespool.destination.retry.ms";
-
- AuditProvider queueProvider = null;
- AuditProvider consumerProvider = null;
-
- BlockingQueue<AuditIndexRecord> indexQueue = new LinkedTransferQueue<AuditIndexRecord>();
-
- // Folder and File attributes
- File logFolder = null;
- String logFileNameFormat = null;
- File archiveFolder = null;
- String fileNamePrefix = null;
- String indexFileName = null;
- File indexFile = null;
- String indexDoneFileName = null;
- File indexDoneFile = null;
- int retryDestinationMS = 30 * 1000; // Default 30 seconds
- int fileRolloverSec = 24 * 60 * 60; // In seconds
- int maxArchiveFiles = 100;
-
- int errorLogIntervalMS = 30 * 1000; // Every 30 seconds
- long lastErrorLogMS = 0;
-
- List<AuditIndexRecord> indexRecords = new ArrayList<AuditIndexRecord>();
-
- boolean isPending = false;
- long lastAttemptTime = 0;
- boolean initDone = false;
-
- PrintWriter logWriter = null;
- AuditIndexRecord currentWriterIndexRecord = null;
- AuditIndexRecord currentConsumerIndexRecord = null;
-
- BufferedReader logReader = null;
-
- Thread destinationThread = null;
-
- boolean isWriting = true;
- boolean isDrain = false;
- boolean isDestDown = true;
-
- private static Gson gson = null;
-
- public AuditFileSpool(AuditProvider queueProvider,
- AuditProvider consumerProvider) {
- this.queueProvider = queueProvider;
- this.consumerProvider = consumerProvider;
- }
-
- public void init(Properties prop) {
- init(prop, null);
- }
-
- public void init(Properties props, String basePropertyName) {
- if (initDone) {
- logger.error("init() called more than once. queueProvider="
- + queueProvider.getName() + ", consumerProvider="
- + consumerProvider.getName());
- return;
- }
- String propPrefix = "xasecure.audit.filespool";
- if (basePropertyName != null) {
- propPrefix = basePropertyName;
- }
-
- try {
- gson = new GsonBuilder().setDateFormat("yyyy-MM-dd HH:mm:ss.SSS")
- .create();
-
- // Initial folder and file properties
- String logFolderProp = MiscUtil.getStringProperty(props, propPrefix
- + "." + PROP_FILE_SPOOL_LOCAL_DIR);
- logFileNameFormat = MiscUtil.getStringProperty(props,
- basePropertyName + "." + PROP_FILE_SPOOL_LOCAL_FILE_NAME);
- String archiveFolderProp = MiscUtil.getStringProperty(props,
- propPrefix + "." + PROP_FILE_SPOOL_ARCHIVE_DIR);
- fileNamePrefix = MiscUtil.getStringProperty(props, propPrefix + "."
- + PROP_FILE_SPOOL_FILENAME_PREFIX);
- indexFileName = MiscUtil.getStringProperty(props, propPrefix + "."
- + PROP_FILE_SPOOL_INDEX_FILE);
- retryDestinationMS = MiscUtil.getIntProperty(props, propPrefix
- + "." + PROP_FILE_SPOOL_DEST_RETRY_MS, retryDestinationMS);
- fileRolloverSec = MiscUtil.getIntProperty(props, propPrefix + "."
- + PROP_FILE_SPOOL_FILE_ROLLOVER, fileRolloverSec);
- maxArchiveFiles = MiscUtil.getIntProperty(props, propPrefix + "."
- + PROP_FILE_SPOOL_ARCHIVE_MAX_FILES_COUNT, maxArchiveFiles);
-
- logger.info("retryDestinationMS=" + retryDestinationMS
- + ", queueName=" + queueProvider.getName());
- logger.info("fileRolloverSec=" + fileRolloverSec + ", queueName="
- + queueProvider.getName());
- logger.info("maxArchiveFiles=" + maxArchiveFiles + ", queueName="
- + queueProvider.getName());
-
- if (logFolderProp == null || logFolderProp.isEmpty()) {
- logger.error("Audit spool folder is not configured. Please set "
- + propPrefix
- + "."
- + PROP_FILE_SPOOL_LOCAL_DIR
- + ". queueName=" + queueProvider.getName());
- return;
- }
- logFolder = new File(logFolderProp);
- if (!logFolder.isDirectory()) {
- logFolder.mkdirs();
- if (!logFolder.isDirectory()) {
- logger.error("File Spool folder not found and can't be created. folder="
- + logFolder.getAbsolutePath()
- + ", queueName="
- + queueProvider.getName());
- return;
- }
- }
- logger.info("logFolder=" + logFolder + ", queueName="
- + queueProvider.getName());
-
- if (logFileNameFormat == null || logFileNameFormat.isEmpty()) {
- logFileNameFormat = "spool_" + "%app-type%" + "_"
- + "%time:yyyyMMdd-HHmm.ss%.log";
- }
- logger.info("logFileNameFormat=" + logFileNameFormat
- + ", queueName=" + queueProvider.getName());
-
- if (archiveFolderProp == null || archiveFolderProp.isEmpty()) {
- archiveFolder = new File(logFolder, "archive");
- } else {
- archiveFolder = new File(archiveFolderProp);
- }
- if (!archiveFolder.isDirectory()) {
- archiveFolder.mkdirs();
- if (!archiveFolder.isDirectory()) {
- logger.error("File Spool archive folder not found and can't be created. folder="
- + archiveFolder.getAbsolutePath()
- + ", queueName="
- + queueProvider.getName());
- return;
- }
- }
- logger.info("archiveFolder=" + archiveFolder + ", queueName="
- + queueProvider.getName());
-
- if (indexFileName == null || indexFileName.isEmpty()) {
- indexFileName = "index_" + fileNamePrefix + ".json";
- }
-
- indexFile = new File(logFolder, indexFileName);
- if (!indexFile.exists()) {
- indexFile.createNewFile();
- }
- logger.info("indexFile=" + indexFile + ", queueName="
- + queueProvider.getName());
-
- int lastDot = indexFileName.lastIndexOf('.');
- indexDoneFileName = indexFileName.substring(0, lastDot)
- + "_closed.json";
- indexDoneFile = new File(logFolder, indexDoneFileName);
- if (!indexDoneFile.exists()) {
- indexDoneFile.createNewFile();
- }
- logger.info("indexDoneFile=" + indexDoneFile + ", queueName="
- + queueProvider.getName());
-
- // Load index file
- loadIndexFile();
- for (AuditIndexRecord auditIndexRecord : indexRecords) {
- if (!auditIndexRecord.status.equals(SPOOL_FILE_STATUS.done)) {
- isPending = true;
- }
- if (auditIndexRecord.status
- .equals(SPOOL_FILE_STATUS.write_inprogress)) {
- currentWriterIndexRecord = auditIndexRecord;
- logger.info("currentWriterIndexRecord="
- + currentWriterIndexRecord.filePath
- + ", queueName=" + queueProvider.getName());
- }
- if (auditIndexRecord.status
- .equals(SPOOL_FILE_STATUS.read_inprogress)) {
- indexQueue.add(auditIndexRecord);
- }
- }
- printIndex();
- // One more loop to add the rest of the pending records in reverse
- // order
- for (int i = 0; i < indexRecords.size(); i++) {
- AuditIndexRecord auditIndexRecord = indexRecords.get(i);
- if (auditIndexRecord.status.equals(SPOOL_FILE_STATUS.pending)) {
- File consumerFile = new File(auditIndexRecord.filePath);
- if (!consumerFile.exists()) {
- logger.error("INIT: Consumer file="
- + consumerFile.getPath() + " not found.");
- System.exit(1);
- }
- indexQueue.add(auditIndexRecord);
- }
- }
-
- } catch (Throwable t) {
- logger.fatal("Error initializing File Spooler. queue="
- + queueProvider.getName(), t);
- return;
- }
- initDone = true;
- }
-
- /**
- * Start looking for outstanding logs and update status according.
- */
- public void start() {
- if (!initDone) {
- logger.error("Cannot start Audit File Spooler. Initilization not done yet. queueName="
- + queueProvider.getName());
- return;
- }
-
- logger.info("Starting writerThread, queueName="
- + queueProvider.getName() + ", consumer="
- + consumerProvider.getName());
-
- // Let's start the thread to read
- destinationThread = new Thread(this, queueProvider.getName()
- + "_destWriter");
- destinationThread.setDaemon(true);
- destinationThread.start();
- }
-
- public void stop() {
- if (!initDone) {
- logger.error("Cannot stop Audit File Spooler. Initilization not done. queueName="
- + queueProvider.getName());
- return;
- }
- logger.info("Stop called, queueName=" + queueProvider.getName()
- + ", consumer=" + consumerProvider.getName());
-
- isDrain = true;
- flush();
-
- PrintWriter out = getOpenLogFileStream();
- if (out != null) {
- // If write is still going on, then let's give it enough time to
- // complete
- for (int i = 0; i < 3; i++) {
- if (isWriting) {
- try {
- Thread.sleep(1000);
- } catch (InterruptedException e) {
- // ignore
- }
- continue;
- }
- try {
- logger.info("Closing open file, queueName="
- + queueProvider.getName() + ", consumer="
- + consumerProvider.getName());
-
- out.flush();
- out.close();
- } catch (Throwable t) {
- logger.debug("Error closing spool out file.", t);
- }
- }
- }
- try {
- destinationThread.interrupt();
- } catch (Throwable e) {
- // ignore
- }
- }
-
- public void flush() {
- if (!initDone) {
- logger.error("Cannot flush Audit File Spooler. Initilization not done. queueName="
- + queueProvider.getName());
- return;
- }
- PrintWriter out = getOpenLogFileStream();
- if (out != null) {
- out.flush();
- }
- }
-
- /**
- * If any files are still not processed. Also, if the destination is not
- * reachable
- *
- * @return
- */
- public boolean isPending() {
- if (!initDone) {
- logError("isPending(): File Spooler not initialized. queueName="
- + queueProvider.getName());
- return false;
- }
-
- return isPending;
- }
-
- /**
- * Milliseconds from last attempt time
- *
- * @return
- */
- public long getLastAttemptTimeDelta() {
- if (lastAttemptTime == 0) {
- return 0;
- }
- return System.currentTimeMillis() - lastAttemptTime;
- }
-
- synchronized public void stashLogs(AuditEventBase event) {
- if (isDrain) {
- // Stop has been called, so this method shouldn't be called
- logger.error("stashLogs() is called after stop is called. event="
- + event);
- return;
- }
- try {
- isWriting = true;
- PrintWriter logOut = getLogFileStream();
- // Convert event to json
- String jsonStr = MiscUtil.stringify(event);
- logOut.println(jsonStr);
- isPending = true;
- } catch (Exception ex) {
- logger.error("Error writing to file. event=" + event, ex);
- } finally {
- isWriting = false;
- }
-
- }
-
- synchronized public void stashLogs(Collection<AuditEventBase> events) {
- for (AuditEventBase event : events) {
- stashLogs(event);
- }
- flush();
- }
-
- synchronized public void stashLogsString(String event) {
- if (isDrain) {
- // Stop has been called, so this method shouldn't be called
- logger.error("stashLogs() is called after stop is called. event="
- + event);
- return;
- }
- try {
- isWriting = true;
- PrintWriter logOut = getLogFileStream();
- logOut.println(event);
- } catch (Exception ex) {
- logger.error("Error writing to file. event=" + event, ex);
- } finally {
- isWriting = false;
- }
-
- }
-
- synchronized public void stashLogsString(Collection<String> events) {
- for (String event : events) {
- stashLogsString(event);
- }
- flush();
- }
-
- /**
- * This return the current file. If there are not current open output file,
- * then it will return null
- *
- * @return
- * @throws Exception
- */
- synchronized private PrintWriter getOpenLogFileStream() {
- return logWriter;
- }
-
- /**
- * @return
- * @throws Exception
- */
- synchronized private PrintWriter getLogFileStream() throws Exception {
- closeFileIfNeeded();
-
- // Either there are no open log file or the previous one has been rolled
- // over
- if (currentWriterIndexRecord == null) {
- Date currentTime = new Date();
- // Create a new file
- String fileName = MiscUtil.replaceTokens(logFileNameFormat,
- currentTime.getTime());
- String newFileName = fileName;
- File outLogFile = null;
- int i = 0;
- while (true) {
- outLogFile = new File(logFolder, newFileName);
- File archiveLogFile = new File(archiveFolder, newFileName);
- if (!outLogFile.exists() && !archiveLogFile.exists()) {
- break;
- }
- i++;
- int lastDot = fileName.lastIndexOf('.');
- String baseName = fileName.substring(0, lastDot);
- String extension = fileName.substring(lastDot);
- newFileName = baseName + "." + i + extension;
- }
- fileName = newFileName;
- logger.info("Creating new file. queueName="
- + queueProvider.getName() + ", fileName=" + fileName);
- // Open the file
- logWriter = new PrintWriter(new BufferedWriter(new FileWriter(
- outLogFile)));
-
- AuditIndexRecord tmpIndexRecord = new AuditIndexRecord();
-
- tmpIndexRecord.id = MiscUtil.generateUniqueId();
- tmpIndexRecord.filePath = outLogFile.getPath();
- tmpIndexRecord.status = SPOOL_FILE_STATUS.write_inprogress;
- tmpIndexRecord.fileCreateTime = currentTime;
- tmpIndexRecord.lastAttempt = true;
- currentWriterIndexRecord = tmpIndexRecord;
- indexRecords.add(currentWriterIndexRecord);
- saveIndexFile();
-
- } else {
- if (logWriter == null) {
- // This means the process just started. We need to open the file
- // in append mode.
- logger.info("Opening existing file for append. queueName="
- + queueProvider.getName() + ", fileName="
- + currentWriterIndexRecord.filePath);
- logWriter = new PrintWriter(new BufferedWriter(new FileWriter(
- currentWriterIndexRecord.filePath, true)));
- }
- }
- return logWriter;
- }
-
- synchronized private void closeFileIfNeeded() throws FileNotFoundException,
- IOException {
- // Is there file open to write or there are no pending file, then close
- // the active file
- if (currentWriterIndexRecord != null) {
- // Check whether the file needs to rolled
- boolean closeFile = false;
- if (indexRecords.size() == 1) {
- closeFile = true;
- logger.info("Closing file. Only one open file. queueName="
- + queueProvider.getName() + ", fileName="
- + currentWriterIndexRecord.filePath);
- } else if (System.currentTimeMillis()
- - currentWriterIndexRecord.fileCreateTime.getTime() > fileRolloverSec * 1000) {
- closeFile = true;
- logger.info("Closing file. Rolling over. queueName="
- + queueProvider.getName() + ", fileName="
- + currentWriterIndexRecord.filePath);
- }
- if (closeFile) {
- // Roll the file
- if (logWriter != null) {
- logWriter.flush();
- logWriter.close();
- logWriter = null;
- }
- currentWriterIndexRecord.status = SPOOL_FILE_STATUS.pending;
- currentWriterIndexRecord.writeCompleteTime = new Date();
- saveIndexFile();
- logger.info("Adding file to queue. queueName="
- + queueProvider.getName() + ", fileName="
- + currentWriterIndexRecord.filePath);
- indexQueue.add(currentWriterIndexRecord);
- currentWriterIndexRecord = null;
- }
- }
- }
-
- /**
- * Load the index file
- *
- * @throws IOException
- */
- void loadIndexFile() throws IOException {
- logger.info("Loading index file. fileName=" + indexFile.getPath());
- BufferedReader br = new BufferedReader(new FileReader(indexFile));
- indexRecords.clear();
- String line;
- while ((line = br.readLine()) != null) {
- if (!line.isEmpty() && !line.startsWith("#")) {
- AuditIndexRecord record = gson.fromJson(line,
- AuditIndexRecord.class);
- indexRecords.add(record);
- }
- }
- br.close();
- }
-
- synchronized void printIndex() {
- logger.info("INDEX printIndex() ==== START");
- Iterator<AuditIndexRecord> iter = indexRecords.iterator();
- while (iter.hasNext()) {
- AuditIndexRecord record = iter.next();
- logger.info("INDEX=" + record + ", isFileExist="
- + (new File(record.filePath).exists()));
- }
- logger.info("INDEX printIndex() ==== END");
- }
-
- synchronized void removeIndexRecord(AuditIndexRecord indexRecord)
- throws FileNotFoundException, IOException {
- Iterator<AuditIndexRecord> iter = indexRecords.iterator();
- while (iter.hasNext()) {
- AuditIndexRecord record = iter.next();
- if (record.id.equals(indexRecord.id)) {
- logger.info("Removing file from index. file=" + record.filePath
- + ", queueName=" + queueProvider.getName()
- + ", consumer=" + consumerProvider.getName());
-
- iter.remove();
- appendToDoneFile(record);
- }
- }
- saveIndexFile();
- }
-
- synchronized void saveIndexFile() throws FileNotFoundException, IOException {
- PrintWriter out = new PrintWriter(indexFile);
- for (AuditIndexRecord auditIndexRecord : indexRecords) {
- out.println(gson.toJson(auditIndexRecord));
- }
- out.close();
- // printIndex();
-
- }
-
- void appendToDoneFile(AuditIndexRecord indexRecord)
- throws FileNotFoundException, IOException {
- logger.info("Moving to done file. " + indexRecord.filePath
- + ", queueName=" + queueProvider.getName() + ", consumer="
- + consumerProvider.getName());
- String line = gson.toJson(indexRecord);
- PrintWriter out = new PrintWriter(new BufferedWriter(new FileWriter(
- indexDoneFile, true)));
- out.println(line);
- out.flush();
- out.close();
-
- // Move to archive folder
- File logFile = null;
- File archiveFile = null;
- try {
- logFile = new File(indexRecord.filePath);
- String fileName = logFile.getName();
- archiveFile = new File(archiveFolder, fileName);
- logger.info("Moving logFile " + logFile + " to " + archiveFile);
- logFile.renameTo(archiveFile);
- } catch (Throwable t) {
- logger.error("Error moving log file to archive folder. logFile="
- + logFile + ", archiveFile=" + archiveFile, t);
- }
-
- archiveFile = null;
- try {
- // Remove old files
- File[] logFiles = archiveFolder.listFiles(new FileFilter() {
- public boolean accept(File pathname) {
- return pathname.getName().toLowerCase().endsWith(".log");
- }
- });
-
- if (logFiles.length > maxArchiveFiles) {
- int filesToDelete = logFiles.length - maxArchiveFiles;
- BufferedReader br = new BufferedReader(new FileReader(
- indexDoneFile));
- try {
- int filesDeletedCount = 0;
- while ((line = br.readLine()) != null) {
- if (!line.isEmpty() && !line.startsWith("#")) {
- AuditIndexRecord record = gson.fromJson(line,
- AuditIndexRecord.class);
- logFile = new File(record.filePath);
- String fileName = logFile.getName();
- archiveFile = new File(archiveFolder, fileName);
- if (archiveFile.exists()) {
- logger.info("Deleting archive file "
- + archiveFile);
- boolean ret = archiveFile.delete();
- if (!ret) {
- logger.error("Error deleting archive file. archiveFile="
- + archiveFile);
- }
- filesDeletedCount++;
- if (filesDeletedCount >= filesToDelete) {
- logger.info("Deleted " + filesDeletedCount
- + " files");
- break;
- }
- }
- }
- }
- } finally {
- br.close();
- }
- }
- } catch (Throwable t) {
- logger.error("Error deleting older archive file. archiveFile="
- + archiveFile, t);
- }
-
- }
-
- void logError(String msg) {
- long currTimeMS = System.currentTimeMillis();
- if (currTimeMS - lastErrorLogMS > errorLogIntervalMS) {
- logger.error(msg);
- lastErrorLogMS = currTimeMS;
- }
- }
-
- class AuditIndexRecord {
- String id;
- String filePath;
- int linePosition = 0;
- SPOOL_FILE_STATUS status = SPOOL_FILE_STATUS.write_inprogress;
- Date fileCreateTime;
- Date writeCompleteTime;
- Date doneCompleteTime;
- Date lastSuccessTime;
- Date lastFailedTime;
- int failedAttemptCount = 0;
- boolean lastAttempt = false;
-
- @Override
- public String toString() {
- return "AuditIndexRecord [id=" + id + ", filePath=" + filePath
- + ", linePosition=" + linePosition + ", status=" + status
- + ", fileCreateTime=" + fileCreateTime
- + ", writeCompleteTime=" + writeCompleteTime
- + ", doneCompleteTime=" + doneCompleteTime
- + ", lastSuccessTime=" + lastSuccessTime
- + ", lastFailedTime=" + lastFailedTime
- + ", failedAttemptCount=" + failedAttemptCount
- + ", lastAttempt=" + lastAttempt + "]";
- }
-
- }
-
- class AuditFileSpoolAttempt {
- Date attemptTime;
- String status;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see java.lang.Runnable#run()
- */
- @Override
- public void run() {
- while (true) {
- try {
- // Let's pause between each iteration
- if (currentConsumerIndexRecord == null) {
- currentConsumerIndexRecord = indexQueue.poll(
- retryDestinationMS, TimeUnit.MILLISECONDS);
- } else {
- Thread.sleep(retryDestinationMS);
- }
-
- if (isDrain) {
- // Need to exit
- break;
- }
- if (currentConsumerIndexRecord == null) {
- closeFileIfNeeded();
- continue;
- }
-
- boolean isRemoveIndex = false;
- File consumerFile = new File(
- currentConsumerIndexRecord.filePath);
- if (!consumerFile.exists()) {
- logger.error("Consumer file=" + consumerFile.getPath()
- + " not found.");
- printIndex();
- isRemoveIndex = true;
- } else {
- // Let's open the file to write
- BufferedReader br = new BufferedReader(new FileReader(
- currentConsumerIndexRecord.filePath));
- try {
- int startLine = currentConsumerIndexRecord.linePosition;
- String line;
- int currLine = 0;
- boolean isResumed = false;
- List<String> lines = new ArrayList<String>();
- while ((line = br.readLine()) != null) {
- currLine++;
- if (currLine < startLine) {
- continue;
- }
- lines.add(line);
- if (lines.size() == queueProvider.getMaxBatchSize()) {
- boolean ret = sendEvent(lines,
- currentConsumerIndexRecord, currLine);
- if (!ret) {
- throw new Exception("Destination down");
- } else {
- if (!isResumed) {
- logger.info("Started writing to destination. file="
- + currentConsumerIndexRecord.filePath
- + ", queueName="
- + queueProvider.getName()
- + ", consumer="
- + consumerProvider.getName());
- }
- }
- lines.clear();
- }
- }
- if (lines.size() > 0) {
- boolean ret = sendEvent(lines,
- currentConsumerIndexRecord, currLine);
- if (!ret) {
- throw new Exception("Destination down");
- } else {
- if (!isResumed) {
- logger.info("Started writing to destination. file="
- + currentConsumerIndexRecord.filePath
- + ", queueName="
- + queueProvider.getName()
- + ", consumer="
- + consumerProvider.getName());
- }
- }
- lines.clear();
- }
- logger.info("Done reading file. file="
- + currentConsumerIndexRecord.filePath
- + ", queueName=" + queueProvider.getName()
- + ", consumer=" + consumerProvider.getName());
- // The entire file is read
- currentConsumerIndexRecord.status = SPOOL_FILE_STATUS.done;
- currentConsumerIndexRecord.doneCompleteTime = new Date();
- currentConsumerIndexRecord.lastAttempt = true;
-
- isRemoveIndex = true;
- } catch (Exception ex) {
- isDestDown = true;
- logError("Destination down. queueName="
- + queueProvider.getName() + ", consumer="
- + consumerProvider.getName());
- lastAttemptTime = System.currentTimeMillis();
- // Update the index file
- currentConsumerIndexRecord.lastFailedTime = new Date();
- currentConsumerIndexRecord.failedAttemptCount++;
- currentConsumerIndexRecord.lastAttempt = false;
- saveIndexFile();
- } finally {
- br.close();
- }
- }
- if (isRemoveIndex) {
- // Remove this entry from index
- removeIndexRecord(currentConsumerIndexRecord);
- currentConsumerIndexRecord = null;
- closeFileIfNeeded();
- }
- } catch (Throwable t) {
- logger.error("Exception in destination writing thread.", t);
- }
- }
- logger.info("Exiting file spooler. provider=" + queueProvider.getName()
- + ", consumer=" + consumerProvider.getName());
- }
-
- private boolean sendEvent(List<String> lines, AuditIndexRecord indexRecord,
- int currLine) {
- boolean ret = true;
- try {
- ret = consumerProvider.logJSON(lines);
- if (!ret) {
- // Need to log error after fixed interval
- logError("Error sending logs to consumer. provider="
- + queueProvider.getName() + ", consumer="
- + consumerProvider.getName());
- } else {
- // Update index and save
- indexRecord.linePosition = currLine;
- indexRecord.status = SPOOL_FILE_STATUS.read_inprogress;
- indexRecord.lastSuccessTime = new Date();
- indexRecord.lastAttempt = true;
- saveIndexFile();
-
- if (isDestDown) {
- isDestDown = false;
- logger.info("Destination up now. " + indexRecord.filePath
- + ", queueName=" + queueProvider.getName()
- + ", consumer=" + consumerProvider.getName());
- }
- }
- } catch (Throwable t) {
- logger.error("Error while sending logs to consumer. provider="
- + queueProvider.getName() + ", consumer="
- + consumerProvider.getName() + ", log=" + lines, t);
- }
-
- return ret;
- }
-
-}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProviderFactory.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProviderFactory.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProviderFactory.java
index 13b3142..a67f7e0 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProviderFactory.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProviderFactory.java
@@ -24,9 +24,14 @@ import java.util.Properties;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.ranger.audit.destination.FileAuditDestination;
+import org.apache.ranger.audit.destination.HDFSAuditDestination;
import org.apache.ranger.audit.provider.hdfs.HdfsAuditProvider;
import org.apache.ranger.audit.provider.kafka.KafkaAuditProvider;
import org.apache.ranger.audit.provider.solr.SolrAuditProvider;
+import org.apache.ranger.audit.queue.AuditAsyncQueue;
+import org.apache.ranger.audit.queue.AuditBatchQueue;
+import org.apache.ranger.audit.queue.AuditSummaryQueue;
/*
* TODO:
@@ -90,10 +95,11 @@ public class AuditProviderFactory {
LOG.info("AuditProviderFactory: initializing..");
if (mInitDone) {
- LOG.warn("AuditProviderFactory.init(): already initialized!",
+ LOG.warn(
+ "AuditProviderFactory.init(): already initialized! Will try to re-initialize",
new Exception());
- return;
+ // return;
}
mInitDone = true;
@@ -125,7 +131,7 @@ public class AuditProviderFactory {
for (Object propNameObj : props.keySet()) {
String propName = propNameObj.toString();
- if (propName.length() <= AUDIT_DEST_BASE.length() + 1) {
+ if (!propName.startsWith(AUDIT_DEST_BASE)) {
continue;
}
String destName = propName.substring(AUDIT_DEST_BASE.length() + 1);
@@ -152,9 +158,14 @@ public class AuditProviderFactory {
String queueName = MiscUtil.getStringProperty(props,
destPropPrefix + "." + BaseAuditProvider.PROP_QUEUE);
- if( queueName == null || queueName.isEmpty()) {
+ if (queueName == null || queueName.isEmpty()) {
+ LOG.info(destPropPrefix + "."
+ + BaseAuditProvider.PROP_QUEUE
+ + " is not set. Setting queue to batch for "
+ + destName);
queueName = "batch";
}
+ LOG.info("queue for " + destName + " is " + queueName);
if (queueName != null && !queueName.isEmpty()
&& !queueName.equalsIgnoreCase("none")) {
String queuePropPrefix = destPropPrefix + "." + queueName;
@@ -184,24 +195,55 @@ public class AuditProviderFactory {
}
}
if (providers.size() > 0) {
- LOG.info("Using v2 audit configuration");
+ LOG.info("Using v3 audit configuration");
AuditAsyncQueue asyncQueue = new AuditAsyncQueue();
- String propPrefix = BaseAuditProvider.PROP_DEFAULT_PREFIX + "." + "async";
+ String propPrefix = BaseAuditProvider.PROP_DEFAULT_PREFIX + "."
+ + "async";
asyncQueue.init(props, propPrefix);
+ propPrefix = BaseAuditProvider.PROP_DEFAULT_PREFIX;
+ boolean summaryEnabled = MiscUtil.getBooleanProperty(props,
+ propPrefix + "." + "summary" + "." + "enabled", false);
+ AuditSummaryQueue summaryQueue = null;
+ if (summaryEnabled) {
+ LOG.info("AuditSummaryQueue is enabled");
+ summaryQueue = new AuditSummaryQueue();
+ summaryQueue.init(props, propPrefix);
+ asyncQueue.setConsumer(summaryQueue);
+ } else {
+ LOG.info("AuditSummaryQueue is disabled");
+ }
+
if (providers.size() == 1) {
- asyncQueue.setConsumer(providers.get(0));
+ if (summaryEnabled) {
+ LOG.info("Setting " + providers.get(0).getName()
+ + " as consumer to AuditSummaryQueue");
+ summaryQueue.setConsumer(providers.get(0));
+ } else {
+ LOG.info("Setting " + providers.get(0).getName()
+ + " as consumer to " + asyncQueue.getName());
+ asyncQueue.setConsumer(providers.get(0));
+ }
} else {
MultiDestAuditProvider multiDestProvider = new MultiDestAuditProvider();
multiDestProvider.init(props);
multiDestProvider.addAuditProviders(providers);
- asyncQueue.setConsumer(multiDestProvider);
+ if (summaryEnabled) {
+ LOG.info("Setting " + multiDestProvider.getName()
+ + " as consumer to AuditSummaryQueue");
+ summaryQueue.setConsumer(multiDestProvider);
+ } else {
+ LOG.info("Setting " + multiDestProvider.getName()
+ + " as consumer to " + asyncQueue.getName());
+ asyncQueue.setConsumer(multiDestProvider);
+ }
}
mProvider = asyncQueue;
+ LOG.info("Starting " + mProvider.getName());
mProvider.start();
} else {
- LOG.info("No v2 audit configuration found. Trying v1 audit configurations");
+ LOG.info("No v3 audit configuration found. Trying v2 audit configurations");
if (!isEnabled
|| !(isAuditToDbEnabled || isAuditToHdfsEnabled
|| isAuditToKafkaEnabled || isAuditToLog4jEnabled
@@ -356,7 +398,7 @@ public class AuditProviderFactory {
.newInstance();
} catch (Exception e) {
LOG.fatal("Can't instantiate audit class for providerName="
- + providerName + ", className=" + className);
+ + providerName + ", className=" + className, e);
}
} else {
if (providerName.equals("file")) {
@@ -372,7 +414,7 @@ public class AuditProviderFactory {
} else if (providerName.equals("log4j")) {
provider = new Log4jAuditProvider();
} else if (providerName.equals("batch")) {
- provider = new AuditBatchProcessor();
+ provider = new AuditBatchQueue();
} else if (providerName.equals("async")) {
provider = new AuditAsyncQueue();
} else {
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/236f1ba6/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditProvider.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditProvider.java
index 576176c..85c207b 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditProvider.java
@@ -22,6 +22,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.model.AuthzAuditEvent;
+import org.apache.ranger.audit.queue.AuditFileSpool;
import com.google.gson.GsonBuilder;
[12/12] incubator-ranger git commit: RANGER-397 - Implement reliable
streaming audits to configurable destinations - Incorporate Review Feedback
Posted by bo...@apache.org.
RANGER-397 - Implement reliable streaming audits to configurable
destinations - Incorporate Review Feedback
Project: http://git-wip-us.apache.org/repos/asf/incubator-ranger/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-ranger/commit/4f3cea22
Tree: http://git-wip-us.apache.org/repos/asf/incubator-ranger/tree/4f3cea22
Diff: http://git-wip-us.apache.org/repos/asf/incubator-ranger/diff/4f3cea22
Branch: refs/heads/master
Commit: 4f3cea223b9bb717577732bf050bc78f16e94a69
Parents: 42a0e25
Author: Don Bosco Durai <bo...@apache.org>
Authored: Wed Apr 22 09:49:01 2015 -0700
Committer: Don Bosco Durai <bo...@apache.org>
Committed: Wed Apr 22 09:49:01 2015 -0700
----------------------------------------------------------------------
.../audit/destination/AuditDestination.java | 32 +-
.../audit/destination/FileAuditDestination.java | 33 +-
.../audit/destination/HDFSAuditDestination.java | 50 ++-
.../ranger/audit/model/AuditEventBase.java | 4 +-
.../audit/provider/AsyncAuditProvider.java | 60 +--
.../ranger/audit/provider/AuditHandler.java | 46 ++
.../ranger/audit/provider/AuditProvider.java | 56 ---
.../audit/provider/AuditProviderFactory.java | 142 +++---
.../ranger/audit/provider/BaseAuditHandler.java | 271 ++++++++++++
.../audit/provider/BaseAuditProvider.java | 432 -------------------
.../audit/provider/BufferedAuditProvider.java | 12 +-
.../ranger/audit/provider/DbAuditProvider.java | 10 -
.../audit/provider/DummyAuditProvider.java | 35 +-
.../audit/provider/Log4jAuditProvider.java | 2 -
.../audit/provider/MultiDestAuditProvider.java | 59 +--
.../provider/kafka/KafkaAuditProvider.java | 22 +-
.../audit/provider/solr/SolrAuditProvider.java | 33 +-
.../ranger/audit/queue/AuditAsyncQueue.java | 34 +-
.../ranger/audit/queue/AuditBatchQueue.java | 26 +-
.../ranger/audit/queue/AuditFileSpool.java | 57 ++-
.../apache/ranger/audit/queue/AuditQueue.java | 174 ++++++++
.../ranger/audit/queue/AuditSummaryQueue.java | 49 +--
.../apache/ranger/audit/test/TestEvents.java | 4 +-
.../org/apache/ranger/audit/TestAuditQueue.java | 98 +++--
.../org/apache/ranger/audit/TestConsumer.java | 46 +-
25 files changed, 815 insertions(+), 972 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/destination/AuditDestination.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/AuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/AuditDestination.java
index 25c0220..9db8937 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/destination/AuditDestination.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/destination/AuditDestination.java
@@ -23,13 +23,13 @@ import java.util.Properties;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.ranger.audit.provider.BaseAuditProvider;
+import org.apache.ranger.audit.provider.BaseAuditHandler;
/**
* This class needs to be extended by anyone who wants to build custom
* destination
*/
-public abstract class AuditDestination extends BaseAuditProvider {
+public abstract class AuditDestination extends BaseAuditHandler {
private static final Log logger = LogFactory.getLog(AuditDestination.class);
public AuditDestination() {
@@ -51,21 +51,31 @@ public abstract class AuditDestination extends BaseAuditProvider {
/*
* (non-Javadoc)
*
- * @see org.apache.ranger.audit.provider.AuditProvider#isFlushPending()
+ * @see org.apache.ranger.audit.provider.AuditProvider#flush()
*/
@Override
- public boolean isFlushPending() {
- return false;
+ public void flush() {
+
}
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#flush()
- */
@Override
- public void flush() {
+ public void start() {
+
+ }
+
+ @Override
+ public void stop() {
+
+ }
+ @Override
+ public void waitToComplete() {
+
}
+ @Override
+ public void waitToComplete(long timeout) {
+
+ }
+
}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/destination/FileAuditDestination.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/FileAuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/FileAuditDestination.java
index 1ccfd5f..a132cdf 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/destination/FileAuditDestination.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/destination/FileAuditDestination.java
@@ -21,9 +21,7 @@ package org.apache.ranger.audit.destination;
import java.io.BufferedWriter;
import java.io.File;
-import java.io.FileNotFoundException;
import java.io.FileWriter;
-import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Collection;
@@ -107,7 +105,12 @@ public class FileAuditDestination extends AuditDestination {
}
@Override
- public boolean logJSON(Collection<String> events) {
+ synchronized public boolean logJSON(Collection<String> events) {
+ if (isStopped) {
+ logError("log() called after stop was requested. name=" + getName());
+ return false;
+ }
+
try {
PrintWriter out = getLogFileStream();
for (String event : events) {
@@ -128,7 +131,7 @@ public class FileAuditDestination extends AuditDestination {
* org.apache.ranger.audit.provider.AuditProvider#log(java.util.Collection)
*/
@Override
- synchronized public boolean log(Collection<AuditEventBase> events) {
+ public boolean log(Collection<AuditEventBase> events) {
if (isStopped) {
logError("log() called after stop was requested. name=" + getName());
return false;
@@ -158,11 +161,16 @@ public class FileAuditDestination extends AuditDestination {
@Override
synchronized public void stop() {
+ isStopped = true;
if (logWriter != null) {
- logWriter.flush();
- logWriter.close();
+ try {
+ logWriter.flush();
+ logWriter.close();
+ } catch (Throwable t) {
+ logger.error("Error on closing log writter. Exception will be ignored. name="
+ + getName() + ", fileName=" + currentFileName);
+ }
logWriter = null;
- isStopped = true;
}
}
@@ -214,15 +222,20 @@ public class FileAuditDestination extends AuditDestination {
return logWriter;
}
- private void closeFileIfNeeded() throws FileNotFoundException, IOException {
+ private void closeFileIfNeeded() {
if (logWriter == null) {
return;
}
if (System.currentTimeMillis() - fileCreateTime.getTime() > fileRolloverSec * 1000) {
logger.info("Closing file. Rolling over. name=" + getName()
+ ", fileName=" + currentFileName);
- logWriter.flush();
- logWriter.close();
+ try {
+ logWriter.flush();
+ logWriter.close();
+ } catch (Throwable t) {
+ logger.error("Error on closing log writter. Exception will be ignored. name="
+ + getName() + ", fileName=" + currentFileName);
+ }
logWriter = null;
currentFileName = null;
}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/destination/HDFSAuditDestination.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/HDFSAuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/HDFSAuditDestination.java
index 706eb8e..6ca4fce 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/destination/HDFSAuditDestination.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/destination/HDFSAuditDestination.java
@@ -74,6 +74,12 @@ public class HDFSAuditDestination extends AuditDestination {
// Initial folder and file properties
String logFolderProp = MiscUtil.getStringProperty(props, propPrefix
+ "." + PROP_HDFS_DIR);
+ if (logFolderProp == null || logFolderProp.isEmpty()) {
+ logger.fatal("File destination folder is not configured. Please set "
+ + propPrefix + "." + PROP_HDFS_DIR + ". name=" + getName());
+ return;
+ }
+
String logSubFolder = MiscUtil.getStringProperty(props, propPrefix
+ "." + PROP_HDFS_SUBDIR);
if (logSubFolder == null || logSubFolder.isEmpty()) {
@@ -89,12 +95,6 @@ public class HDFSAuditDestination extends AuditDestination {
logFileNameFormat = "%app-type%_ranger_audit_%hostname%" + ".log";
}
- if (logFolderProp == null || logFolderProp.isEmpty()) {
- logger.fatal("File destination folder is not configured. Please set "
- + propPrefix + "." + PROP_HDFS_DIR + ". name=" + getName());
- return;
- }
-
logFolder = logFolderProp + "/" + logSubFolder;
logger.info("logFolder=" + logFolder + ", destName=" + getName());
logger.info("logFileNameFormat=" + logFileNameFormat + ", destName="
@@ -104,7 +104,12 @@ public class HDFSAuditDestination extends AuditDestination {
}
@Override
- public boolean logJSON(Collection<String> events) {
+ synchronized public boolean logJSON(Collection<String> events) {
+ if (isStopped) {
+ logError("log() called after stop was requested. name=" + getName());
+ return false;
+ }
+
try {
PrintWriter out = getLogFileStream();
for (String event : events) {
@@ -125,7 +130,7 @@ public class HDFSAuditDestination extends AuditDestination {
* org.apache.ranger.audit.provider.AuditProvider#log(java.util.Collection)
*/
@Override
- synchronized public boolean log(Collection<AuditEventBase> events) {
+ public boolean log(Collection<AuditEventBase> events) {
if (isStopped) {
logError("log() called after stop was requested. name=" + getName());
return false;
@@ -155,15 +160,16 @@ public class HDFSAuditDestination extends AuditDestination {
@Override
synchronized public void stop() {
- try {
- if (logWriter != null) {
+ isStopped = true;
+ if (logWriter != null) {
+ try {
logWriter.flush();
logWriter.close();
- logWriter = null;
- isStopped = true;
+ } catch (Throwable t) {
+ logger.error("Error on closing log writer. Exception will be ignored. name="
+ + getName() + ", fileName=" + currentFileName);
}
- } catch (Throwable t) {
- logger.error("Error closing HDFS file.", t);
+ logWriter = null;
}
}
@@ -198,9 +204,11 @@ public class HDFSAuditDestination extends AuditDestination {
String extension = defaultPath.substring(lastDot);
fullPath = baseName + "." + i + extension;
hdfPath = new Path(fullPath);
- logger.info("Checking whether log file exists. hdfPath=" + fullPath);
+ logger.info("Checking whether log file exists. hdfPath="
+ + fullPath);
}
- logger.info("Log file doesn't exists. Will create and use it. hdfPath=" + fullPath);
+ logger.info("Log file doesn't exist. Will create and use it. hdfPath="
+ + fullPath);
// Create parent folders
createParents(hdfPath, fileSystem);
@@ -234,8 +242,14 @@ public class HDFSAuditDestination extends AuditDestination {
if (System.currentTimeMillis() - fileCreateTime.getTime() > fileRolloverSec * 1000) {
logger.info("Closing file. Rolling over. name=" + getName()
+ ", fileName=" + currentFileName);
- logWriter.flush();
- logWriter.close();
+ try {
+ logWriter.flush();
+ logWriter.close();
+ } catch (Throwable t) {
+ logger.error("Error on closing log writer. Exception will be ignored. name="
+ + getName() + ", fileName=" + currentFileName);
+ }
+
logWriter = null;
currentFileName = null;
}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditEventBase.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditEventBase.java b/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditEventBase.java
index 39a2578..2c6a87f 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditEventBase.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditEventBase.java
@@ -32,8 +32,8 @@ public abstract class AuditEventBase {
public abstract String getEventKey();
public abstract Date getEventTime ();
- public abstract void setEventCount(long frequencyCount);
- public abstract void setEventDurationMS(long frequencyDurationMS);
+ public abstract void setEventCount(long eventCount);
+ public abstract void setEventDurationMS(long eventDurationMS);
protected String trim(String str, int len) {
String ret = str;
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/provider/AsyncAuditProvider.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AsyncAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AsyncAuditProvider.java
index 53adc86..c3a0c78 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AsyncAuditProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AsyncAuditProvider.java
@@ -68,7 +68,7 @@ public class AsyncAuditProvider extends MultiDestAuditProvider implements
mQueue = new ArrayBlockingQueue<AuditEventBase>(mMaxQueueSize);
}
- public AsyncAuditProvider(String name, int maxQueueSize, int maxFlushInterval, AuditProvider provider) {
+ public AsyncAuditProvider(String name, int maxQueueSize, int maxFlushInterval, AuditHandler provider) {
this(name, maxQueueSize, maxFlushInterval);
addAuditProvider(provider);
@@ -174,21 +174,21 @@ public class AsyncAuditProvider extends MultiDestAuditProvider implements
while(ret == null) {
logSummaryIfRequired();
- if (mMaxFlushInterval > 0 && isFlushPending()) {
- long timeTillNextFlush = getTimeTillNextFlush();
-
- if (timeTillNextFlush <= 0) {
- break; // force flush
- }
-
- ret = mQueue.poll(timeTillNextFlush, TimeUnit.MILLISECONDS);
- } else {
+// if (mMaxFlushInterval > 0 && isFlushPending()) {
+// long timeTillNextFlush = getTimeTillNextFlush();
+//
+// if (timeTillNextFlush <= 0) {
+// break; // force flush
+// }
+//
+// ret = mQueue.poll(timeTillNextFlush, TimeUnit.MILLISECONDS);
+// } else {
// Let's wake up for summary logging
long waitTime = intervalLogDurationMS - (System.currentTimeMillis() - lastIntervalLogTime);
waitTime = waitTime <= 0 ? intervalLogDurationMS : waitTime;
ret = mQueue.poll(waitTime, TimeUnit.MILLISECONDS);
- }
+// }
}
if(ret != null) {
@@ -246,23 +246,23 @@ public class AsyncAuditProvider extends MultiDestAuditProvider implements
LOG.debug("<== AsyncAuditProvider.waitToComplete()");
}
- private long getTimeTillNextFlush() {
- long timeTillNextFlush = mMaxFlushInterval;
-
- if (mMaxFlushInterval > 0) {
- long lastFlushTime = getLastFlushTime();
-
- if (lastFlushTime != 0) {
- long timeSinceLastFlush = System.currentTimeMillis()
- - lastFlushTime;
-
- if (timeSinceLastFlush >= mMaxFlushInterval)
- timeTillNextFlush = 0;
- else
- timeTillNextFlush = mMaxFlushInterval - timeSinceLastFlush;
- }
- }
-
- return timeTillNextFlush;
- }
+// private long getTimeTillNextFlush() {
+// long timeTillNextFlush = mMaxFlushInterval;
+//
+// if (mMaxFlushInterval > 0) {
+// long lastFlushTime = getLastFlushTime();
+//
+// if (lastFlushTime != 0) {
+// long timeSinceLastFlush = System.currentTimeMillis()
+// - lastFlushTime;
+//
+// if (timeSinceLastFlush >= mMaxFlushInterval)
+// timeTillNextFlush = 0;
+// else
+// timeTillNextFlush = mMaxFlushInterval - timeSinceLastFlush;
+// }
+// }
+//
+// return timeTillNextFlush;
+// }
}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditHandler.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditHandler.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditHandler.java
new file mode 100644
index 0000000..7b51f1d
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditHandler.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ranger.audit.provider;
+
+import java.util.Collection;
+import java.util.Properties;
+
+import org.apache.ranger.audit.model.AuditEventBase;
+
+public interface AuditHandler {
+ public boolean log(AuditEventBase event);
+ public boolean log(Collection<AuditEventBase> events);
+
+ public boolean logJSON(String event);
+ public boolean logJSON(Collection<String> events);
+
+ public void init(Properties prop);
+ public void init(Properties prop, String basePropertyName);
+ public void start();
+ public void stop();
+ public void waitToComplete();
+ public void waitToComplete(long timeout);
+
+ /**
+ * Name for this provider. Used only during logging. Uniqueness is not guaranteed
+ */
+ public String getName();
+
+ public void flush();
+}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProvider.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProvider.java
deleted file mode 100644
index 0e38624..0000000
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProvider.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ranger.audit.provider;
-
-import java.util.Collection;
-import java.util.Properties;
-
-import org.apache.ranger.audit.model.AuditEventBase;
-
-public interface AuditProvider {
- public boolean log(AuditEventBase event);
- public boolean log(Collection<AuditEventBase> events);
-
- public boolean logJSON(String event);
- public boolean logJSON(Collection<String> events);
-
- public void init(Properties prop);
- public void init(Properties prop, String basePropertyName);
- public void start();
- public void stop();
- public void waitToComplete();
- public void waitToComplete(long timeout);
-
- /**
- * Name for this provider. Used only during logging. Uniqueness is not guaranteed
- */
- public String getName();
-
- /**
- * If this AuditProvider in the state of shutdown
- * @return
- */
- public boolean isDrain();
-
- public int getMaxBatchSize();
- public int getMaxBatchInterval();
- public boolean isFlushPending();
- public long getLastFlushTime();
- public void flush();
-}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProviderFactory.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProviderFactory.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProviderFactory.java
index a67f7e0..7b2b52b 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProviderFactory.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProviderFactory.java
@@ -31,6 +31,7 @@ import org.apache.ranger.audit.provider.kafka.KafkaAuditProvider;
import org.apache.ranger.audit.provider.solr.SolrAuditProvider;
import org.apache.ranger.audit.queue.AuditAsyncQueue;
import org.apache.ranger.audit.queue.AuditBatchQueue;
+import org.apache.ranger.audit.queue.AuditQueue;
import org.apache.ranger.audit.queue.AuditSummaryQueue;
/*
@@ -58,7 +59,7 @@ public class AuditProviderFactory {
private static AuditProviderFactory sFactory;
- private AuditProvider mProvider = null;
+ private AuditHandler mProvider = null;
private boolean mInitDone = false;
private AuditProviderFactory() {
@@ -79,11 +80,11 @@ public class AuditProviderFactory {
return sFactory;
}
- public static AuditProvider getAuditProvider() {
+ public static AuditHandler getAuditProvider() {
return AuditProviderFactory.getInstance().getProvider();
}
- public AuditProvider getProvider() {
+ public AuditHandler getProvider() {
return mProvider;
}
@@ -118,7 +119,7 @@ public class AuditProviderFactory {
boolean isAuditToSolrEnabled = MiscUtil.getBooleanProperty(props,
AUDIT_SOLR_IS_ENABLED_PROP, false);
- List<AuditProvider> providers = new ArrayList<AuditProvider>();
+ List<AuditHandler> providers = new ArrayList<AuditHandler>();
// TODO: Delete me
for (Object propNameObj : props.keySet()) {
@@ -150,17 +151,16 @@ public class AuditProviderFactory {
for (String destName : destNameList) {
String destPropPrefix = AUDIT_DEST_BASE + "." + destName;
- AuditProvider destProvider = getProviderFromConfig(props,
- destPropPrefix, destName);
+ AuditHandler destProvider = getProviderFromConfig(props,
+ destPropPrefix, destName, null);
if (destProvider != null) {
destProvider.init(props, destPropPrefix);
String queueName = MiscUtil.getStringProperty(props,
- destPropPrefix + "." + BaseAuditProvider.PROP_QUEUE);
+ destPropPrefix + "." + AuditQueue.PROP_QUEUE);
if (queueName == null || queueName.isEmpty()) {
- LOG.info(destPropPrefix + "."
- + BaseAuditProvider.PROP_QUEUE
+ LOG.info(destPropPrefix + "." + AuditQueue.PROP_QUEUE
+ " is not set. Setting queue to batch for "
+ destName);
queueName = "batch";
@@ -169,16 +169,15 @@ public class AuditProviderFactory {
if (queueName != null && !queueName.isEmpty()
&& !queueName.equalsIgnoreCase("none")) {
String queuePropPrefix = destPropPrefix + "." + queueName;
- AuditProvider queueProvider = getProviderFromConfig(props,
- queuePropPrefix, queueName);
+ AuditHandler queueProvider = getProviderFromConfig(props,
+ queuePropPrefix, queueName, destProvider);
if (queueProvider != null) {
- if (queueProvider instanceof BaseAuditProvider) {
- BaseAuditProvider qProvider = (BaseAuditProvider) queueProvider;
- qProvider.setConsumer(destProvider);
+ if (queueProvider instanceof AuditQueue) {
+ AuditQueue qProvider = (AuditQueue) queueProvider;
qProvider.init(props, queuePropPrefix);
providers.add(queueProvider);
} else {
- LOG.fatal("Provider queue doesn't extend BaseAuditProvider destination "
+ LOG.fatal("Provider queue doesn't extend AuditQueue. Destination="
+ destName
+ " can't be created. queueName="
+ queueName);
@@ -196,51 +195,51 @@ public class AuditProviderFactory {
}
if (providers.size() > 0) {
LOG.info("Using v3 audit configuration");
- AuditAsyncQueue asyncQueue = new AuditAsyncQueue();
- String propPrefix = BaseAuditProvider.PROP_DEFAULT_PREFIX + "."
- + "async";
- asyncQueue.init(props, propPrefix);
+ AuditHandler consumer = providers.get(0);
+
+ // Possible pipeline is:
+ // async_queue -> summary_queue -> multidestination -> batch_queue
+ // -> hdfs_destination
+ // -> batch_queue -> solr_destination
+ // -> batch_queue -> kafka_destination
+ // Above, up to multidestination, the providers are same, then it
+ // branches out in parallel.
+
+ // Set the providers in the reverse order e.g.
+
+ if (providers.size() > 1) {
+ // If there are more than one destination, then we need multi
+ // destination to process it in parallel
+ LOG.info("MultiDestAuditProvider is used. Destination count="
+ + providers.size());
+ MultiDestAuditProvider multiDestProvider = new MultiDestAuditProvider();
+ multiDestProvider.init(props);
+ multiDestProvider.addAuditProviders(providers);
+ consumer = multiDestProvider;
+ }
- propPrefix = BaseAuditProvider.PROP_DEFAULT_PREFIX;
+ // Let's see if Summary is enabled, then summarize before sending it
+ // downstream
+ String propPrefix = BaseAuditHandler.PROP_DEFAULT_PREFIX;
boolean summaryEnabled = MiscUtil.getBooleanProperty(props,
propPrefix + "." + "summary" + "." + "enabled", false);
AuditSummaryQueue summaryQueue = null;
if (summaryEnabled) {
LOG.info("AuditSummaryQueue is enabled");
- summaryQueue = new AuditSummaryQueue();
+ summaryQueue = new AuditSummaryQueue(consumer);
summaryQueue.init(props, propPrefix);
- asyncQueue.setConsumer(summaryQueue);
+ consumer = summaryQueue;
} else {
LOG.info("AuditSummaryQueue is disabled");
}
- if (providers.size() == 1) {
- if (summaryEnabled) {
- LOG.info("Setting " + providers.get(0).getName()
- + " as consumer to AuditSummaryQueue");
- summaryQueue.setConsumer(providers.get(0));
- } else {
- LOG.info("Setting " + providers.get(0).getName()
- + " as consumer to " + asyncQueue.getName());
- asyncQueue.setConsumer(providers.get(0));
- }
- } else {
- MultiDestAuditProvider multiDestProvider = new MultiDestAuditProvider();
- multiDestProvider.init(props);
- multiDestProvider.addAuditProviders(providers);
- if (summaryEnabled) {
- LOG.info("Setting " + multiDestProvider.getName()
- + " as consumer to AuditSummaryQueue");
- summaryQueue.setConsumer(multiDestProvider);
- } else {
- LOG.info("Setting " + multiDestProvider.getName()
- + " as consumer to " + asyncQueue.getName());
- asyncQueue.setConsumer(multiDestProvider);
- }
- }
+ // Create the AsyncQueue
+ AuditAsyncQueue asyncQueue = new AuditAsyncQueue(consumer);
+ propPrefix = BaseAuditHandler.PROP_DEFAULT_PREFIX + "." + "async";
+ asyncQueue.init(props, propPrefix);
mProvider = asyncQueue;
- LOG.info("Starting " + mProvider.getName());
+ LOG.info("Starting audit queue " + mProvider.getName());
mProvider.start();
} else {
LOG.info("No v3 audit configuration found. Trying v2 audit configurations");
@@ -315,9 +314,7 @@ public class AuditProviderFactory {
if (kafkaProvider.isAsync()) {
AsyncAuditProvider asyncProvider = new AsyncAuditProvider(
- "MyKafkaAuditProvider",
- kafkaProvider.getMaxQueueSize(),
- kafkaProvider.getMaxBatchInterval(), kafkaProvider);
+ "MyKafkaAuditProvider", 1000, 1000, kafkaProvider);
providers.add(asyncProvider);
} else {
providers.add(kafkaProvider);
@@ -331,9 +328,7 @@ public class AuditProviderFactory {
if (solrProvider.isAsync()) {
AsyncAuditProvider asyncProvider = new AsyncAuditProvider(
- "MySolrAuditProvider",
- solrProvider.getMaxQueueSize(),
- solrProvider.getMaxBatchInterval(), solrProvider);
+ "MySolrAuditProvider", 1000, 1000, solrProvider);
providers.add(asyncProvider);
} else {
providers.add(solrProvider);
@@ -387,18 +382,26 @@ public class AuditProviderFactory {
Runtime.getRuntime().addShutdownHook(jvmShutdownHook);
}
- private AuditProvider getProviderFromConfig(Properties props,
- String propPrefix, String providerName) {
- AuditProvider provider = null;
+ private AuditHandler getProviderFromConfig(Properties props,
+ String propPrefix, String providerName, AuditHandler consumer) {
+ AuditHandler provider = null;
String className = MiscUtil.getStringProperty(props, propPrefix + "."
- + BaseAuditProvider.PROP_CLASS_NAME);
+ + BaseAuditHandler.PROP_CLASS_NAME);
if (className != null && !className.isEmpty()) {
try {
- provider = (AuditProvider) Class.forName(className)
- .newInstance();
+ Class<?> handlerClass = Class.forName(className);
+ if (handlerClass.isAssignableFrom(AuditQueue.class)) {
+ // Queue class needs consumer
+ handlerClass.getDeclaredConstructor(AuditHandler.class)
+ .newInstance(consumer);
+ } else {
+ provider = (AuditHandler) Class.forName(className)
+ .newInstance();
+ }
} catch (Exception e) {
LOG.fatal("Can't instantiate audit class for providerName="
- + providerName + ", className=" + className, e);
+ + providerName + ", className=" + className
+ + ", propertyPrefix=" + propPrefix, e);
}
} else {
if (providerName.equals("file")) {
@@ -414,25 +417,32 @@ public class AuditProviderFactory {
} else if (providerName.equals("log4j")) {
provider = new Log4jAuditProvider();
} else if (providerName.equals("batch")) {
- provider = new AuditBatchQueue();
+ provider = new AuditBatchQueue(consumer);
} else if (providerName.equals("async")) {
- provider = new AuditAsyncQueue();
+ provider = new AuditAsyncQueue(consumer);
} else {
LOG.error("Provider name doesn't have any class associated with it. providerName="
- + providerName);
+ + providerName + ", propertyPrefix=" + propPrefix);
+ }
+ }
+ if (provider != null && provider instanceof AuditQueue) {
+ if (consumer == null) {
+ LOG.fatal("consumer can't be null for AuditQueue. queue="
+ + provider.getName() + ", propertyPrefix=" + propPrefix);
+ provider = null;
}
}
return provider;
}
- private AuditProvider getDefaultProvider() {
+ private AuditHandler getDefaultProvider() {
return new DummyAuditProvider();
}
private static class JVMShutdownHook extends Thread {
- AuditProvider mProvider;
+ AuditHandler mProvider;
- public JVMShutdownHook(AuditProvider provider) {
+ public JVMShutdownHook(AuditHandler provider) {
mProvider = provider;
}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditHandler.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditHandler.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditHandler.java
new file mode 100644
index 0000000..601650e
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditHandler.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ranger.audit.provider;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.ranger.audit.model.AuditEventBase;
+import org.apache.ranger.audit.model.AuthzAuditEvent;
+import com.google.gson.GsonBuilder;
+
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Properties;
+
+public abstract class BaseAuditHandler implements AuditHandler {
+ private static final Log LOG = LogFactory.getLog(BaseAuditHandler.class);
+
+ private static final String AUDIT_LOG_FAILURE_REPORT_MIN_INTERVAL_PROP = "xasecure.audit.log.failure.report.min.interval.ms";
+
+ private int mLogFailureReportMinIntervalInMs = 60 * 1000;
+
+ private AtomicLong mFailedLogLastReportTime = new AtomicLong(0);
+ private AtomicLong mFailedLogCountSinceLastReport = new AtomicLong(0);
+ private AtomicLong mFailedLogCountLifeTime = new AtomicLong(0);
+
+ public static final String PROP_NAME = "name";
+ public static final String PROP_CLASS_NAME = "classname";
+
+ public static final String PROP_DEFAULT_PREFIX = "xasecure.audit.provider";
+
+ protected String propPrefix = PROP_DEFAULT_PREFIX;
+
+ protected String providerName = null;
+
+ protected int failedRetryTimes = 3;
+ protected int failedRetrySleep = 3 * 1000;
+
+ int errorLogIntervalMS = 30 * 1000; // Every 30 seconds
+ long lastErrorLogMS = 0;
+
+ protected Properties props = null;
+
+ @Override
+ public void init(Properties props) {
+ init(props, null);
+ }
+
+ @Override
+ public void init(Properties props, String basePropertyName) {
+ LOG.info("BaseAuditProvider.init()");
+ this.props = props;
+ if (basePropertyName != null) {
+ propPrefix = basePropertyName;
+ }
+ LOG.info("propPrefix=" + propPrefix);
+ // Get final token
+ List<String> tokens = MiscUtil.toArray(propPrefix, ".");
+ String finalToken = tokens.get(tokens.size() - 1);
+
+ String name = MiscUtil.getStringProperty(props, basePropertyName + "."
+ + PROP_NAME);
+ if (name != null && !name.isEmpty()) {
+ providerName = name;
+ }
+ if (providerName == null) {
+ providerName = finalToken;
+ LOG.info("Using providerName from property prefix. providerName="
+ + providerName);
+ }
+ LOG.info("providerName=" + providerName);
+
+ try {
+ new GsonBuilder().setDateFormat("yyyyMMdd-HH:mm:ss.SSS-Z").create();
+ } catch (Throwable excp) {
+ LOG.warn(
+ "Log4jAuditProvider.init(): failed to create GsonBuilder object. events will be formatted using toString(), instead of Json",
+ excp);
+ }
+
+ mLogFailureReportMinIntervalInMs = MiscUtil.getIntProperty(props,
+ AUDIT_LOG_FAILURE_REPORT_MIN_INTERVAL_PROP, 60 * 1000);
+
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger.
+ * audit.model.AuditEventBase)
+ */
+ @Override
+ public boolean log(AuditEventBase event) {
+ List<AuditEventBase> eventList = new ArrayList<AuditEventBase>();
+ eventList.add(event);
+ return log(eventList);
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.ranger.audit.provider.AuditProvider#logJSON(java.lang.String)
+ */
+ @Override
+ public boolean logJSON(String event) {
+ AuditEventBase eventObj = MiscUtil.fromJson(event,
+ AuthzAuditEvent.class);
+ return log(eventObj);
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see
+ * org.apache.ranger.audit.provider.AuditProvider#logJSON(java.util.Collection
+ * )
+ */
+ @Override
+ public boolean logJSON(Collection<String> events) {
+ boolean ret = true;
+ for (String event : events) {
+ ret = logJSON(event);
+ if (!ret) {
+ break;
+ }
+ }
+ return ret;
+ }
+
+ public void setName(String name) {
+ providerName = name;
+ }
+
+ @Override
+ public String getName() {
+ return providerName;
+ }
+
+ public void logFailedEvent(AuditEventBase event) {
+ logFailedEvent(event, null);
+ }
+
+ public void logError(String msg) {
+ long currTimeMS = System.currentTimeMillis();
+ if (currTimeMS - lastErrorLogMS > errorLogIntervalMS) {
+ LOG.error(msg);
+ lastErrorLogMS = currTimeMS;
+ }
+ }
+
+ public void logError(String msg, Throwable ex) {
+ long currTimeMS = System.currentTimeMillis();
+ if (currTimeMS - lastErrorLogMS > errorLogIntervalMS) {
+ LOG.error(msg, ex);
+ lastErrorLogMS = currTimeMS;
+ }
+ }
+
+ public String getTimeDiffStr(long time1, long time2) {
+ long timeInMs = Math.abs(time1 - time2);
+ return formatIntervalForLog(timeInMs);
+ }
+
+ public String formatIntervalForLog(long timeInMs) {
+ long hours = timeInMs / (60 * 60 * 1000);
+ long minutes = (timeInMs / (60 * 1000)) % 60;
+ long seconds = (timeInMs % (60 * 1000)) / 1000;
+ long mSeconds = (timeInMs % (1000));
+
+ if (hours > 0)
+ return String.format("%02d:%02d:%02d.%03d hours", hours, minutes,
+ seconds, mSeconds);
+ else if (minutes > 0)
+ return String.format("%02d:%02d.%03d minutes", minutes, seconds,
+ mSeconds);
+ else if (seconds > 0)
+ return String.format("%02d.%03d seconds", seconds, mSeconds);
+ else
+ return String.format("%03d milli-seconds", mSeconds);
+ }
+
+ public void logFailedEvent(AuditEventBase event, Throwable excp) {
+ long now = System.currentTimeMillis();
+
+ long timeSinceLastReport = now - mFailedLogLastReportTime.get();
+ long countSinceLastReport = mFailedLogCountSinceLastReport
+ .incrementAndGet();
+ long countLifeTime = mFailedLogCountLifeTime.incrementAndGet();
+
+ if (timeSinceLastReport >= mLogFailureReportMinIntervalInMs) {
+ mFailedLogLastReportTime.set(now);
+ mFailedLogCountSinceLastReport.set(0);
+
+ if (excp != null) {
+ LOG.warn(
+ "failed to log audit event: "
+ + MiscUtil.stringify(event), excp);
+ } else {
+ LOG.warn("failed to log audit event: "
+ + MiscUtil.stringify(event));
+ }
+
+ if (countLifeTime > 1) { // no stats to print for the 1st failure
+ LOG.warn("Log failure count: " + countSinceLastReport
+ + " in past "
+ + formatIntervalForLog(timeSinceLastReport) + "; "
+ + countLifeTime + " during process lifetime");
+ }
+ }
+ }
+
+ public void logFailedEvent(Collection<AuditEventBase> events, Throwable excp) {
+ for (AuditEventBase event : events) {
+ logFailedEvent(event, excp);
+ }
+ }
+
+ public void logFailedEventJSON(String event, Throwable excp) {
+ long now = System.currentTimeMillis();
+
+ long timeSinceLastReport = now - mFailedLogLastReportTime.get();
+ long countSinceLastReport = mFailedLogCountSinceLastReport
+ .incrementAndGet();
+ long countLifeTime = mFailedLogCountLifeTime.incrementAndGet();
+
+ if (timeSinceLastReport >= mLogFailureReportMinIntervalInMs) {
+ mFailedLogLastReportTime.set(now);
+ mFailedLogCountSinceLastReport.set(0);
+
+ if (excp != null) {
+ LOG.warn("failed to log audit event: " + event, excp);
+ } else {
+ LOG.warn("failed to log audit event: " + event);
+ }
+
+ if (countLifeTime > 1) { // no stats to print for the 1st failure
+ LOG.warn("Log failure count: " + countSinceLastReport
+ + " in past "
+ + formatIntervalForLog(timeSinceLastReport) + "; "
+ + countLifeTime + " during process lifetime");
+ }
+ }
+ }
+
+ public void logFailedEventJSON(Collection<String> events, Throwable excp) {
+ for (String event : events) {
+ logFailedEventJSON(event, excp);
+ }
+ }
+
+
+}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditProvider.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditProvider.java
deleted file mode 100644
index 85c207b..0000000
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditProvider.java
+++ /dev/null
@@ -1,432 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.ranger.audit.provider;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.ranger.audit.model.AuditEventBase;
-import org.apache.ranger.audit.model.AuthzAuditEvent;
-import org.apache.ranger.audit.queue.AuditFileSpool;
-
-import com.google.gson.GsonBuilder;
-
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.Properties;
-
-public abstract class BaseAuditProvider implements AuditProvider {
- private static final Log LOG = LogFactory.getLog(BaseAuditProvider.class);
-
- private static final String AUDIT_LOG_FAILURE_REPORT_MIN_INTERVAL_PROP = "xasecure.audit.log.failure.report.min.interval.ms";
- public static final int AUDIT_MAX_QUEUE_SIZE_DEFAULT = 1024 * 1024;
- public static final int AUDIT_BATCH_INTERVAL_DEFAULT_MS = 1000;
- public static final int AUDIT_BATCH_SIZE_DEFAULT = 1000;
-
- private AtomicLong lifeTimeInLogCount = new AtomicLong(0);
-
- private int mLogFailureReportMinIntervalInMs = 60 * 1000;
-
- private AtomicLong mFailedLogLastReportTime = new AtomicLong(0);
- private AtomicLong mFailedLogCountSinceLastReport = new AtomicLong(0);
- private AtomicLong mFailedLogCountLifeTime = new AtomicLong(0);
-
- public static final String PROP_NAME = "name";
- public static final String PROP_CLASS_NAME = "classname";
- public static final String PROP_QUEUE = "queue";
-
- public static final String PROP_BATCH_SIZE = "batch.size";
- public static final String PROP_QUEUE_SIZE = "queue.size";
- public static final String PROP_BATCH_INTERVAL = "batch.interval.ms";
-
- public static final String PROP_FILE_SPOOL_ENABLE = "filespool.enable";
- public static final String PROP_FILE_SPOOL_WAIT_FOR_FULL_DRAIN = "filespool.drain.full.wait.ms";
- public static final String PROP_FILE_SPOOL_QUEUE_THRESHOLD = "filespool.drain.threshold.percent";
-
- public static final String PROP_DEFAULT_PREFIX = "xasecure.audit.provider";
-
- private boolean isDrain = false;
- private String providerName = null;
-
- private int maxQueueSize = AUDIT_MAX_QUEUE_SIZE_DEFAULT;
- private int maxBatchInterval = AUDIT_BATCH_INTERVAL_DEFAULT_MS;
- private int maxBatchSize = AUDIT_BATCH_SIZE_DEFAULT;
-
- protected int failedRetryTimes = 3;
- protected int failedRetrySleep = 3 * 1000;
-
- protected AuditProvider consumer = null;
- protected AuditFileSpool fileSpooler = null;
-
- protected boolean fileSpoolerEnabled = false;
- protected int fileSpoolMaxWaitTime = 5 * 60 * 1000; // Default 5 minutes
- protected int fileSpoolDrainThresholdPercent = 80;
-
- int errorLogIntervalMS = 30 * 1000; // Every 30 seconds
- long lastErrorLogMS = 0;
-
- protected Properties props = null;
-
- public BaseAuditProvider() {
- }
-
- public BaseAuditProvider(AuditProvider consumer) {
- this.consumer = consumer;
- }
-
- @Override
- public void init(Properties props) {
- init(props, null);
- }
-
- @Override
- public void init(Properties props, String basePropertyName) {
- LOG.info("BaseAuditProvider.init()");
- this.props = props;
- String propPrefix = PROP_DEFAULT_PREFIX;
- if (basePropertyName != null) {
- propPrefix = basePropertyName;
- }
- LOG.info("propPrefix=" + propPrefix);
- // Get final token
- List<String> tokens = MiscUtil.toArray(propPrefix, ".");
- String finalToken = tokens.get(tokens.size() - 1);
-
- String name = MiscUtil.getStringProperty(props, basePropertyName + "."
- + PROP_NAME);
- if (name != null && !name.isEmpty()) {
- providerName = name;
- }
- if (providerName == null) {
- providerName = finalToken;
- LOG.info("Using providerName from property prefix. providerName="
- + providerName);
- }
- LOG.info("providerName=" + providerName);
-
- setMaxBatchSize(MiscUtil.getIntProperty(props, propPrefix + "."
- + PROP_BATCH_SIZE, getMaxBatchSize()));
- setMaxQueueSize(MiscUtil.getIntProperty(props, propPrefix + "."
- + PROP_QUEUE_SIZE, getMaxQueueSize()));
- setMaxBatchInterval(MiscUtil.getIntProperty(props, propPrefix + "."
- + PROP_BATCH_INTERVAL, getMaxBatchInterval()));
-
- fileSpoolerEnabled = MiscUtil.getBooleanProperty(props, propPrefix
- + "." + PROP_FILE_SPOOL_ENABLE, false);
- String logFolderProp = MiscUtil.getStringProperty(props, propPrefix
- + "." + AuditFileSpool.PROP_FILE_SPOOL_LOCAL_DIR);
- if (fileSpoolerEnabled || logFolderProp != null) {
- LOG.info("File spool is enabled for " + getName()
- + ", logFolderProp=" + logFolderProp + ", " + propPrefix
- + "." + AuditFileSpool.PROP_FILE_SPOOL_LOCAL_DIR + "="
- + fileSpoolerEnabled);
- fileSpoolerEnabled = true;
- fileSpoolMaxWaitTime = MiscUtil.getIntProperty(props, propPrefix
- + "." + PROP_FILE_SPOOL_WAIT_FOR_FULL_DRAIN,
- fileSpoolMaxWaitTime);
- fileSpoolDrainThresholdPercent = MiscUtil.getIntProperty(props,
- propPrefix + "." + PROP_FILE_SPOOL_QUEUE_THRESHOLD,
- fileSpoolDrainThresholdPercent);
- fileSpooler = new AuditFileSpool(this, consumer);
- fileSpooler.init(props, basePropertyName);
- } else {
- LOG.info("File spool is disabled for " + getName());
- }
-
- try {
- new GsonBuilder().setDateFormat("yyyyMMdd-HH:mm:ss.SSS-Z").create();
- } catch (Throwable excp) {
- LOG.warn(
- "Log4jAuditProvider.init(): failed to create GsonBuilder object. events will be formated using toString(), instead of Json",
- excp);
- }
-
- mLogFailureReportMinIntervalInMs = MiscUtil.getIntProperty(props,
- AUDIT_LOG_FAILURE_REPORT_MIN_INTERVAL_PROP, 60 * 1000);
-
- }
-
- public AuditProvider getConsumer() {
- return consumer;
- }
-
- public void setConsumer(AuditProvider consumer) {
- this.consumer = consumer;
- }
-
- public void logFailedEvent(AuditEventBase event) {
- logFailedEvent(event, null);
- }
-
- public void logFailedEvent(AuditEventBase event, Throwable excp) {
- long now = System.currentTimeMillis();
-
- long timeSinceLastReport = now - mFailedLogLastReportTime.get();
- long countSinceLastReport = mFailedLogCountSinceLastReport
- .incrementAndGet();
- long countLifeTime = mFailedLogCountLifeTime.incrementAndGet();
-
- if (timeSinceLastReport >= mLogFailureReportMinIntervalInMs) {
- mFailedLogLastReportTime.set(now);
- mFailedLogCountSinceLastReport.set(0);
-
- if (excp != null) {
- LOG.warn(
- "failed to log audit event: "
- + MiscUtil.stringify(event), excp);
- } else {
- LOG.warn("failed to log audit event: "
- + MiscUtil.stringify(event));
- }
-
- if (countLifeTime > 1) { // no stats to print for the 1st failure
- LOG.warn("Log failure count: " + countSinceLastReport
- + " in past "
- + formatIntervalForLog(timeSinceLastReport) + "; "
- + countLifeTime + " during process lifetime");
- }
- }
- }
-
- public void logFailedEvent(Collection<AuditEventBase> events, Throwable excp) {
- for (AuditEventBase event : events) {
- logFailedEvent(event, excp);
- }
- }
-
- public void logFailedEventJSON(String event, Throwable excp) {
- long now = System.currentTimeMillis();
-
- long timeSinceLastReport = now - mFailedLogLastReportTime.get();
- long countSinceLastReport = mFailedLogCountSinceLastReport
- .incrementAndGet();
- long countLifeTime = mFailedLogCountLifeTime.incrementAndGet();
-
- if (timeSinceLastReport >= mLogFailureReportMinIntervalInMs) {
- mFailedLogLastReportTime.set(now);
- mFailedLogCountSinceLastReport.set(0);
-
- if (excp != null) {
- LOG.warn("failed to log audit event: " + event, excp);
- } else {
- LOG.warn("failed to log audit event: " + event);
- }
-
- if (countLifeTime > 1) { // no stats to print for the 1st failure
- LOG.warn("Log failure count: " + countSinceLastReport
- + " in past "
- + formatIntervalForLog(timeSinceLastReport) + "; "
- + countLifeTime + " during process lifetime");
- }
- }
- }
-
- public void logFailedEventJSON(Collection<String> events, Throwable excp) {
- for (String event : events) {
- logFailedEventJSON(event, excp);
- }
- }
-
- /*
- * (non-Javadoc)
- *
- * @see
- * org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger.
- * audit.model.AuditEventBase)
- */
- @Override
- public boolean log(AuditEventBase event) {
- List<AuditEventBase> eventList = new ArrayList<AuditEventBase>();
- eventList.add(event);
- return log(eventList);
- }
-
- /*
- * (non-Javadoc)
- *
- * @see
- * org.apache.ranger.audit.provider.AuditProvider#logJSON(java.lang.String)
- */
- @Override
- public boolean logJSON(String event) {
- AuditEventBase eventObj = MiscUtil.fromJson(event,
- AuthzAuditEvent.class);
- return log(eventObj);
- }
-
- /*
- * (non-Javadoc)
- *
- * @see
- * org.apache.ranger.audit.provider.AuditProvider#logJSON(java.util.Collection
- * )
- */
- @Override
- public boolean logJSON(Collection<String> events) {
- boolean ret = true;
- for (String event : events) {
- ret = logJSON(event);
- if (!ret) {
- break;
- }
- }
- return ret;
- }
-
- public void setName(String name) {
- providerName = name;
- }
-
- @Override
- public String getName() {
- return providerName;
- }
-
- @Override
- public boolean isDrain() {
- return isDrain;
- }
-
- public void setDrain(boolean isDrain) {
- this.isDrain = isDrain;
- }
-
- public int getMaxQueueSize() {
- return maxQueueSize;
- }
-
- public void setMaxQueueSize(int maxQueueSize) {
- this.maxQueueSize = maxQueueSize;
- }
-
- @Override
- public int getMaxBatchInterval() {
- return maxBatchInterval;
- }
-
- public void setMaxBatchInterval(int maxBatchInterval) {
- this.maxBatchInterval = maxBatchInterval;
- }
-
- @Override
- public int getMaxBatchSize() {
- return maxBatchSize;
- }
-
- public void setMaxBatchSize(int maxBatchSize) {
- this.maxBatchSize = maxBatchSize;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#waitToComplete()
- */
- @Override
- public void waitToComplete() {
- if (consumer != null) {
- consumer.waitToComplete(-1);
- }
- }
-
- @Override
- public void waitToComplete(long timeout) {
- if (consumer != null) {
- consumer.waitToComplete(timeout);
- }
- }
-
- @Override
- public boolean isFlushPending() {
- return false;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#getLastFlushTime()
- */
- @Override
- public long getLastFlushTime() {
- if (consumer != null) {
- return consumer.getLastFlushTime();
- }
- return 0;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#flush()
- */
- @Override
- public void flush() {
- if (consumer != null) {
- consumer.flush();
- }
- }
-
- public AtomicLong getLifeTimeInLogCount() {
- return lifeTimeInLogCount;
- }
-
- public long addLifeTimeInLogCount(long count) {
- return lifeTimeInLogCount.addAndGet(count);
- }
-
- public void logError(String msg) {
- long currTimeMS = System.currentTimeMillis();
- if (currTimeMS - lastErrorLogMS > errorLogIntervalMS) {
- LOG.error(msg);
- lastErrorLogMS = currTimeMS;
- }
- }
-
- public void logError(String msg, Throwable ex) {
- long currTimeMS = System.currentTimeMillis();
- if (currTimeMS - lastErrorLogMS > errorLogIntervalMS) {
- LOG.error(msg, ex);
- lastErrorLogMS = currTimeMS;
- }
- }
-
- public String getTimeDiffStr(long time1, long time2) {
- long timeInMs = Math.abs(time1 - time2);
- return formatIntervalForLog(timeInMs);
- }
-
- public String formatIntervalForLog(long timeInMs) {
- long hours = timeInMs / (60 * 60 * 1000);
- long minutes = (timeInMs / (60 * 1000)) % 60;
- long seconds = (timeInMs % (60 * 1000)) / 1000;
- long mSeconds = (timeInMs % (1000));
-
- if (hours > 0)
- return String.format("%02d:%02d:%02d.%03d hours", hours, minutes,
- seconds, mSeconds);
- else if (minutes > 0)
- return String.format("%02d:%02d.%03d minutes", minutes, seconds,
- mSeconds);
- else if (seconds > 0)
- return String.format("%02d.%03d seconds", seconds, mSeconds);
- else
- return String.format("%03d milli-seconds", mSeconds);
- }
-}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/provider/BufferedAuditProvider.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/BufferedAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/BufferedAuditProvider.java
index ab6a74a..ca842f3 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/BufferedAuditProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/BufferedAuditProvider.java
@@ -23,7 +23,7 @@ import java.util.Properties;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.model.AuthzAuditEvent;
-public abstract class BufferedAuditProvider extends BaseAuditProvider {
+public abstract class BufferedAuditProvider extends BaseAuditHandler {
private LogBuffer<AuditEventBase> mBuffer = null;
private LogDestination<AuditEventBase> mDestination = null;
@@ -107,16 +107,6 @@ public abstract class BufferedAuditProvider extends BaseAuditProvider {
}
@Override
- public boolean isFlushPending() {
- return false;
- }
-
- @Override
- public long getLastFlushTime() {
- return 0;
- }
-
- @Override
public void flush() {
}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/provider/DbAuditProvider.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/DbAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/DbAuditProvider.java
index f4bd90c..d475f89 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/DbAuditProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/DbAuditProvider.java
@@ -177,16 +177,6 @@ public class DbAuditProvider extends AuditDestination {
}
@Override
- public boolean isFlushPending() {
- return mUncommitted.size() > 0;
- }
-
- @Override
- public long getLastFlushTime() {
- return mLastCommitTime;
- }
-
- @Override
public void flush() {
if(mUncommitted.size() > 0) {
boolean isSuccess = commitTransaction();
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/provider/DummyAuditProvider.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/DummyAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/DummyAuditProvider.java
index 619a99d..05f882f 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/DummyAuditProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/DummyAuditProvider.java
@@ -24,7 +24,7 @@ import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.model.AuthzAuditEvent;
-public class DummyAuditProvider implements AuditProvider {
+public class DummyAuditProvider implements AuditHandler {
@Override
public void init(Properties prop) {
// intentionally left empty
@@ -74,23 +74,6 @@ public class DummyAuditProvider implements AuditProvider {
// intentionally left empty
}
-
- @Override
- public int getMaxBatchSize() {
- // TODO Auto-generated method stub
- return 0;
- }
-
- @Override
- public boolean isFlushPending() {
- return false;
- }
-
- @Override
- public long getLastFlushTime() {
- return 0;
- }
-
@Override
public void flush() {
// intentionally left empty
@@ -120,20 +103,4 @@ public class DummyAuditProvider implements AuditProvider {
return this.getClass().getName();
}
- /* (non-Javadoc)
- * @see org.apache.ranger.audit.provider.AuditProvider#isDrain()
- */
- @Override
- public boolean isDrain() {
- return false;
- }
-
- /* (non-Javadoc)
- * @see org.apache.ranger.audit.provider.AuditProvider#getMaxBatchInterval()
- */
- @Override
- public int getMaxBatchInterval() {
- // TODO Auto-generated method stub
- return 0;
- }
}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jAuditProvider.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jAuditProvider.java
index 040a045..0402de2 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jAuditProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jAuditProvider.java
@@ -27,8 +27,6 @@ import org.apache.ranger.audit.destination.AuditDestination;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.model.AuthzAuditEvent;
-import com.sun.tools.hat.internal.util.Misc;
-
public class Log4jAuditProvider extends AuditDestination {
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/provider/MultiDestAuditProvider.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/MultiDestAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/MultiDestAuditProvider.java
index 876fa5b..4c1593a 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/MultiDestAuditProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/MultiDestAuditProvider.java
@@ -26,18 +26,18 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.model.AuditEventBase;
-public class MultiDestAuditProvider extends BaseAuditProvider {
+public class MultiDestAuditProvider extends BaseAuditHandler {
private static final Log LOG = LogFactory
.getLog(MultiDestAuditProvider.class);
- protected List<AuditProvider> mProviders = new ArrayList<AuditProvider>();
+ protected List<AuditHandler> mProviders = new ArrayList<AuditHandler>();
public MultiDestAuditProvider() {
LOG.info("MultiDestAuditProvider: creating..");
}
- public MultiDestAuditProvider(AuditProvider provider) {
+ public MultiDestAuditProvider(AuditHandler provider) {
addAuditProvider(provider);
}
@@ -47,7 +47,7 @@ public class MultiDestAuditProvider extends BaseAuditProvider {
super.init(props);
- for (AuditProvider provider : mProviders) {
+ for (AuditHandler provider : mProviders) {
try {
provider.init(props);
} catch (Throwable excp) {
@@ -57,7 +57,7 @@ public class MultiDestAuditProvider extends BaseAuditProvider {
}
}
- public void addAuditProvider(AuditProvider provider) {
+ public void addAuditProvider(AuditHandler provider) {
if (provider != null) {
LOG.info("MultiDestAuditProvider.addAuditProvider(providerType="
+ provider.getClass().getCanonicalName() + ")");
@@ -66,9 +66,9 @@ public class MultiDestAuditProvider extends BaseAuditProvider {
}
}
- public void addAuditProviders(List<AuditProvider> providers) {
+ public void addAuditProviders(List<AuditHandler> providers) {
if (providers != null) {
- for (AuditProvider provider : providers) {
+ for (AuditHandler provider : providers) {
LOG.info("Adding " + provider.getName()
+ " as consumer to MultiDestination " + getName());
addAuditProvider(provider);
@@ -78,7 +78,7 @@ public class MultiDestAuditProvider extends BaseAuditProvider {
@Override
public boolean log(AuditEventBase event) {
- for (AuditProvider provider : mProviders) {
+ for (AuditHandler provider : mProviders) {
try {
provider.log(event);
} catch (Throwable excp) {
@@ -90,7 +90,7 @@ public class MultiDestAuditProvider extends BaseAuditProvider {
@Override
public boolean log(Collection<AuditEventBase> events) {
- for (AuditProvider provider : mProviders) {
+ for (AuditHandler provider : mProviders) {
try {
provider.log(events);
} catch (Throwable excp) {
@@ -102,7 +102,7 @@ public class MultiDestAuditProvider extends BaseAuditProvider {
@Override
public boolean logJSON(String event) {
- for (AuditProvider provider : mProviders) {
+ for (AuditHandler provider : mProviders) {
try {
provider.logJSON(event);
} catch (Throwable excp) {
@@ -114,7 +114,7 @@ public class MultiDestAuditProvider extends BaseAuditProvider {
@Override
public boolean logJSON(Collection<String> events) {
- for (AuditProvider provider : mProviders) {
+ for (AuditHandler provider : mProviders) {
try {
provider.logJSON(events);
} catch (Throwable excp) {
@@ -126,7 +126,7 @@ public class MultiDestAuditProvider extends BaseAuditProvider {
@Override
public void start() {
- for (AuditProvider provider : mProviders) {
+ for (AuditHandler provider : mProviders) {
try {
provider.start();
} catch (Throwable excp) {
@@ -138,7 +138,7 @@ public class MultiDestAuditProvider extends BaseAuditProvider {
@Override
public void stop() {
- for (AuditProvider provider : mProviders) {
+ for (AuditHandler provider : mProviders) {
try {
provider.stop();
} catch (Throwable excp) {
@@ -150,7 +150,7 @@ public class MultiDestAuditProvider extends BaseAuditProvider {
@Override
public void waitToComplete() {
- for (AuditProvider provider : mProviders) {
+ for (AuditHandler provider : mProviders) {
try {
provider.waitToComplete();
} catch (Throwable excp) {
@@ -163,7 +163,7 @@ public class MultiDestAuditProvider extends BaseAuditProvider {
@Override
public void waitToComplete(long timeout) {
- for (AuditProvider provider : mProviders) {
+ for (AuditHandler provider : mProviders) {
try {
provider.waitToComplete(timeout);
} catch (Throwable excp) {
@@ -175,35 +175,8 @@ public class MultiDestAuditProvider extends BaseAuditProvider {
}
@Override
- public boolean isFlushPending() {
- for (AuditProvider provider : mProviders) {
- if (provider.isFlushPending()) {
- return true;
- }
- }
-
- return false;
- }
-
- @Override
- public long getLastFlushTime() {
- long lastFlushTime = 0;
- for (AuditProvider provider : mProviders) {
- long flushTime = provider.getLastFlushTime();
-
- if (flushTime != 0) {
- if (lastFlushTime == 0 || lastFlushTime > flushTime) {
- lastFlushTime = flushTime;
- }
- }
- }
-
- return lastFlushTime;
- }
-
- @Override
public void flush() {
- for (AuditProvider provider : mProviders) {
+ for (AuditHandler provider : mProviders) {
try {
provider.flush();
} catch (Throwable excp) {
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/provider/kafka/KafkaAuditProvider.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/kafka/KafkaAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/kafka/KafkaAuditProvider.java
index 5f39e69..2c77b40 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/kafka/KafkaAuditProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/kafka/KafkaAuditProvider.java
@@ -25,12 +25,12 @@ import kafka.producer.ProducerConfig;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.ranger.audit.destination.AuditDestination;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.model.AuthzAuditEvent;
-import org.apache.ranger.audit.provider.BaseAuditProvider;
import org.apache.ranger.audit.provider.MiscUtil;
-public class KafkaAuditProvider extends BaseAuditProvider {
+public class KafkaAuditProvider extends AuditDestination {
private static final Log LOG = LogFactory.getLog(KafkaAuditProvider.class);
public static final String AUDIT_MAX_QUEUE_SIZE_PROP = "xasecure.audit.kafka.async.max.queue.size";
@@ -47,11 +47,6 @@ public class KafkaAuditProvider extends BaseAuditProvider {
LOG.info("init() called");
super.init(props);
- setMaxQueueSize(MiscUtil.getIntProperty(props,
- AUDIT_MAX_QUEUE_SIZE_PROP, AUDIT_MAX_QUEUE_SIZE_DEFAULT));
- setMaxBatchInterval(MiscUtil.getIntProperty(props,
- AUDIT_MAX_QUEUE_SIZE_PROP,
- AUDIT_BATCH_INTERVAL_DEFAULT_MS));
topic = MiscUtil.getStringProperty(props,
AUDIT_KAFKA_TOPIC_NAME);
if (topic == null || topic.isEmpty()) {
@@ -176,19 +171,6 @@ public class KafkaAuditProvider extends BaseAuditProvider {
}
@Override
- public boolean isFlushPending() {
- LOG.info("isFlushPending() called");
- return false;
- }
-
- @Override
- public long getLastFlushTime() {
- LOG.info("getLastFlushTime() called");
-
- return 0;
- }
-
- @Override
public void flush() {
LOG.info("flush() called");
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/provider/solr/SolrAuditProvider.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/solr/SolrAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/solr/SolrAuditProvider.java
index 9ee4ec0..53e4348 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/solr/SolrAuditProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/solr/SolrAuditProvider.java
@@ -25,16 +25,16 @@ import java.util.Properties;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.ranger.audit.destination.AuditDestination;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.model.AuthzAuditEvent;
-import org.apache.ranger.audit.provider.BaseAuditProvider;
import org.apache.ranger.audit.provider.MiscUtil;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.UpdateResponse;
import org.apache.solr.common.SolrInputDocument;
-public class SolrAuditProvider extends BaseAuditProvider {
+public class SolrAuditProvider extends AuditDestination {
private static final Log LOG = LogFactory.getLog(SolrAuditProvider.class);
public static final String AUDIT_MAX_QUEUE_SIZE_PROP = "xasecure.audit.solr.async.max.queue.size";
@@ -56,11 +56,6 @@ public class SolrAuditProvider extends BaseAuditProvider {
LOG.info("init() called");
super.init(props);
- setMaxQueueSize(MiscUtil.getIntProperty(props,
- AUDIT_MAX_QUEUE_SIZE_PROP, AUDIT_MAX_QUEUE_SIZE_DEFAULT));
- setMaxBatchInterval(MiscUtil.getIntProperty(props,
- AUDIT_MAX_QUEUE_SIZE_PROP,
- AUDIT_BATCH_INTERVAL_DEFAULT_MS));
retryWaitTime = MiscUtil.getIntProperty(props,
AUDIT_RETRY_WAIT_PROP, retryWaitTime);
}
@@ -241,29 +236,7 @@ public class SolrAuditProvider extends BaseAuditProvider {
public void waitToComplete(long timeout) {
}
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#isFlushPending()
- */
- @Override
- public boolean isFlushPending() {
- // TODO Auto-generated method stub
- return false;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#getLastFlushTime()
- */
- @Override
- public long getLastFlushTime() {
- // TODO Auto-generated method stub
- return 0;
- }
-
+
/*
* (non-Javadoc)
*
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditAsyncQueue.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditAsyncQueue.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditAsyncQueue.java
index a6f291d..d16fff9 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditAsyncQueue.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditAsyncQueue.java
@@ -21,32 +21,27 @@ package org.apache.ranger.audit.queue;
import java.util.ArrayList;
import java.util.Collection;
-import java.util.concurrent.LinkedTransferQueue;
+import java.util.concurrent.LinkedBlockingQueue;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.model.AuditEventBase;
-import org.apache.ranger.audit.provider.AuditProvider;
-import org.apache.ranger.audit.provider.BaseAuditProvider;
+import org.apache.ranger.audit.provider.AuditHandler;
/**
* This is a non-blocking queue with no limit on capacity.
*/
-public class AuditAsyncQueue extends BaseAuditProvider implements Runnable {
+public class AuditAsyncQueue extends AuditQueue implements Runnable {
private static final Log logger = LogFactory.getLog(AuditAsyncQueue.class);
- LinkedTransferQueue<AuditEventBase> queue = new LinkedTransferQueue<AuditEventBase>();
+ LinkedBlockingQueue<AuditEventBase> queue = new LinkedBlockingQueue<AuditEventBase>();
Thread consumerThread = null;
static final int MAX_DRAIN = 1000;
static int threadCount = 0;
static final String DEFAULT_NAME = "async";
- public AuditAsyncQueue() {
- setName(DEFAULT_NAME);
- }
-
- public AuditAsyncQueue(AuditProvider consumer) {
+ public AuditAsyncQueue(AuditHandler consumer) {
super(consumer);
setName(DEFAULT_NAME);
}
@@ -65,7 +60,6 @@ public class AuditAsyncQueue extends BaseAuditProvider implements Runnable {
return false;
}
queue.add(event);
- addLifeTimeInLogCount(1);
return true;
}
@@ -90,6 +84,9 @@ public class AuditAsyncQueue extends BaseAuditProvider implements Runnable {
public void start() {
if (consumer != null) {
consumer.start();
+ } else {
+ logger.error("consumer is not set. Nothing will be sent to any consumer. name="
+ + getName());
}
consumerThread = new Thread(this, this.getClass().getName()
@@ -110,23 +107,10 @@ public class AuditAsyncQueue extends BaseAuditProvider implements Runnable {
if (consumerThread != null) {
consumerThread.interrupt();
}
- consumerThread = null;
} catch (Throwable t) {
// ignore any exception
}
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#isFlushPending()
- */
- @Override
- public boolean isFlushPending() {
- if (queue.isEmpty()) {
- return consumer.isFlushPending();
- }
- return true;
+ consumerThread = null;
}
/*
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditBatchQueue.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditBatchQueue.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditBatchQueue.java
index 5e21efc..8ed07bd 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditBatchQueue.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditBatchQueue.java
@@ -29,10 +29,9 @@ import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.model.AuditEventBase;
-import org.apache.ranger.audit.provider.AuditProvider;
-import org.apache.ranger.audit.provider.BaseAuditProvider;
+import org.apache.ranger.audit.provider.AuditHandler;
-public class AuditBatchQueue extends BaseAuditProvider implements Runnable {
+public class AuditBatchQueue extends AuditQueue implements Runnable {
private static final Log logger = LogFactory.getLog(AuditBatchQueue.class);
private BlockingQueue<AuditEventBase> queue = null;
@@ -41,10 +40,7 @@ public class AuditBatchQueue extends BaseAuditProvider implements Runnable {
Thread consumerThread = null;
static int threadCount = 0;
- public AuditBatchQueue() {
- }
-
- public AuditBatchQueue(AuditProvider consumer) {
+ public AuditBatchQueue(AuditHandler consumer) {
super(consumer);
}
@@ -59,7 +55,6 @@ public class AuditBatchQueue extends BaseAuditProvider implements Runnable {
public boolean log(AuditEventBase event) {
// Add to batchQueue. Block if full
queue.add(event);
- addLifeTimeInLogCount(1);
return true;
}
@@ -130,10 +125,10 @@ public class AuditBatchQueue extends BaseAuditProvider implements Runnable {
if (consumerThread != null) {
consumerThread.interrupt();
}
- consumerThread = null;
} catch (Throwable t) {
// ignore any exception
}
+ consumerThread = null;
}
/*
@@ -187,19 +182,6 @@ public class AuditBatchQueue extends BaseAuditProvider implements Runnable {
/*
* (non-Javadoc)
*
- * @see org.apache.ranger.audit.provider.AuditProvider#isFlushPending()
- */
- @Override
- public boolean isFlushPending() {
- if (queue.isEmpty()) {
- return consumer.isFlushPending();
- }
- return true;
- }
-
- /*
- * (non-Javadoc)
- *
* @see org.apache.ranger.audit.provider.AuditProvider#flush()
*/
@Override
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileSpool.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileSpool.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileSpool.java
index 66d1573..a1c32b9 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileSpool.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileSpool.java
@@ -35,13 +35,13 @@ import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedTransferQueue;
+import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.model.AuditEventBase;
-import org.apache.ranger.audit.provider.AuditProvider;
+import org.apache.ranger.audit.provider.AuditHandler;
import org.apache.ranger.audit.provider.MiscUtil;
import com.google.gson.Gson;
@@ -69,10 +69,10 @@ public class AuditFileSpool implements Runnable {
// "filespool.index.done_filename";
public static final String PROP_FILE_SPOOL_DEST_RETRY_MS = "filespool.destination.retry.ms";
- AuditProvider queueProvider = null;
- AuditProvider consumerProvider = null;
+ AuditQueue queueProvider = null;
+ AuditHandler consumerProvider = null;
- BlockingQueue<AuditIndexRecord> indexQueue = new LinkedTransferQueue<AuditIndexRecord>();
+ BlockingQueue<AuditIndexRecord> indexQueue = new LinkedBlockingQueue<AuditIndexRecord>();
// Folder and File attributes
File logFolder = null;
@@ -108,10 +108,10 @@ public class AuditFileSpool implements Runnable {
boolean isDrain = false;
boolean isDestDown = true;
- private static Gson gson = null;
+ private Gson gson = null;
- public AuditFileSpool(AuditProvider queueProvider,
- AuditProvider consumerProvider) {
+ public AuditFileSpool(AuditQueue queueProvider,
+ AuditHandler consumerProvider) {
this.queueProvider = queueProvider;
this.consumerProvider = consumerProvider;
}
@@ -120,12 +120,12 @@ public class AuditFileSpool implements Runnable {
init(prop, null);
}
- public void init(Properties props, String basePropertyName) {
+ public boolean init(Properties props, String basePropertyName) {
if (initDone) {
logger.error("init() called more than once. queueProvider="
+ queueProvider.getName() + ", consumerProvider="
+ consumerProvider.getName());
- return;
+ return true;
}
String propPrefix = "xasecure.audit.filespool";
if (basePropertyName != null) {
@@ -162,22 +162,22 @@ public class AuditFileSpool implements Runnable {
+ queueProvider.getName());
if (logFolderProp == null || logFolderProp.isEmpty()) {
- logger.error("Audit spool folder is not configured. Please set "
+ logger.fatal("Audit spool folder is not configured. Please set "
+ propPrefix
+ "."
+ PROP_FILE_SPOOL_LOCAL_DIR
+ ". queueName=" + queueProvider.getName());
- return;
+ return false;
}
logFolder = new File(logFolderProp);
if (!logFolder.isDirectory()) {
logFolder.mkdirs();
if (!logFolder.isDirectory()) {
- logger.error("File Spool folder not found and can't be created. folder="
+ logger.fatal("File Spool folder not found and can't be created. folder="
+ logFolder.getAbsolutePath()
+ ", queueName="
+ queueProvider.getName());
- return;
+ return false;
}
}
logger.info("logFolder=" + logFolder + ", queueName="
@@ -202,7 +202,7 @@ public class AuditFileSpool implements Runnable {
+ archiveFolder.getAbsolutePath()
+ ", queueName="
+ queueProvider.getName());
- return;
+ return false;
}
}
logger.info("archiveFolder=" + archiveFolder + ", queueName="
@@ -218,17 +218,30 @@ public class AuditFileSpool implements Runnable {
indexFile = new File(logFolder, indexFileName);
if (!indexFile.exists()) {
- indexFile.createNewFile();
+ boolean ret = indexFile.createNewFile();
+ if (!ret) {
+ logger.fatal("Error creating index file. fileName="
+ + indexFile.getPath());
+ return false;
+ }
}
logger.info("indexFile=" + indexFile + ", queueName="
+ queueProvider.getName());
int lastDot = indexFileName.lastIndexOf('.');
+ if (lastDot < 0) {
+ lastDot = indexFileName.length() - 1;
+ }
indexDoneFileName = indexFileName.substring(0, lastDot)
+ "_closed.json";
indexDoneFile = new File(logFolder, indexDoneFileName);
if (!indexDoneFile.exists()) {
- indexDoneFile.createNewFile();
+ boolean ret = indexDoneFile.createNewFile();
+ if (!ret) {
+ logger.fatal("Error creating index done file. fileName="
+ + indexDoneFile.getPath());
+ return false;
+ }
}
logger.info("indexDoneFile=" + indexDoneFile + ", queueName="
+ queueProvider.getName());
@@ -252,8 +265,6 @@ public class AuditFileSpool implements Runnable {
}
}
printIndex();
- // One more loop to add the rest of the pending records in reverse
- // order
for (int i = 0; i < indexRecords.size(); i++) {
AuditIndexRecord auditIndexRecord = indexRecords.get(i);
if (auditIndexRecord.status.equals(SPOOL_FILE_STATUS.pending)) {
@@ -261,18 +272,19 @@ public class AuditFileSpool implements Runnable {
if (!consumerFile.exists()) {
logger.error("INIT: Consumer file="
+ consumerFile.getPath() + " not found.");
- System.exit(1);
+ } else {
+ indexQueue.add(auditIndexRecord);
}
- indexQueue.add(auditIndexRecord);
}
}
} catch (Throwable t) {
logger.fatal("Error initializing File Spooler. queue="
+ queueProvider.getName(), t);
- return;
+ return false;
}
initDone = true;
+ return true;
}
/**
@@ -328,6 +340,7 @@ public class AuditFileSpool implements Runnable {
out.flush();
out.close();
+ break;
} catch (Throwable t) {
logger.debug("Error closing spool out file.", t);
}
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditQueue.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditQueue.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditQueue.java
new file mode 100644
index 0000000..4c3ac5f
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditQueue.java
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ranger.audit.queue;
+
+import java.util.Properties;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.ranger.audit.provider.AuditHandler;
+import org.apache.ranger.audit.provider.BaseAuditHandler;
+import org.apache.ranger.audit.provider.MiscUtil;
+
+public abstract class AuditQueue extends BaseAuditHandler {
+ private static final Log LOG = LogFactory.getLog(AuditQueue.class);
+
+ public static final int AUDIT_MAX_QUEUE_SIZE_DEFAULT = 1024 * 1024;
+ public static final int AUDIT_BATCH_INTERVAL_DEFAULT_MS = 1000;
+ public static final int AUDIT_BATCH_SIZE_DEFAULT = 1000;
+
+ private int maxQueueSize = AUDIT_MAX_QUEUE_SIZE_DEFAULT;
+ private int maxBatchInterval = AUDIT_BATCH_INTERVAL_DEFAULT_MS;
+ private int maxBatchSize = AUDIT_BATCH_SIZE_DEFAULT;
+
+ public static final String PROP_QUEUE = "queue";
+
+ public static final String PROP_BATCH_SIZE = "batch.size";
+ public static final String PROP_QUEUE_SIZE = "queue.size";
+ public static final String PROP_BATCH_INTERVAL = "batch.interval.ms";
+
+ public static final String PROP_FILE_SPOOL_ENABLE = "filespool.enable";
+ public static final String PROP_FILE_SPOOL_WAIT_FOR_FULL_DRAIN = "filespool.drain.full.wait.ms";
+ public static final String PROP_FILE_SPOOL_QUEUE_THRESHOLD = "filespool.drain.threshold.percent";
+
+ final protected AuditHandler consumer;
+ protected AuditFileSpool fileSpooler = null;
+
+ private boolean isDrain = false;
+
+ protected boolean fileSpoolerEnabled = false;
+ protected int fileSpoolMaxWaitTime = 5 * 60 * 1000; // Default 5 minutes
+ protected int fileSpoolDrainThresholdPercent = 80;
+
+ /**
+ * @param consumer
+ */
+ public AuditQueue(AuditHandler consumer) {
+ this.consumer = consumer;
+ }
+
+ @Override
+ public void init(Properties props, String basePropertyName) {
+ LOG.info("AuditQueue.init()");
+ super.init(props, basePropertyName);
+
+ setMaxBatchSize(MiscUtil.getIntProperty(props, propPrefix + "."
+ + PROP_BATCH_SIZE, getMaxBatchSize()));
+ setMaxQueueSize(MiscUtil.getIntProperty(props, propPrefix + "."
+ + PROP_QUEUE_SIZE, getMaxQueueSize()));
+ setMaxBatchInterval(MiscUtil.getIntProperty(props, propPrefix + "."
+ + PROP_BATCH_INTERVAL, getMaxBatchInterval()));
+
+ fileSpoolerEnabled = MiscUtil.getBooleanProperty(props, propPrefix
+ + "." + PROP_FILE_SPOOL_ENABLE, false);
+ String logFolderProp = MiscUtil.getStringProperty(props, propPrefix
+ + "." + AuditFileSpool.PROP_FILE_SPOOL_LOCAL_DIR);
+ if (fileSpoolerEnabled || logFolderProp != null) {
+ LOG.info("File spool is enabled for " + getName()
+ + ", logFolderProp=" + logFolderProp + ", " + propPrefix
+ + "." + AuditFileSpool.PROP_FILE_SPOOL_LOCAL_DIR + "="
+ + fileSpoolerEnabled);
+ fileSpoolerEnabled = true;
+ fileSpoolMaxWaitTime = MiscUtil.getIntProperty(props, propPrefix
+ + "." + PROP_FILE_SPOOL_WAIT_FOR_FULL_DRAIN,
+ fileSpoolMaxWaitTime);
+ fileSpoolDrainThresholdPercent = MiscUtil.getIntProperty(props,
+ propPrefix + "." + PROP_FILE_SPOOL_QUEUE_THRESHOLD,
+ fileSpoolDrainThresholdPercent);
+ fileSpooler = new AuditFileSpool(this, consumer);
+ if (!fileSpooler.init(props, basePropertyName)) {
+ fileSpoolerEnabled = false;
+ LOG.fatal("Couldn't initialize file spooler. Disabling it. queue="
+ + getName() + ", consumer=" + consumer.getName());
+ }
+ } else {
+ LOG.info("File spool is disabled for " + getName());
+ }
+
+ }
+
+ public AuditHandler getConsumer() {
+ return consumer;
+ }
+
+ public boolean isDrain() {
+ return isDrain;
+ }
+
+ public void setDrain(boolean isDrain) {
+ this.isDrain = isDrain;
+ }
+
+ public int getMaxQueueSize() {
+ return maxQueueSize;
+ }
+
+ public void setMaxQueueSize(int maxQueueSize) {
+ this.maxQueueSize = maxQueueSize;
+ }
+
+ public int getMaxBatchInterval() {
+ return maxBatchInterval;
+ }
+
+ public void setMaxBatchInterval(int maxBatchInterval) {
+ this.maxBatchInterval = maxBatchInterval;
+ }
+
+ public int getMaxBatchSize() {
+ return maxBatchSize;
+ }
+
+ public void setMaxBatchSize(int maxBatchSize) {
+ this.maxBatchSize = maxBatchSize;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#waitToComplete()
+ */
+ @Override
+ public void waitToComplete() {
+ if (consumer != null) {
+ consumer.waitToComplete(-1);
+ }
+ }
+
+ @Override
+ public void waitToComplete(long timeout) {
+ if (consumer != null) {
+ consumer.waitToComplete(timeout);
+ }
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#flush()
+ */
+ @Override
+ public void flush() {
+ if (consumer != null) {
+ consumer.flush();
+ }
+ }
+
+}
[11/12] incubator-ranger git commit: RANGER-397 - Implement reliable
streaming audits to configurable destinations - Incorporate Review Feedback
Posted by bo...@apache.org.
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditSummaryQueue.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditSummaryQueue.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditSummaryQueue.java
index e102d8b..3e1940b 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditSummaryQueue.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditSummaryQueue.java
@@ -25,26 +25,25 @@ import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
-import java.util.concurrent.LinkedTransferQueue;
+import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.model.AuditEventBase;
-import org.apache.ranger.audit.provider.AuditProvider;
-import org.apache.ranger.audit.provider.BaseAuditProvider;
+import org.apache.ranger.audit.provider.AuditHandler;
import org.apache.ranger.audit.provider.MiscUtil;
/**
* This is a non-blocking queue with no limit on capacity.
*/
-public class AuditSummaryQueue extends BaseAuditProvider implements Runnable {
+public class AuditSummaryQueue extends AuditQueue implements Runnable {
private static final Log logger = LogFactory
.getLog(AuditSummaryQueue.class);
public static final String PROP_SUMMARY_INTERVAL = "summary.interval.ms";
- LinkedTransferQueue<AuditEventBase> queue = new LinkedTransferQueue<AuditEventBase>();
+ LinkedBlockingQueue<AuditEventBase> queue = new LinkedBlockingQueue<AuditEventBase>();
Thread consumerThread = null;
static int threadCount = 0;
@@ -52,15 +51,11 @@ public class AuditSummaryQueue extends BaseAuditProvider implements Runnable {
private static final int MAX_DRAIN = 100000;
- private int maxSummaryInterval = 5000;
+ private int maxSummaryIntervalMs = 5000;
HashMap<String, AuditSummary> summaryMap = new HashMap<String, AuditSummary>();
- public AuditSummaryQueue() {
- setName(DEFAULT_NAME);
- }
-
- public AuditSummaryQueue(AuditProvider consumer) {
+ public AuditSummaryQueue(AuditHandler consumer) {
super(consumer);
setName(DEFAULT_NAME);
}
@@ -68,9 +63,9 @@ public class AuditSummaryQueue extends BaseAuditProvider implements Runnable {
@Override
public void init(Properties props, String propPrefix) {
super.init(props, propPrefix);
- maxSummaryInterval = MiscUtil.getIntProperty(props, propPrefix + "."
- + PROP_SUMMARY_INTERVAL, maxSummaryInterval);
- logger.info("maxSummaryInterval=" + maxSummaryInterval + ", name="
+ maxSummaryIntervalMs = MiscUtil.getIntProperty(props, propPrefix + "."
+ + PROP_SUMMARY_INTERVAL, maxSummaryIntervalMs);
+ logger.info("maxSummaryInterval=" + maxSummaryIntervalMs + ", name="
+ getName());
}
@@ -88,7 +83,6 @@ public class AuditSummaryQueue extends BaseAuditProvider implements Runnable {
return false;
}
queue.add(event);
- addLifeTimeInLogCount(1);
return true;
}
@@ -133,23 +127,10 @@ public class AuditSummaryQueue extends BaseAuditProvider implements Runnable {
if (consumerThread != null) {
consumerThread.interrupt();
}
- consumerThread = null;
} catch (Throwable t) {
// ignore any exception
}
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#isFlushPending()
- */
- @Override
- public boolean isFlushPending() {
- if (queue.isEmpty()) {
- return consumer.isFlushPending();
- }
- return true;
+ consumerThread = null;
}
/*
@@ -164,7 +145,7 @@ public class AuditSummaryQueue extends BaseAuditProvider implements Runnable {
while (true) {
// Time to next dispatch
long nextDispatchDuration = lastDispatchTime
- - System.currentTimeMillis() + maxSummaryInterval;
+ - System.currentTimeMillis() + maxSummaryIntervalMs;
Collection<AuditEventBase> eventList = new ArrayList<AuditEventBase>();
@@ -184,7 +165,7 @@ public class AuditSummaryQueue extends BaseAuditProvider implements Runnable {
} else {
// poll returned due to timeout, so resetting clock
nextDispatchDuration = lastDispatchTime
- - System.currentTimeMillis() + maxSummaryInterval;
+ - System.currentTimeMillis() + maxSummaryIntervalMs;
lastDispatchTime = System.currentTimeMillis();
}
} catch (InterruptedException e) {
@@ -213,6 +194,9 @@ public class AuditSummaryQueue extends BaseAuditProvider implements Runnable {
}
if (isDrain() || nextDispatchDuration <= 0) {
+ // Reset time just before sending the logs
+ lastDispatchTime = System.currentTimeMillis();
+
for (Map.Entry<String, AuditSummary> entry : summaryMap
.entrySet()) {
AuditSummary auditSummary = entry.getValue();
@@ -221,9 +205,6 @@ public class AuditSummaryQueue extends BaseAuditProvider implements Runnable {
- auditSummary.startTime.getTime();
timeDiff = timeDiff > 0 ? timeDiff : 1;
auditSummary.event.setEventDurationMS(timeDiff);
-
- // Reset time just before sending the logs
- lastDispatchTime = System.currentTimeMillis();
boolean ret = consumer.log(auditSummary.event);
if (!ret) {
// We need to drop this event
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/agents-audit/src/main/java/org/apache/ranger/audit/test/TestEvents.java
----------------------------------------------------------------------
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/test/TestEvents.java b/agents-audit/src/main/java/org/apache/ranger/audit/test/TestEvents.java
index c2dc955..87c6a8f 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/test/TestEvents.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/test/TestEvents.java
@@ -23,7 +23,7 @@ import org.apache.log4j.xml.DOMConfigurator;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.model.AuthzAuditEvent;
import org.apache.ranger.audit.model.EnumRepositoryType;
-import org.apache.ranger.audit.provider.AuditProvider;
+import org.apache.ranger.audit.provider.AuditHandler;
import org.apache.ranger.audit.provider.AuditProviderFactory;
import org.apache.commons.logging.LogFactory;
@@ -74,7 +74,7 @@ public class TestEvents {
AuditProviderFactory.getInstance().init(auditProperties, "hdfs");
- AuditProvider provider = AuditProviderFactory.getAuditProvider();
+ AuditHandler provider = AuditProviderFactory.getAuditProvider();
LOG.info("provider=" + provider.toString());
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/security-admin/src/test/java/org/apache/ranger/audit/TestAuditQueue.java
----------------------------------------------------------------------
diff --git a/security-admin/src/test/java/org/apache/ranger/audit/TestAuditQueue.java b/security-admin/src/test/java/org/apache/ranger/audit/TestAuditQueue.java
index 45477e2..021c49a 100644
--- a/security-admin/src/test/java/org/apache/ranger/audit/TestAuditQueue.java
+++ b/security-admin/src/test/java/org/apache/ranger/audit/TestAuditQueue.java
@@ -32,14 +32,15 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.destination.FileAuditDestination;
import org.apache.ranger.audit.model.AuthzAuditEvent;
-import org.apache.ranger.audit.provider.AuditProvider;
+import org.apache.ranger.audit.provider.AuditHandler;
import org.apache.ranger.audit.provider.AuditProviderFactory;
-import org.apache.ranger.audit.provider.BaseAuditProvider;
+import org.apache.ranger.audit.provider.BaseAuditHandler;
import org.apache.ranger.audit.provider.MiscUtil;
import org.apache.ranger.audit.provider.MultiDestAuditProvider;
import org.apache.ranger.audit.queue.AuditAsyncQueue;
import org.apache.ranger.audit.queue.AuditBatchQueue;
import org.apache.ranger.audit.queue.AuditFileSpool;
+import org.apache.ranger.audit.queue.AuditQueue;
import org.apache.ranger.audit.queue.AuditSummaryQueue;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -93,9 +94,9 @@ public class TestAuditQueue {
AuditSummaryQueue queue = new AuditSummaryQueue(testConsumer);
Properties props = new Properties();
- props.put(BaseAuditProvider.PROP_DEFAULT_PREFIX + "."
+ props.put(BaseAuditHandler.PROP_DEFAULT_PREFIX + "."
+ AuditSummaryQueue.PROP_SUMMARY_INTERVAL, "" + 300);
- queue.init(props, BaseAuditProvider.PROP_DEFAULT_PREFIX);
+ queue.init(props, BaseAuditHandler.PROP_DEFAULT_PREFIX);
queue.start();
@@ -103,7 +104,7 @@ public class TestAuditQueue {
}
private void commonTestSummary(TestConsumer testConsumer,
- BaseAuditProvider queue) {
+ BaseAuditHandler queue) {
int messageToSend = 0;
int pauseMS = 330;
@@ -171,7 +172,6 @@ public class TestAuditQueue {
}
assertEquals(messageToSend, testConsumer.getSumTotal());
assertEquals(countToCheck, testConsumer.getCountTotal());
- assertNull("Event not in sequnce", testConsumer.isInSequence());
}
@Test
@@ -182,22 +182,23 @@ public class TestAuditQueue {
// Destination
String propPrefix = AuditProviderFactory.AUDIT_DEST_BASE + ".test";
props.put(propPrefix, "enable");
- props.put(BaseAuditProvider.PROP_DEFAULT_PREFIX + "." + "summary" + "."
+ props.put(BaseAuditHandler.PROP_DEFAULT_PREFIX + "." + "summary" + "."
+ "enabled", "true");
- props.put(propPrefix + "." + BaseAuditProvider.PROP_NAME, "test");
- props.put(propPrefix + "." + BaseAuditProvider.PROP_QUEUE, "none");
+ props.put(propPrefix + "." + BaseAuditHandler.PROP_NAME, "test");
+ props.put(propPrefix + "." + AuditQueue.PROP_QUEUE, "none");
- props.put(BaseAuditProvider.PROP_DEFAULT_PREFIX + "."
+ props.put(BaseAuditHandler.PROP_DEFAULT_PREFIX + "."
+ AuditSummaryQueue.PROP_SUMMARY_INTERVAL, "" + 300);
- props.put(propPrefix + "." + BaseAuditProvider.PROP_CLASS_NAME,
+ props.put(propPrefix + "." + BaseAuditHandler.PROP_CLASS_NAME,
TestConsumer.class.getName());
AuditProviderFactory factory = AuditProviderFactory.getInstance();
factory.init(props, "test");
- BaseAuditProvider queue = (BaseAuditProvider) factory.getProvider();
- BaseAuditProvider consumer = (BaseAuditProvider) queue.getConsumer();
- while (consumer.getConsumer() != null) {
- consumer = (BaseAuditProvider) consumer.getConsumer();
+ AuditQueue queue = (AuditQueue) factory.getProvider();
+ BaseAuditHandler consumer = (BaseAuditHandler) queue.getConsumer();
+ while (consumer != null && consumer instanceof AuditQueue) {
+ AuditQueue cQueue = (AuditQueue) consumer;
+ consumer = (BaseAuditHandler) cQueue.getConsumer();
}
assertTrue("Consumer should be TestConsumer. class="
+ consumer.getClass().getName(),
@@ -257,12 +258,12 @@ public class TestAuditQueue {
int queueSize = messageToSend * 2;
int intervalMS = messageToSend * 100; // Deliberately big interval
Properties props = new Properties();
- props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_SIZE, ""
+ props.put(basePropName + "." + AuditQueue.PROP_BATCH_SIZE, ""
+ batchSize);
- props.put(basePropName + "." + BaseAuditProvider.PROP_QUEUE_SIZE, ""
+ props.put(basePropName + "." + AuditQueue.PROP_QUEUE_SIZE, ""
+ queueSize);
- props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_INTERVAL,
- "" + intervalMS);
+ props.put(basePropName + "." + AuditQueue.PROP_BATCH_INTERVAL, ""
+ + intervalMS);
TestConsumer testConsumer = new TestConsumer();
AuditBatchQueue queue = new AuditBatchQueue(testConsumer);
@@ -308,12 +309,12 @@ public class TestAuditQueue {
int expectedBatchSize = (messageToSend * pauseMS) / intervalMS + 1;
Properties props = new Properties();
- props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_SIZE, ""
+ props.put(basePropName + "." + AuditQueue.PROP_BATCH_SIZE, ""
+ batchSize);
- props.put(basePropName + "." + BaseAuditProvider.PROP_QUEUE_SIZE, ""
+ props.put(basePropName + "." + AuditQueue.PROP_QUEUE_SIZE, ""
+ queueSize);
- props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_INTERVAL,
- "" + intervalMS);
+ props.put(basePropName + "." + AuditQueue.PROP_BATCH_INTERVAL, ""
+ + intervalMS);
TestConsumer testConsumer = new TestConsumer();
AuditBatchQueue queue = new AuditBatchQueue(testConsumer);
@@ -356,15 +357,15 @@ public class TestAuditQueue {
int queueSize = messageToSend * 2;
int intervalMS = Integer.MAX_VALUE; // Deliberately big interval
Properties props = new Properties();
- props.put(basePropName + "." + BaseAuditProvider.PROP_NAME,
+ props.put(basePropName + "." + BaseAuditHandler.PROP_NAME,
"testAuditBatchQueueDestDown");
- props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_SIZE, ""
+ props.put(basePropName + "." + AuditQueue.PROP_BATCH_SIZE, ""
+ batchSize);
- props.put(basePropName + "." + BaseAuditProvider.PROP_QUEUE_SIZE, ""
+ props.put(basePropName + "." + AuditQueue.PROP_QUEUE_SIZE, ""
+ queueSize);
- props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_INTERVAL,
- "" + intervalMS);
+ props.put(basePropName + "." + AuditQueue.PROP_BATCH_INTERVAL, ""
+ + intervalMS);
// Enable File Spooling
props.put(basePropName + "." + "filespool.enable", "" + true);
@@ -410,21 +411,20 @@ public class TestAuditQueue {
int intervalMS = 3000; // Deliberately big interval
Properties props = new Properties();
props.put(
- basePropName + "." + BaseAuditProvider.PROP_NAME,
+ basePropName + "." + BaseAuditHandler.PROP_NAME,
"testAuditBatchQueueDestDownFlipFlop_"
+ MiscUtil.generateUniqueId());
- props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_SIZE, ""
+ props.put(basePropName + "." + AuditQueue.PROP_BATCH_SIZE, ""
+ batchSize);
- props.put(basePropName + "." + BaseAuditProvider.PROP_QUEUE_SIZE, ""
+ props.put(basePropName + "." + AuditQueue.PROP_QUEUE_SIZE, ""
+ queueSize);
- props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_INTERVAL,
- "" + intervalMS);
+ props.put(basePropName + "." + AuditQueue.PROP_BATCH_INTERVAL, ""
+ + intervalMS);
// Enable File Spooling
int destRetryMS = 10;
- props.put(
- basePropName + "." + BaseAuditProvider.PROP_FILE_SPOOL_ENABLE,
+ props.put(basePropName + "." + AuditQueue.PROP_FILE_SPOOL_ENABLE,
"" + true);
props.put(
basePropName + "." + AuditFileSpool.PROP_FILE_SPOOL_LOCAL_DIR,
@@ -499,21 +499,20 @@ public class TestAuditQueue {
int maxArchivedFiles = 1;
Properties props = new Properties();
props.put(
- basePropName + "." + BaseAuditProvider.PROP_NAME,
+ basePropName + "." + BaseAuditHandler.PROP_NAME,
"testAuditBatchQueueDestDownRestart_"
+ MiscUtil.generateUniqueId());
- props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_SIZE, ""
+ props.put(basePropName + "." + AuditQueue.PROP_BATCH_SIZE, ""
+ batchSize);
- props.put(basePropName + "." + BaseAuditProvider.PROP_QUEUE_SIZE, ""
+ props.put(basePropName + "." + AuditQueue.PROP_QUEUE_SIZE, ""
+ queueSize);
- props.put(basePropName + "." + BaseAuditProvider.PROP_BATCH_INTERVAL,
- "" + intervalMS);
+ props.put(basePropName + "." + AuditQueue.PROP_BATCH_INTERVAL, ""
+ + intervalMS);
// Enable File Spooling
int destRetryMS = 10;
- props.put(
- basePropName + "." + BaseAuditProvider.PROP_FILE_SPOOL_ENABLE,
+ props.put(basePropName + "." + AuditQueue.PROP_FILE_SPOOL_ENABLE,
"" + true);
props.put(
basePropName + "." + AuditFileSpool.PROP_FILE_SPOOL_LOCAL_DIR,
@@ -598,7 +597,7 @@ public class TestAuditQueue {
// Destination
String filePropPrefix = AuditProviderFactory.AUDIT_DEST_BASE + ".file";
props.put(filePropPrefix, "enable");
- props.put(filePropPrefix + "." + BaseAuditProvider.PROP_NAME, "file");
+ props.put(filePropPrefix + "." + AuditQueue.PROP_NAME, "file");
props.put(filePropPrefix + "."
+ FileAuditDestination.PROP_FILE_LOCAL_DIR, logFolderName);
props.put(filePropPrefix + "."
@@ -607,21 +606,20 @@ public class TestAuditQueue {
props.put(filePropPrefix + "."
+ FileAuditDestination.PROP_FILE_FILE_ROLLOVER, "" + 10);
- props.put(filePropPrefix + "." + BaseAuditProvider.PROP_QUEUE, "batch");
+ props.put(filePropPrefix + "." + AuditQueue.PROP_QUEUE, "batch");
String batchPropPrefix = filePropPrefix + "." + "batch";
- props.put(batchPropPrefix + "." + BaseAuditProvider.PROP_BATCH_SIZE, ""
+ props.put(batchPropPrefix + "." + AuditQueue.PROP_BATCH_SIZE, ""
+ batchSize);
- props.put(batchPropPrefix + "." + BaseAuditProvider.PROP_QUEUE_SIZE, ""
+ props.put(batchPropPrefix + "." + AuditQueue.PROP_QUEUE_SIZE, ""
+ queueSize);
- props.put(
- batchPropPrefix + "." + BaseAuditProvider.PROP_BATCH_INTERVAL,
+ props.put(batchPropPrefix + "." + AuditQueue.PROP_BATCH_INTERVAL,
"" + intervalMS);
// Enable File Spooling
int destRetryMS = 10;
props.put(batchPropPrefix + "."
- + BaseAuditProvider.PROP_FILE_SPOOL_ENABLE, "" + true);
+ + AuditQueue.PROP_FILE_SPOOL_ENABLE, "" + true);
props.put(batchPropPrefix + "."
+ AuditFileSpool.PROP_FILE_SPOOL_LOCAL_DIR, "target");
props.put(batchPropPrefix + "."
@@ -638,7 +636,7 @@ public class TestAuditQueue {
// queue.init(props, batchPropPrefix);
// queue.start();
- AuditProvider queue = factory.getProvider();
+ AuditHandler queue = factory.getProvider();
for (int i = 0; i < messageToSend; i++) {
queue.log(createEvent());
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/4f3cea22/security-admin/src/test/java/org/apache/ranger/audit/TestConsumer.java
----------------------------------------------------------------------
diff --git a/security-admin/src/test/java/org/apache/ranger/audit/TestConsumer.java b/security-admin/src/test/java/org/apache/ranger/audit/TestConsumer.java
index d4d50f0..136874d 100644
--- a/security-admin/src/test/java/org/apache/ranger/audit/TestConsumer.java
+++ b/security-admin/src/test/java/org/apache/ranger/audit/TestConsumer.java
@@ -39,15 +39,13 @@ public class TestConsumer extends AuditDestination {
int batchCount = 0;
String providerName = getClass().getName();
boolean isDown = false;
- int batchSize = 3;
List<AuthzAuditEvent> eventList = new ArrayList<AuthzAuditEvent>();
/*
* (non-Javadoc)
*
- * @see
- * org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger
+ * @see org.apache.ranger.audit.provider.AuditProvider#log(org.apache.ranger
* .audit.model.AuditEventBase)
*/
@Override
@@ -144,32 +142,6 @@ public class TestConsumer extends AuditDestination {
public void waitToComplete() {
}
- @Override
- public int getMaxBatchSize() {
- return batchSize;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#isFlushPending()
- */
- @Override
- public boolean isFlushPending() {
- return false;
- }
-
- /*
- * (non-Javadoc)
- *
- * @see
- * org.apache.ranger.audit.provider.AuditProvider#getLastFlushTime()
- */
- @Override
- public long getLastFlushTime() {
- return 0;
- }
-
/*
* (non-Javadoc)
*
@@ -207,8 +179,7 @@ public class TestConsumer extends AuditDestination {
/*
* (non-Javadoc)
*
- * @see
- * org.apache.ranger.audit.provider.AuditProvider#waitToComplete(long)
+ * @see org.apache.ranger.audit.provider.AuditProvider#waitToComplete(long)
*/
@Override
public void waitToComplete(long timeout) {
@@ -225,23 +196,14 @@ public class TestConsumer extends AuditDestination {
return providerName;
}
- /*
- * (non-Javadoc)
- *
- * @see org.apache.ranger.audit.provider.AuditProvider#isDrain()
- */
- @Override
- public boolean isDrain() {
- return false;
- }
-
// Local methods
public AuthzAuditEvent isInSequence() {
- int lastSeq = -1;
+ long lastSeq = -1;
for (AuthzAuditEvent event : eventList) {
if (event.getSeqNum() <= lastSeq) {
return event;
}
+ lastSeq = event.getSeqNum();
}
return null;
}