();
- }
-
- @Override
- public EntityManager getEntityManager() {
- EntityManager em = null;
-
- if(sEntityManager != null) {
- em = sEntityManager.get();
-
- if(em == null && this.emf != null) {
- em = this.emf.createEntityManager();
-
- sEntityManager.set(em);
- }
- } else {
- logger.error("EntityManagerFactory was not set in this thread.", new Throwable());
- }
-
- return em;
- }
-}
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/AmazonCloudWatchAuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/AmazonCloudWatchAuditDestination.java
new file mode 100644
index 0000000000..f3ba77a3ec
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/destination/AmazonCloudWatchAuditDestination.java
@@ -0,0 +1,198 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ranger.audit.destination;
+
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Properties;
+import java.util.stream.Collectors;
+
+import com.amazonaws.services.logs.AWSLogs;
+import com.amazonaws.services.logs.AWSLogsClientBuilder;
+import com.amazonaws.services.logs.model.CreateLogStreamRequest;
+import com.amazonaws.services.logs.model.InputLogEvent;
+import com.amazonaws.services.logs.model.InvalidSequenceTokenException;
+import com.amazonaws.services.logs.model.PutLogEventsRequest;
+import com.amazonaws.services.logs.model.PutLogEventsResult;
+import com.amazonaws.services.logs.model.ResourceNotFoundException;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.ranger.audit.model.AuditEventBase;
+import org.apache.ranger.audit.provider.MiscUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+/**
+ * Writes audit events to Amazon CloudWatch Logs.
+ *
+ * Two properties are required: LogGroupName and LogStreamPrefix
+ *
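+ * An illustrative configuration sketch (the exact property prefix depends on how
+ * the destination is registered; "xasecure.audit.destination.amazon_cloudwatch" is assumed here):
+ * <pre>
+ * xasecure.audit.destination.amazon_cloudwatch.log_group=ranger_audits
+ * xasecure.audit.destination.amazon_cloudwatch.log_stream_prefix=ranger_audit
+ * xasecure.audit.destination.amazon_cloudwatch.region=us-east-1
+ * </pre>
+ *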
+ * Thread-safety is ensured by making the log method synchronized.
+ * This is to avoid possible race condition on {@link #sequenceToken} which is required in PutLogEvents API.
+ * @see PutLogEvents API Reference
+ *
+ * Note: Amazon CloudWatch has limits on the payload size and request rate.
+ * Based on the traffic, adjust the batch size and flush interval accordingly.
+ *
+ *
+ * @see Amazon CloudWatch Logs Service Limits
+ */
+@ThreadSafe
+public class AmazonCloudWatchAuditDestination extends AuditDestination {
+
+ private static final Logger LOG = LoggerFactory.getLogger(AmazonCloudWatchAuditDestination.class);
+
+ public static final String PROP_LOG_GROUP_NAME = "log_group";
+ public static final String PROP_LOG_STREAM_PREFIX = "log_stream_prefix";
+ public static final String CONFIG_PREFIX = "ranger.audit.amazon_cloudwatch";
+ public static final String PROP_REGION = "region";
+
+ private String logGroupName;
+ private String logStreamName;
+ private AWSLogs logsClient;
+ private String sequenceToken;
+ private String regionName;
+
+ @Override
+ public void init(Properties props, String propPrefix) {
+ LOG.info("init() called for CloudWatchAuditDestination");
+ super.init(props, propPrefix);
+
+ this.logGroupName = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_LOG_GROUP_NAME, "ranger_audits");
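+ // A unique id suffix gives each plugin instance its own stream, so writers do not contend on one stream's sequence token.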
+ this.logStreamName = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_LOG_STREAM_PREFIX) + MiscUtil.generateUniqueId();
+ this.regionName = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_REGION);
+
+ logsClient = getClient(); // Initialize client
+ createLogStream();
+ }
+
+ @Override
+ public void stop() {
+ super.stop();
+ logStatus();
+ }
+
+ @Override
+ public synchronized boolean log(Collection<AuditEventBase> collection) {
+ boolean ret = false;
+ AWSLogs client = getClient();
+
+ PutLogEventsRequest req = new PutLogEventsRequest()
+ .withLogEvents(toInputLogEvent(collection))
+ .withLogGroupName(logGroupName)
+ .withLogStreamName(logStreamName);
+
+ if (StringUtils.isNotBlank(sequenceToken)) {
+ req.setSequenceToken(sequenceToken);
+ }
+
+ try {
+ sequenceToken = pushLogEvents(req, false, client);
+ addSuccessCount(collection.size());
+ ret = true;
+ } catch (Throwable e) {
+ addFailedCount(collection.size());
+ LOG.error("Failed to send audit events", e);
+ }
+
+ return ret;
+ }
+
+ private String pushLogEvents(PutLogEventsRequest req,
+ boolean retryingOnInvalidSeqToken,
+ AWSLogs client) {
+ String sequenceToken;
+ try {
+ PutLogEventsResult re = client.putLogEvents(req);
+ sequenceToken = re.getNextSequenceToken();
+ } catch (ResourceNotFoundException ex) {
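+ // The log stream (or group) is missing, e.g. deleted out of band: create the stream and retry once.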
+ if (!retryingOnInvalidSeqToken) {
+ createLogStream();
+ return pushLogEvents(req, true, client);
+ }
+ throw ex;
+ } catch (InvalidSequenceTokenException ex) {
+ if (retryingOnInvalidSeqToken) {
+ LOG.error("Unexpected invalid sequence token. Possible race condition occurred");
+ throw ex;
+ }
+
+ // LogStream may exist before first push attempt, re-obtain the sequence token
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Invalid sequence token. Plugin possibly restarted. Updating the sequence token and retrying");
+ }
+ sequenceToken = ex.getExpectedSequenceToken();
+ req.setSequenceToken(sequenceToken);
+ return pushLogEvents(req, true, client);
+ }
+
+ return sequenceToken;
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see org.apache.ranger.audit.provider.AuditProvider#flush()
+ */
+ @Override
+ public void flush() {
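+ // Events are pushed to CloudWatch synchronously in log(), so there is nothing buffered to flush.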
+
+ }
+
+ static Collection<InputLogEvent> toInputLogEvent(Collection<AuditEventBase> collection) {
+ return collection.stream()
+ .map(e -> new InputLogEvent()
+ .withMessage(MiscUtil.stringify(e))
+ .withTimestamp(e.getEventTime().getTime()))
+ .sorted(Comparator.comparingLong(InputLogEvent::getTimestamp))
+ .collect(Collectors.toList());
+ }
+
+ private void createLogStream() {
+ AWSLogs client = getClient();
+ CreateLogStreamRequest req = new CreateLogStreamRequest()
+ .withLogGroupName(logGroupName)
+ .withLogStreamName(logStreamName);
+
+ LOG.info(String.format("Creating Log Stream `%s` in Log Group `%s`", logStreamName, logGroupName));
+ client.createLogStream(req);
+ }
+
+ private AWSLogs getClient() {
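+ // Double-checked locking: create the shared client once and reuse it across logging threads.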
+ if (logsClient == null) {
+ synchronized (AmazonCloudWatchAuditDestination.class) {
+ if (logsClient == null) {
+ logsClient = newClient();
+ }
+ }
+ }
+
+ return logsClient;
+ }
+
+ private AWSLogs newClient() {
+ if (StringUtils.isBlank(regionName)) {
+ return AWSLogsClientBuilder.standard().build();
+ }
+ return AWSLogsClientBuilder.standard().withRegion(regionName).build();
+ }
+}
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/AuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/AuditDestination.java
index 41d0e82783..c221487c24 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/destination/AuditDestination.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/destination/AuditDestination.java
@@ -21,16 +21,16 @@
import java.util.Properties;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.provider.BaseAuditHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This class needs to be extended by anyone who wants to build custom
* destination
*/
public abstract class AuditDestination extends BaseAuditHandler {
- private static final Log logger = LogFactory.getLog(AuditDestination.class);
+ private static final Logger logger = LoggerFactory.getLogger(AuditDestination.class);
public AuditDestination() {
logger.info("AuditDestination() enter");
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/DBAuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/DBAuditDestination.java
deleted file mode 100644
index 79f07d7a56..0000000000
--- a/agents-audit/src/main/java/org/apache/ranger/audit/destination/DBAuditDestination.java
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.ranger.audit.destination;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import javax.persistence.EntityManager;
-import javax.persistence.EntityManagerFactory;
-import javax.persistence.EntityTransaction;
-import javax.persistence.Persistence;
-
-import org.apache.ranger.audit.dao.DaoManager;
-import org.apache.ranger.audit.entity.AuthzAuditEventDbObj;
-import org.apache.ranger.audit.model.AuditEventBase;
-import org.apache.ranger.audit.provider.MiscUtil;
-
-public class DBAuditDestination extends AuditDestination {
-
- private static final Log logger = LogFactory
- .getLog(DBAuditDestination.class);
-
- public static final String PROP_DB_JDBC_DRIVER = "jdbc.driver";
- public static final String PROP_DB_JDBC_URL = "jdbc.url";
- public static final String PROP_DB_USER = "user";
- public static final String PROP_DB_PASSWORD = "password";
- public static final String PROP_DB_PASSWORD_ALIAS = "password.alias";
-
- private EntityManagerFactory entityManagerFactory;
- private DaoManager daoManager;
-
- private String jdbcDriver = null;
- private String jdbcURL = null;
- private String dbUser = null;
- private String dbPasswordAlias = "auditDBCred";
-
- public DBAuditDestination() {
- logger.info("DBAuditDestination() called");
- }
-
- @Override
- public void init(Properties props, String propPrefix) {
- logger.info("init() called");
- super.init(props, propPrefix);
- // Initial connect
- connect();
-
- // initialize the database related classes
- AuthzAuditEventDbObj.init(props);
- }
-
- /*
- * (non-Javadoc)
- *
- * @see
- * org.apache.ranger.audit.provider.AuditHandler#logger(java.util.Collection
- * )
- */
- @Override
- public boolean log(Collection<AuditEventBase> events) {
- boolean retValue = false;
- logStatusIfRequired();
- addTotalCount(events.size());
-
- if (beginTransaction()) {
- boolean isFailed = false;
- for (AuditEventBase event : events) {
- try {
- event.persist(daoManager);
- } catch (Throwable t) {
- logger.error("Error persisting data. event=" + event, t);
- isFailed = true;
- break;
- }
- }
- if (isFailed) {
- retValue = false;
- rollbackTransaction();
- } else {
- retValue = commitTransaction();
- }
- }
-
- if (retValue) {
- addSuccessCount(events.size());
- } else {
- addDeferredCount(events.size());
- }
- return retValue;
- }
-
- @Override
- public void stop() {
- cleanUp();
- super.stop();
- }
-
- // Local methods
- protected void connect() {
- if (isDbConnected()) {
- return;
- }
- try {
- jdbcDriver = MiscUtil.getStringProperty(props, propPrefix + "."
- + PROP_DB_JDBC_DRIVER);
- jdbcURL = MiscUtil.getStringProperty(props, propPrefix + "."
- + PROP_DB_JDBC_URL);
- dbUser = MiscUtil.getStringProperty(props, propPrefix + "."
- + PROP_DB_USER);
- String dbPasswordFromProp = MiscUtil.getStringProperty(props,
- propPrefix + "." + PROP_DB_PASSWORD);
- String tmpAlias = MiscUtil.getStringProperty(props, propPrefix
- + "." + PROP_DB_PASSWORD_ALIAS);
- dbPasswordAlias = tmpAlias != null ? tmpAlias : dbPasswordAlias;
- String credFile = MiscUtil.getStringProperty(props,
- AUDIT_DB_CREDENTIAL_PROVIDER_FILE);
-
- if (jdbcDriver == null || jdbcDriver.isEmpty()) {
- logger.fatal("JDBC driver not provided. Set property name "
- + propPrefix + "." + PROP_DB_JDBC_DRIVER);
- return;
- }
- if (jdbcURL == null || jdbcURL.isEmpty()) {
- logger.fatal("JDBC URL not provided. Set property name "
- + propPrefix + "." + PROP_DB_JDBC_URL);
- return;
- }
- if (dbUser == null || dbUser.isEmpty()) {
- logger.fatal("DB user not provided. Set property name "
- + propPrefix + "." + PROP_DB_USER);
- return;
- }
- String dbPassword = MiscUtil.getCredentialString(credFile,
- dbPasswordAlias);
-
- if (dbPassword == null || dbPassword.isEmpty()) {
- // If password is not in credential store, let's try password
- // from property
- dbPassword = dbPasswordFromProp;
- }
-
- if (dbPassword == null || dbPassword.isEmpty()) {
- logger.warn("DB password not provided. Will assume it is empty and continue");
- }
- logger.info("JDBC Driver=" + jdbcDriver + ", JDBC URL=" + jdbcURL
- + ", dbUser=" + dbUser + ", passwordAlias="
- + dbPasswordAlias + ", credFile=" + credFile
- + ", usingPassword=" + (dbPassword == null ? "no" : "yes"));
-
- Map<String, String> dbProperties = new HashMap<String, String>();
- dbProperties.put("javax.persistence.jdbc.driver", jdbcDriver);
- dbProperties.put("javax.persistence.jdbc.url", jdbcURL);
- dbProperties.put("javax.persistence.jdbc.user", dbUser);
- if (dbPassword != null) {
- dbProperties.put("javax.persistence.jdbc.password", dbPassword);
- }
-
- entityManagerFactory = Persistence.createEntityManagerFactory(
- "xa_server", dbProperties);
-
- logger.info("entityManagerFactory=" + entityManagerFactory);
-
- daoManager = new DaoManager();
- daoManager.setEntityManagerFactory(entityManagerFactory);
-
- // this forces the connection to be made to DB
- if (daoManager.getEntityManager() == null) {
- logger.error("Error connecting audit database. EntityManager is null. dbURL="
- + jdbcURL + ", dbUser=" + dbUser);
- } else {
- logger.info("Connected to audit database. dbURL=" + jdbcURL
- + ", dbUser=" + dbUser);
- }
-
- } catch (Throwable t) {
- logger.error("Error connecting audit database. dbURL=" + jdbcURL
- + ", dbUser=" + dbUser, t);
- }
- }
-
- private synchronized void cleanUp() {
- logger.info("DBAuditDestination: cleanUp()");
-
- try {
- if (entityManagerFactory != null && entityManagerFactory.isOpen()) {
- entityManagerFactory.close();
- }
- } catch (Exception excp) {
- logger.error("DBAuditDestination.cleanUp(): failed", excp);
- } finally {
- entityManagerFactory = null;
- daoManager = null;
- }
- logStatus();
- }
-
- private EntityManager getEntityManager() {
- DaoManager daoMgr = daoManager;
-
- if (daoMgr != null) {
- try {
- return daoMgr.getEntityManager();
- } catch (Exception excp) {
- logger.error("DBAuditDestination.getEntityManager(): failed",
- excp);
-
- cleanUp();
- }
- }
-
- return null;
- }
-
- private boolean isDbConnected() {
- EntityManager em = getEntityManager();
- return em != null && em.isOpen();
- }
-
- private void clearEntityManager() {
- try {
- EntityManager em = getEntityManager();
-
- if (em != null) {
- em.clear();
- }
- } catch (Exception excp) {
- logger.warn("DBAuditDestination.clearEntityManager(): failed", excp);
- }
- }
-
- private EntityTransaction getTransaction() {
- if (!isDbConnected()) {
- connect();
- }
-
- EntityManager em = getEntityManager();
-
- return em != null ? em.getTransaction() : null;
- }
-
- private boolean beginTransaction() {
- EntityTransaction trx = getTransaction();
-
- if (trx != null && !trx.isActive()) {
- trx.begin();
- }
-
- if (trx == null) {
- logger.warn("DBAuditDestination.beginTransaction(): trx is null");
- }
-
- return trx != null;
- }
-
- private boolean commitTransaction() {
- boolean ret = false;
- EntityTransaction trx = null;
-
- try {
- trx = getTransaction();
-
- if (trx != null && trx.isActive()) {
- trx.commit();
- ret = true;
- } else {
- throw new Exception("trx is null or not active");
- }
- } catch (Throwable excp) {
- logger.error("DBAuditDestination.commitTransaction(): failed", excp);
-
- cleanUp(); // so that next insert will try to init()
- } finally {
- clearEntityManager();
- }
-
- return ret;
- }
-
- private boolean rollbackTransaction() {
- boolean ret = false;
- EntityTransaction trx = null;
-
- try {
- trx = getTransaction();
-
- if (trx != null && trx.isActive()) {
- trx.rollback();
- ret = true;
- } else {
- throw new Exception("trx is null or not active");
- }
- } catch (Throwable excp) {
- logger.error("DBAuditDestination.rollbackTransaction(): failed",
- excp);
-
- cleanUp(); // so that next insert will try to init()
- } finally {
- clearEntityManager();
- }
-
- return ret;
- }
-
-}
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/ElasticSearchAuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/ElasticSearchAuditDestination.java
index d4897a406e..8324d998b2 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/destination/ElasticSearchAuditDestination.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/destination/ElasticSearchAuditDestination.java
@@ -29,11 +29,13 @@
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
+import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+
import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.http.HttpHost;
import org.apache.http.auth.AuthSchemeProvider;
import org.apache.http.client.CredentialsProvider;
@@ -55,12 +57,14 @@
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.client.RestHighLevelClient;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosTicket;
public class ElasticSearchAuditDestination extends AuditDestination {
- private static final Log LOG = LogFactory.getLog(ElasticSearchAuditDestination.class);
+ private static final Logger LOG = LoggerFactory.getLogger(ElasticSearchAuditDestination.class);
public static final String CONFIG_URLS = "urls";
public static final String CONFIG_PORT = "port";
@@ -71,8 +75,8 @@ public class ElasticSearchAuditDestination extends AuditDestination {
public static final String CONFIG_PREFIX = "ranger.audit.elasticsearch";
public static final String DEFAULT_INDEX = "ranger_audits";
- private String index = "index";
- private volatile RestHighLevelClient client = null;
+ private String index = CONFIG_INDEX;
+ private final AtomicReference<RestHighLevelClient> clientRef = new AtomicReference<>(null);
private String protocol;
private String user;
private int port;
@@ -125,12 +129,12 @@ public boolean log(Collection<AuditEventBase> events) {
 ArrayList<AuditEventBase> eventList = new ArrayList<>(events);
BulkRequest bulkRequest = new BulkRequest();
try {
- for (AuditEventBase event : eventList) {
+ eventList.forEach(event -> {
AuthzAuditEvent authzEvent = (AuthzAuditEvent) event;
String id = authzEvent.getEventId();
Map<String, Object> doc = toDoc(authzEvent);
bulkRequest.add(new IndexRequest(index).id(id).source(doc));
- }
+ });
} catch (Exception ex) {
addFailedCount(eventList.size());
logFailedEvent(eventList, ex);
@@ -148,7 +152,7 @@ public boolean log(Collection<AuditEventBase> events) {
addFailedCount(1);
logFailedEvent(Arrays.asList(itemRequest), itemResponse.getFailureMessage());
} else {
- if(LOG.isDebugEnabled()) {
+ if (LOG.isDebugEnabled()) {
LOG.debug(String.format("Indexed %s", itemRequest.getEventKey()));
}
addSuccessCount(1);
@@ -170,7 +174,7 @@ public boolean log(Collection<AuditEventBase> events) {
*/
@Override
public void flush() {
-
+ // Nothing to flush: bulk requests are sent to ElasticSearch synchronously in log()
}
public boolean isAsync() {
@@ -178,20 +182,24 @@ public boolean isAsync() {
}
synchronized RestHighLevelClient getClient() {
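+ // Cache the client in clientRef; double-checked locking avoids rebuilding it on every call.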
+ RestHighLevelClient client = clientRef.get();
if (client == null) {
synchronized (ElasticSearchAuditDestination.class) {
+ client = clientRef.get();
if (client == null) {
client = newClient();
+ clientRef.set(client);
}
}
}
if (subject != null) {
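+ // Rebuild the client if the Kerberos TGT has already expired; re-login proactively when it is about to.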
KerberosTicket ticket = CredentialsProviderUtil.getTGT(subject);
try {
- if (new Date().getTime() > ticket.getEndTime().getTime()){
- client = null;
+ if (new Date().getTime() > ticket.getEndTime().getTime()) {
+ clientRef.set(null);
CredentialsProviderUtil.ticketExpireTime80 = 0;
- newClient();
+ client = newClient();
+ clientRef.set(client);
} else if (CredentialsProviderUtil.ticketWillExpire(ticket)) {
subject = CredentialsProviderUtil.login(user, password);
}
@@ -209,8 +217,12 @@ public static RestClientBuilder getRestClientBuilder(String urls, String protoco
RestClientBuilder restClientBuilder = RestClient.builder(
MiscUtil.toArray(urls, ",").stream()
.map(x -> new HttpHost(x, port, protocol))
- .toArray(i -> new HttpHost[i])
+ .toArray(HttpHost[]::new)
);
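+ // Daemon threads so the async HTTP client threads cannot keep the JVM from exiting.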
+ ThreadFactory clientThreadFactory = new ThreadFactoryBuilder()
+ .setNameFormat("ElasticSearch rest client %s")
+ .setDaemon(true)
+ .build();
if (StringUtils.isNotBlank(user) && StringUtils.isNotBlank(password) && !user.equalsIgnoreCase("NONE") && !password.equalsIgnoreCase("NONE")) {
if (password.contains("keytab") && new File(password).exists()) {
final KerberosCredentialsProvider credentialsProvider =
@@ -218,6 +230,7 @@ public static RestClientBuilder getRestClientBuilder(String urls, String protoco
 Lookup<AuthSchemeProvider> authSchemeRegistry = RegistryBuilder.<AuthSchemeProvider>create()
.register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory()).build();
restClientBuilder.setHttpClientConfigCallback(clientBuilder -> {
+ clientBuilder.setThreadFactory(clientThreadFactory);
clientBuilder.setDefaultCredentialsProvider(credentialsProvider);
clientBuilder.setDefaultAuthSchemeRegistry(authSchemeRegistry);
return clientBuilder;
@@ -225,14 +238,20 @@ public static RestClientBuilder getRestClientBuilder(String urls, String protoco
} else {
final CredentialsProvider credentialsProvider =
CredentialsProviderUtil.getBasicCredentials(user, password);
- restClientBuilder.setHttpClientConfigCallback(clientBuilder ->
- clientBuilder.setDefaultCredentialsProvider(credentialsProvider));
+ restClientBuilder.setHttpClientConfigCallback(clientBuilder -> {
+ clientBuilder.setThreadFactory(clientThreadFactory);
+ clientBuilder.setDefaultCredentialsProvider(credentialsProvider);
+ return clientBuilder;
+ });
}
} else {
LOG.error("ElasticSearch Credentials not provided!!");
final CredentialsProvider credentialsProvider = null;
- restClientBuilder.setHttpClientConfigCallback(clientBuilder ->
- clientBuilder.setDefaultCredentialsProvider(credentialsProvider));
+ restClientBuilder.setHttpClientConfigCallback(clientBuilder -> {
+ clientBuilder.setThreadFactory(clientThreadFactory);
+ clientBuilder.setDefaultCredentialsProvider(credentialsProvider);
+ return clientBuilder;
+ });
}
return restClientBuilder;
}
@@ -244,30 +263,31 @@ private RestHighLevelClient newClient() {
}
RestClientBuilder restClientBuilder =
getRestClientBuilder(hosts, protocol, user, password, port);
RestHighLevelClient restHighLevelClient = new RestHighLevelClient(restClientBuilder);
if (LOG.isDebugEnabled()) {
LOG.debug("Initialized client");
}
- boolean exits = false;
+ // Keep the client open for the caller; wrapping it in try-with-resources would close it before use.
+ boolean exists = false;
try {
- exits = restHighLevelClient.indices().open(new OpenIndexRequest(this.index), RequestOptions.DEFAULT).isShardsAcknowledged();
+ exists = restHighLevelClient.indices().open(new OpenIndexRequest(this.index), RequestOptions.DEFAULT).isShardsAcknowledged();
} catch (Exception e) {
LOG.warn("Error validating index " + this.index);
}
- if(exits) {
+ if (exists) {
if (LOG.isDebugEnabled()) {
LOG.debug("Index exists");
}
} else {
LOG.info("Index does not exist");
}
return restHighLevelClient;
} catch (Throwable t) {
lastLoggedAt.updateAndGet(lastLoggedAt -> {
long now = System.currentTimeMillis();
long elapsed = now - lastLoggedAt;
if (elapsed > TimeUnit.MINUTES.toMillis(1)) {
- LOG.fatal("Can't connect to ElasticSearch server: " + connectionString(), t);
+ LOG.error("Can't connect to ElasticSearch server: " + connectionString(), t);
return now;
} else {
return lastLoggedAt;
@@ -297,7 +317,7 @@ private String getStringProperty(Properties props, String propName, String defau
}
Map<String, Object> toDoc(AuthzAuditEvent auditEvent) {
- Map<String, Object> doc = new HashMap<String, Object>();
+ Map<String, Object> doc = new HashMap<>();
doc.put("id", auditEvent.getEventId());
doc.put("access", auditEvent.getAccessType());
doc.put("enforcer", auditEvent.getAclEnforcer());
@@ -320,6 +340,8 @@ Map<String, Object> toDoc(AuthzAuditEvent auditEvent) {
doc.put("event_count", auditEvent.getEventCount());
doc.put("event_dur_ms", auditEvent.getEventDurationMS());
doc.put("tags", auditEvent.getTags());
+ doc.put("datasets", auditEvent.getDatasets());
+ doc.put("projects", auditEvent.getProjects());
doc.put("cluster", auditEvent.getClusterName());
doc.put("zoneName", auditEvent.getZoneName());
doc.put("agentHost", auditEvent.getAgentHostname());
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/FileAuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/FileAuditDestination.java
index 609b98520d..2bab08ac2f 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/destination/FileAuditDestination.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/destination/FileAuditDestination.java
@@ -29,17 +29,17 @@
import java.util.List;
import java.util.Properties;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.provider.MiscUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This class write the logs to local file
*/
public class FileAuditDestination extends AuditDestination {
- private static final Log logger = LogFactory
- .getLog(FileAuditDestination.class);
+ private static final Logger logger = LoggerFactory
+ .getLogger(FileAuditDestination.class);
public static final String PROP_FILE_LOCAL_DIR = "dir";
public static final String PROP_FILE_LOCAL_FILE_NAME_FORMAT = "filename.format";
@@ -75,31 +75,24 @@ public void init(Properties prop, String propPrefix) {
+ PROP_FILE_FILE_ROLLOVER, fileRolloverSec);
if (logFolderProp == null || logFolderProp.isEmpty()) {
- logger.error("File destination folder is not configured. Please set "
- + propPrefix
- + "."
- + PROP_FILE_LOCAL_DIR
- + ". name="
- + getName());
+ logger.error("File destination folder is not configured. Please set {}. {}. name= {}", propPrefix, PROP_FILE_LOCAL_DIR, getName());
return;
}
logFolder = new File(logFolderProp);
if (!logFolder.isDirectory()) {
logFolder.mkdirs();
if (!logFolder.isDirectory()) {
- logger.error("FileDestination folder not found and can't be created. folder="
- + logFolder.getAbsolutePath() + ", name=" + getName());
+ logger.error("FileDestination folder not found and can't be created. folder={}, name={}", logFolder.getAbsolutePath(), getName());
return;
}
}
- logger.info("logFolder=" + logFolder + ", name=" + getName());
+ logger.info("logFolder={}, name={}", logFolder, getName());
if (logFileNameFormat == null || logFileNameFormat.isEmpty()) {
logFileNameFormat = "%app-type%_ranger_audit.log";
}
- logger.info("logFileNameFormat=" + logFileNameFormat + ", destName="
- + getName());
+ logger.info("logFileNameFormat={}, destName={}", logFileNameFormat, getName());
initDone = true;
}
@@ -110,7 +103,7 @@ synchronized public boolean logJSON(Collection<String> events) {
addTotalCount(events.size());
if (isStopped) {
- logError("log() called after stop was requested. name=" + getName());
+ logError("logJSON() called after stop was requested. name={}", getName());
addDeferredCount(events.size());
return false;
}
@@ -141,7 +134,7 @@ public boolean log(Collection<AuditEventBase> events) {
if (isStopped) {
addTotalCount(events.size());
addDeferredCount(events.size());
- logError("log() called after stop was requested. name=" + getName());
+ logError("log() called after stop was requested. name={}", getName());
return false;
}
List<String> jsonList = new ArrayList<String>();
@@ -152,7 +145,7 @@ public boolean log(Collection events) {
addTotalCount(1);
addFailedCount(1);
logFailedEvent(event);
- logger.error("Error converting to JSON. event=" + event);
+ logger.error("Error converting to JSON. event={}", event);
}
}
return logJSON(jsonList);
@@ -178,8 +171,7 @@ synchronized public void stop() {
logWriter.flush();
logWriter.close();
} catch (Throwable t) {
- logger.error("Error on closing log writter. Exception will be ignored. name="
- + getName() + ", fileName=" + currentFileName);
+ logger.error("Error on closing log writer. Exception will be ignored. name= {}, fileName= {}", getName(), currentFileName);
}
logWriter = null;
}
@@ -211,16 +203,14 @@ synchronized private PrintWriter getLogFileStream() throws Exception {
if (!newLogFile.exists()) {
// Move the file
if (!outLogFile.renameTo(newLogFile)) {
- logger.error("Error renameing file. " + outLogFile
- + " to " + newLogFile);
+ logger.error("Error renameing file. {} to {} " , outLogFile, newLogFile);
}
break;
}
}
}
if (!outLogFile.exists()) {
- logger.info("Creating new file. destName=" + getName()
- + ", fileName=" + fileName);
+ logger.info("Creating new file. destName={} , fileName={} ", getName(), fileName);
// Open the file
logWriter = new PrintWriter(new BufferedWriter(new FileWriter(
outLogFile)));
@@ -239,14 +229,12 @@ private void closeFileIfNeeded() {
return;
}
if (System.currentTimeMillis() - fileCreateTime.getTime() > fileRolloverSec * 1000) {
- logger.info("Closing file. Rolling over. name=" + getName()
- + ", fileName=" + currentFileName);
+ logger.info("Closing file. Rolling over. name={} , fileName={}", getName(), currentFileName);
try {
logWriter.flush();
logWriter.close();
} catch (Throwable t) {
- logger.error("Error on closing log writter. Exception will be ignored. name="
- + getName() + ", fileName=" + currentFileName);
+ logger.error("Error on closing log writter. Exception will be ignored. name={} , fileName={}", getName(), currentFileName);
}
logWriter = null;
currentFileName = null;
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/HDFSAuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/HDFSAuditDestination.java
index 906ff341f5..4ad8dfd985 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/destination/HDFSAuditDestination.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/destination/HDFSAuditDestination.java
@@ -19,114 +19,46 @@
package org.apache.ranger.audit.destination;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.net.URI;
-import java.security.PrivilegedAction;
+import java.io.File;
import java.security.PrivilegedExceptionAction;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
import org.apache.ranger.audit.model.AuditEventBase;
+import org.apache.ranger.audit.provider.AuditWriterFactory;
import org.apache.ranger.audit.provider.MiscUtil;
-import org.apache.ranger.audit.utils.RollingTimeUtil;
+import org.apache.ranger.audit.utils.RangerAuditWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* This class write the logs to local file
*/
public class HDFSAuditDestination extends AuditDestination {
- private static final Log logger = LogFactory
- .getLog(HDFSAuditDestination.class);
+ private static final Logger logger = LoggerFactory
+ .getLogger(HDFSAuditDestination.class);
- public static final String PROP_HDFS_DIR = "dir";
- public static final String PROP_HDFS_SUBDIR = "subdir";
- public static final String PROP_HDFS_FILE_NAME_FORMAT = "filename.format";
- public static final String PROP_HDFS_ROLLOVER = "file.rollover.sec";
- public static final String PROP_HDFS_ROLLOVER_PERIOD = "file.rollover.period";
-
- int fileRolloverSec = 24 * 60 * 60; // In seconds
-
- private String logFileNameFormat;
-
- private String rolloverPeriod;
-
- boolean initDone = false;
-
- private String logFolder;
-
- private PrintWriter logWriter = null;
- volatile FSDataOutputStream ostream = null; // output stream wrapped in logWriter
-
- private String currentFileName;
-
- private boolean isStopped = false;
-
- private RollingTimeUtil rollingTimeUtil = null;
-
- private Date nextRollOverTime = null;
-
- private boolean rollOverByDuration = false;
+ private Map<String, String> auditConfigs = null;
+ private String auditProviderName = null;
+ private RangerAuditWriter auditWriter = null;
+ private boolean initDone = false;
+ private boolean isStopped = false;
@Override
public void init(Properties prop, String propPrefix) {
super.init(prop, propPrefix);
-
- // Initialize properties for this class
- // Initial folder and file properties
- String logFolderProp = MiscUtil.getStringProperty(props, propPrefix
- + "." + PROP_HDFS_DIR);
- if (logFolderProp == null || logFolderProp.isEmpty()) {
- logger.fatal("File destination folder is not configured. Please set "
- + propPrefix + "." + PROP_HDFS_DIR + ". name=" + getName());
- return;
- }
-
- String logSubFolder = MiscUtil.getStringProperty(props, propPrefix
- + "." + PROP_HDFS_SUBDIR);
- if (logSubFolder == null || logSubFolder.isEmpty()) {
- logSubFolder = "%app-type%/%time:yyyyMMdd%";
- }
-
- logFileNameFormat = MiscUtil.getStringProperty(props, propPrefix + "."
- + PROP_HDFS_FILE_NAME_FORMAT);
- fileRolloverSec = MiscUtil.getIntProperty(props, propPrefix + "."
- + PROP_HDFS_ROLLOVER, fileRolloverSec);
-
- if (logFileNameFormat == null || logFileNameFormat.isEmpty()) {
- logFileNameFormat = "%app-type%_ranger_audit_%hostname%" + ".log";
- }
-
- logFolder = logFolderProp + "/" + logSubFolder;
- logger.info("logFolder=" + logFolder + ", destName=" + getName());
- logger.info("logFileNameFormat=" + logFileNameFormat + ", destName="
- + getName());
- logger.info("config=" + configProps.toString());
-
- rolloverPeriod = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_HDFS_ROLLOVER_PERIOD);
- rollingTimeUtil = RollingTimeUtil.getInstance();
-
- //file.rollover.period is used for rolling over. If it could compute the next roll over time using file.rollover.period
- //it fall back to use file.rollover.sec for find next rollover time. If still couldn't find default will be 1day window
- //for rollover.
- if(StringUtils.isEmpty(rolloverPeriod) ) {
- rolloverPeriod = rollingTimeUtil.convertRolloverSecondsToRolloverPeriod(fileRolloverSec);
- }
+ this.auditProviderName = getName();
+ this.auditConfigs = configProps;
try {
- nextRollOverTime = rollingTimeUtil.computeNextRollingTime(rolloverPeriod);
- } catch ( Exception e) {
- logger.warn("Rollover by file.rollover.period failed...will be using the file.rollover.sec for hdfs audit file rollover...",e);
- rollOverByDuration = true;
- nextRollOverTime = rollOverByDuration();
+ this.auditWriter = getWriter();
+ this.initDone = true;
+ } catch (Exception e) {
+ logger.error("Error while getting Audit writer", e);
}
- initDone = true;
}
@Override
@@ -140,62 +72,76 @@ synchronized public boolean logJSON(final Collection<String> events) {
}
if (isStopped) {
addDeferredCount(events.size());
- logError("log() called after stop was requested. name=" + getName());
+ logError("log() called after stop was requested. name={}", getName());
return false;
}
-
- PrintWriter out = null;
try {
+ boolean ret = auditWriter.log(events);
+ if (!ret) {
+ addDeferredCount(events.size());
+ return false;
+ }
+ } catch (Throwable t) {
+ addDeferredCount(events.size());
+ logError("Error writing to log file.", t);
+ return false;
+ } finally {
if (logger.isDebugEnabled()) {
- logger.debug("UGI=" + MiscUtil.getUGILoginUser()
- + ". Will write to HDFS file=" + currentFileName);
+ logger.debug("Flushing HDFS audit. Event Size:{}", events.size());
+ }
+ if (auditWriter != null) {
+ flush();
}
+ }
+ addSuccessCount(events.size());
+ return true;
+ }
- out = MiscUtil.executePrivilegedAction(new PrivilegedExceptionAction<PrintWriter>() {
- @Override
- public PrintWriter run() throws Exception {
- PrintWriter out = getLogFileStream();
- for (String event : events) {
- out.println(event);
- }
- return out;
- };
- });
+ @Override
+ synchronized public boolean logFile(final File file) {
+ logStatusIfRequired();
+ if (!initDone) {
+ return false;
+ }
+ if (isStopped) {
+ logError("log() called after stop was requested. name={}", getName());
+ return false;
+ }
- // flush and check the stream for errors
- if (out.checkError()) {
- // In theory, this count may NOT be accurate as part of the messages may have been successfully written.
- // However, in practice, since client does buffering, either all of none would succeed.
- addDeferredCount(events.size());
- out.close();
- logWriter = null;
- ostream = null;
+ try {
+ boolean ret = auditWriter.logFile(file);
+ if (!ret) {
return false;
}
} catch (Throwable t) {
- addDeferredCount(events.size());
logError("Error writing to log file.", t);
return false;
} finally {
- logger.info("Flushing HDFS audit. Event Size:" + events.size());
- if (out != null) {
+ logger.info("Flushing HDFS audit. File:{}{}", file.getAbsolutePath(), file.getName());
+ if (auditWriter != null) {
flush();
}
}
- addSuccessCount(events.size());
return true;
}
@Override
public void flush() {
- logger.info("Flush called. name=" + getName());
- MiscUtil.executePrivilegedAction(new PrivilegedAction<Void>() {
- @Override
- public Void run() {
- hflush();
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> HDFSAuditDestination.flush() called. name={}", getName());
+ }
+ try {
+ MiscUtil.executePrivilegedAction((PrivilegedExceptionAction<Void>) () -> {
+ auditWriter.flush();
return null;
- }
- });
+ });
+ } catch (Exception excp) {
+ logger.error("HDFSAuditDestination.flush() failed", excp);
+ }
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== HDFSAuditDestination.flush() called. name={}", getName());
+ }
}
/*
@@ -210,7 +156,7 @@ public boolean log(Collection<AuditEventBase> events) {
logStatusIfRequired();
addTotalCount(events.size());
addDeferredCount(events.size());
- logError("log() called after stop was requested. name=" + getName());
+ logError("log() called after stop was requested. name={}", getName());
return false;
}
List<String> jsonList = new ArrayList<String>();
@@ -218,7 +164,7 @@ public boolean log(Collection<AuditEventBase> events) {
try {
jsonList.add(MiscUtil.stringify(event));
} catch (Throwable t) {
- logger.error("Error converting to JSON. event=" + event);
+ logger.error("Error converting to JSON. event={}", event);
addTotalCount(1);
addFailedCount(1);
logFailedEvent(event);
@@ -241,151 +187,14 @@ public void start() {
@Override
synchronized public void stop() {
- isStopped = true;
- if (logWriter != null) {
- try {
- logWriter.flush();
- logWriter.close();
- } catch (Throwable t) {
- logger.error("Error on closing log writter. Exception will be ignored. name="
- + getName() + ", fileName=" + currentFileName);
- }
- logWriter = null;
- ostream = null;
- }
+ if (auditWriter != null) {
+ auditWriter.stop();
+ }
logStatus();
+ isStopped = true;
}
- // Helper methods in this class
- synchronized private PrintWriter getLogFileStream() throws Exception {
- closeFileIfNeeded();
-
- // Either there are no open log file or the previous one has been rolled
- // over
- if (logWriter == null) {
- Date currentTime = new Date();
- // Create a new file
- String fileName = MiscUtil.replaceTokens(logFileNameFormat,
- currentTime.getTime());
- String parentFolder = MiscUtil.replaceTokens(logFolder,
- currentTime.getTime());
- Configuration conf = createConfiguration();
-
- String fullPath = parentFolder + Path.SEPARATOR + fileName;
- String defaultPath = fullPath;
- URI uri = URI.create(fullPath);
- FileSystem fileSystem = FileSystem.get(uri, conf);
-
- Path hdfPath = new Path(fullPath);
- logger.info("Checking whether log file exists. hdfPath=" + fullPath + ", UGI=" + MiscUtil.getUGILoginUser());
- int i = 0;
- while (fileSystem.exists(hdfPath)) {
- i++;
- int lastDot = defaultPath.lastIndexOf('.');
- String baseName = defaultPath.substring(0, lastDot);
- String extension = defaultPath.substring(lastDot);
- fullPath = baseName + "." + i + extension;
- hdfPath = new Path(fullPath);
- logger.info("Checking whether log file exists. hdfPath="
- + fullPath);
- }
- logger.info("Log file doesn't exists. Will create and use it. hdfPath="
- + fullPath);
- // Create parent folders
- createParents(hdfPath, fileSystem);
-
- // Create the file to write
- logger.info("Creating new log file. hdfPath=" + fullPath);
- ostream = fileSystem.create(hdfPath);
- logWriter = new PrintWriter(ostream);
- currentFileName = fullPath;
- }
- return logWriter;
- }
-
- Configuration createConfiguration() {
- Configuration conf = new Configuration();
- for (Map.Entry<String, String> entry : configProps.entrySet()) {
- String key = entry.getKey();
- String value = entry.getValue();
- // for ease of install config file may contain properties with empty value, skip those
- if (StringUtils.isNotEmpty(value)) {
- conf.set(key, value);
- }
- logger.info("Adding property to HDFS config: " + key + " => " + value);
- }
-
- logger.info("Returning HDFS Filesystem Config: " + conf.toString());
- return conf;
- }
-
- private void createParents(Path pathLogfile, FileSystem fileSystem)
- throws Exception {
- logger.info("Creating parent folder for " + pathLogfile);
- Path parentPath = pathLogfile != null ? pathLogfile.getParent() : null;
-
- if (parentPath != null && fileSystem != null
- && !fileSystem.exists(parentPath)) {
- fileSystem.mkdirs(parentPath);
- }
- }
-
- private void closeFileIfNeeded() throws FileNotFoundException, IOException {
- if (logWriter == null) {
- return;
- }
-
- if ( System.currentTimeMillis() > nextRollOverTime.getTime() ) {
- logger.info("Closing file. Rolling over. name=" + getName()
- + ", fileName=" + currentFileName);
- try {
- logWriter.flush();
- logWriter.close();
- } catch (Throwable t) {
- logger.error("Error on closing log writter. Exception will be ignored. name="
- + getName() + ", fileName=" + currentFileName);
- }
-
- logWriter = null;
- ostream = null;
- currentFileName = null;
-
- if (!rollOverByDuration) {
- try {
- if(StringUtils.isEmpty(rolloverPeriod) ) {
- rolloverPeriod = rollingTimeUtil.convertRolloverSecondsToRolloverPeriod(fileRolloverSec);
- }
- nextRollOverTime = rollingTimeUtil.computeNextRollingTime(rolloverPeriod);
- } catch ( Exception e) {
- logger.warn("Rollover by file.rollover.period failed...will be using the file.rollover.sec for hdfs audit file rollover...",e);
- nextRollOverTime = rollOverByDuration();
- }
- } else {
- nextRollOverTime = rollOverByDuration();
- }
- }
- }
-
- private void hflush() {
- if (ostream != null) {
- try {
- synchronized (this) {
- if (ostream != null)
- // 1) PrinterWriter does not have bufferring of its own so
- // we need to flush its underlying stream
- // 2) HDFS flush() does not really flush all the way to disk.
- ostream.hflush();
- logger.info("Flush HDFS audit logs completed.....");
- }
- } catch (IOException e) {
- logger.error("Error on flushing log writer: " + e.getMessage() +
- "\nException will be ignored. name=" + getName() + ", fileName=" + currentFileName);
- }
- }
- }
-
- private Date rollOverByDuration() {
- long rollOverTime = rollingTimeUtil.computeNextRollingTime(fileRolloverSec,nextRollOverTime);
- return new Date(rollOverTime);
+ public RangerAuditWriter getWriter() throws Exception {
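+ // File naming, rollover, and HDFS access are now delegated to the shared RangerAuditWriter.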
+ AuditWriterFactory auditWriterFactory = AuditWriterFactory.getInstance();
+ auditWriterFactory.init(props, propPrefix, auditProviderName, auditConfigs);
+ return auditWriterFactory.getAuditWriter();
}
}
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/Log4JAuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/Log4JAuditDestination.java
index 1dd35c92a7..6da3e75875 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/destination/Log4JAuditDestination.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/destination/Log4JAuditDestination.java
@@ -22,16 +22,16 @@
import java.util.Collection;
import java.util.Properties;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.provider.MiscUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class Log4JAuditDestination extends AuditDestination {
- private static final Log logger = LogFactory
- .getLog(Log4JAuditDestination.class);
+ private static final Logger logger = LoggerFactory
+ .getLogger(Log4JAuditDestination.class);
- private static Log auditLogger = null;
+ private static Logger auditLogger = null;
public static final String PROP_LOG4J_LOGGER = "logger";
public static final String DEFAULT_LOGGER_PREFIX = "ranger.audit";
@@ -54,7 +54,7 @@ public void init(Properties prop, String propPrefix) {
+ loggerName);
}
logger.info("Logger name for " + getName() + " is " + loggerName);
- auditLogger = LogFactory.getLog(loggerName);
+ auditLogger = LoggerFactory.getLogger(loggerName);
logger.info("Done initializing logger for audit. name=" + getName()
+ ", loggerName=" + loggerName);
}
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/destination/SolrAuditDestination.java b/agents-audit/src/main/java/org/apache/ranger/audit/destination/SolrAuditDestination.java
index 7631e5882e..017e15b2e8 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/destination/SolrAuditDestination.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/destination/SolrAuditDestination.java
@@ -20,13 +20,13 @@
package org.apache.ranger.audit.destination;
import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.model.AuthzAuditEvent;
import org.apache.ranger.audit.provider.MiscUtil;
import org.apache.ranger.audit.utils.InMemoryJAASConfiguration;
-import org.apache.ranger.audit.utils.SolrAppUtil;
+import org.apache.ranger.audit.utils.KerberosAction;
+import org.apache.ranger.audit.utils.KerberosUser;
+import org.apache.ranger.audit.utils.KerberosJAASConfigUser;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpClientUtil;
@@ -36,6 +36,8 @@
import org.apache.solr.client.solrj.response.UpdateResponse;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrInputDocument;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileInputStream;
@@ -62,14 +64,15 @@
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManager;
import javax.net.ssl.TrustManagerFactory;
+import javax.security.auth.login.LoginException;
import java.util.Arrays;
import java.util.Optional;
public class SolrAuditDestination extends AuditDestination {
- private static final Log LOG = LogFactory
- .getLog(SolrAuditDestination.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(SolrAuditDestination.class);
public static final String PROP_SOLR_URLS = "urls";
public static final String PROP_SOLR_ZK = "zookeepers";
@@ -80,6 +83,7 @@ public class SolrAuditDestination extends AuditDestination {
public static final String PROP_JAVA_SECURITY_AUTH_LOGIN_CONFIG = "java.security.auth.login.config";
private volatile SolrClient solrClient = null;
+ private volatile KerberosUser kerberosUser = null;
public SolrAuditDestination() {
}
@@ -94,8 +98,28 @@ public void init(Properties props, String propPrefix) {
@Override
public void stop() {
- super.stop();
+ LOG.info("SolrAuditDestination.stop() called..");
logStatus();
+
+ if (solrClient != null) {
+ try {
+ solrClient.close();
+ } catch (IOException ioe) {
+ LOG.error("Error while stopping slor!", ioe);
+ } finally {
+ solrClient = null;
+ }
+ }
+
+ if (kerberosUser != null) {
+ try {
+ kerberosUser.logout();
+ } catch (LoginException excp) {
+ LOG.error("Error logging out keytab user", excp);
+ } finally {
+ kerberosUser = null;
+ }
+ }
}
synchronized void connect() {
@@ -139,98 +163,60 @@ synchronized void connect() {
if (zkHosts != null && !zkHosts.isEmpty()) {
LOG.info("Connecting to solr cloud using zkHosts="
+ zkHosts);
- try {
- // Instantiate
- Krb5HttpClientBuilder krbBuild = new Krb5HttpClientBuilder();
- SolrHttpClientBuilder kb = krbBuild.getBuilder();
- HttpClientUtil.setHttpClientBuilder(kb);
-
- final List<String> zkhosts = new ArrayList<String>(Arrays.asList(zkHosts.split(",")));
- final CloudSolrClient solrCloudClient = MiscUtil.executePrivilegedAction(new PrivilegedExceptionAction<CloudSolrClient>() {
- @Override
- public CloudSolrClient run() throws Exception {
- CloudSolrClient solrCloudClient = new CloudSolrClient.Builder(zkhosts, Optional.empty()).build();
- return solrCloudClient;
- };
- });
-
- solrCloudClient.setDefaultCollection(collectionName);
- me = solrClient = solrCloudClient;
+ try (Krb5HttpClientBuilder krbBuild = new Krb5HttpClientBuilder()) {
+ SolrHttpClientBuilder kb = krbBuild.getBuilder();
+ HttpClientUtil.setHttpClientBuilder(kb);
+
+ final List<String> zkhosts = new ArrayList<>(Arrays.asList(zkHosts.split(",")));
+ final CloudSolrClient solrCloudClient = MiscUtil.executePrivilegedAction(new PrivilegedExceptionAction<CloudSolrClient>() {
+ @Override
+ public CloudSolrClient run() throws Exception {
+ CloudSolrClient solrCloudClient = new CloudSolrClient.Builder(zkhosts, Optional.empty()).build();
+ return solrCloudClient;
+ }
+ });
+
+ solrCloudClient.setDefaultCollection(collectionName);
+ me = solrClient = solrCloudClient;
} catch (Throwable t) {
- LOG.fatal("Can't connect to Solr server. ZooKeepers="
+ LOG.error("Can't connect to Solr server. ZooKeepers="
+ zkHosts, t);
}
- finally {
- resetInitializerInSOLR();
- }
} else if (solrURLs != null && !solrURLs.isEmpty()) {
- try {
- LOG.info("Connecting to Solr using URLs=" + solrURLs);
- Krb5HttpClientBuilder krbBuild = new Krb5HttpClientBuilder();
- SolrHttpClientBuilder kb = krbBuild.getBuilder();
- HttpClientUtil.setHttpClientBuilder(kb);
- final List<String> solrUrls = solrURLs;
- final LBHttpSolrClient lbSolrClient = MiscUtil.executePrivilegedAction(new PrivilegedExceptionAction<LBHttpSolrClient>() {
- @Override
- public LBHttpSolrClient run() throws Exception {
- LBHttpSolrClient.Builder builder = new LBHttpSolrClient.Builder();
- builder.withBaseSolrUrl(solrUrls.get(0));
- builder.withConnectionTimeout(1000);
- LBHttpSolrClient lbSolrClient = builder.build();
- return lbSolrClient;
- };
- });
-
- for (int i = 1; i < solrURLs.size(); i++) {
- lbSolrClient.addSolrServer(solrURLs.get(i));
- }
- me = solrClient = lbSolrClient;
+ try (Krb5HttpClientBuilder krbBuild = new Krb5HttpClientBuilder()) {
+ LOG.info("Connecting to Solr using URLs=" + solrURLs);
+ SolrHttpClientBuilder kb = krbBuild.getBuilder();
+ HttpClientUtil.setHttpClientBuilder(kb);
+ final List<String> solrUrls = solrURLs;
+ final LBHttpSolrClient lbSolrClient = MiscUtil.executePrivilegedAction(new PrivilegedExceptionAction<LBHttpSolrClient>() {
+ @Override
+ public LBHttpSolrClient run() throws Exception {
+ LBHttpSolrClient.Builder builder = new LBHttpSolrClient.Builder();
+ builder.withBaseSolrUrl(solrUrls.get(0));
+ builder.withConnectionTimeout(1000);
+ LBHttpSolrClient lbSolrClient = builder.build();
+ return lbSolrClient;
+ }
+ });
+
+ for (int i = 1; i < solrURLs.size(); i++) {
+ lbSolrClient.addSolrServer(solrURLs.get(i));
+ }
+ me = solrClient = lbSolrClient;
} catch (Throwable t) {
- LOG.fatal("Can't connect to Solr server. URL="
+ LOG.error("Can't connect to Solr server. URL="
+ solrURLs, t);
}
- finally {
- resetInitializerInSOLR();
- }
}
}
}
}
}
-
- private void resetInitializerInSOLR() {
- javax.security.auth.login.Configuration solrConfig = javax.security.auth.login.Configuration.getConfiguration();
- String solrConfigClassName = solrConfig.getClass().getName();
- String solrJassConfigEnd = "SolrJaasConfiguration";
- if (solrConfigClassName.endsWith(solrJassConfigEnd)) {
- try {
- Field f = solrConfig.getClass().getDeclaredField("initiateAppNames");
- if (f != null) {
- f.setAccessible(true);
- HashSet<String> val = new HashSet<String>();
- f.set(solrConfig, val);
- if ( LOG.isDebugEnabled() ) {
- LOG.debug("resetInitializerInSOLR: successfully reset the initiateAppNames");
- }
-
- } else {
- if ( LOG.isDebugEnabled() ) {
- LOG.debug("resetInitializerInSOLR: not applying on class [" + solrConfigClassName + "] as it does not have initiateAppNames variable name.");
- }
- }
- } catch (Throwable t) {
- logError("resetInitializerInSOLR: Unable to reset SOLRCONFIG.initiateAppNames to be empty", t);
- }
- }
- else {
- if ( LOG.isDebugEnabled() ) {
- LOG.debug("resetInitializerInSOLR: not applying on class [" + solrConfigClassName + "] as it does not endwith [" + solrJassConfigEnd + "]");
- }
- }
-
- }
-
@Override
public boolean log(Collection<AuditEventBase> events) {
boolean ret = false;
@@ -255,7 +241,7 @@ public boolean log(Collection<AuditEventBase> events) {
docs.add(document);
}
try {
- final UpdateResponse response = SolrAppUtil.addDocsToSolr(solrClient, docs);
+ final UpdateResponse response = addDocsToSolr(solrClient, docs);
if (response.getStatus() != 0) {
addFailedCount(events.size());
@@ -309,6 +295,8 @@ SolrInputDocument toSolrDoc(AuthzAuditEvent auditEvent) {
doc.setField("event_count", auditEvent.getEventCount());
doc.setField("event_dur_ms", auditEvent.getEventDurationMS());
doc.setField("tags", auditEvent.getTags());
+ doc.addField("datasets", auditEvent.getDatasets());
+ doc.addField("projects", auditEvent.getProjects());
doc.setField("cluster", auditEvent.getClusterName());
doc.setField("zoneName", auditEvent.getZoneName());
doc.setField("agentHost", auditEvent.getAgentHostname());
@@ -336,12 +324,19 @@ private void init() {
LOG.warn("No Client JAAS config present in solr audit config. Ranger Audit to Kerberized Solr will fail...");
}
}
+
LOG.info("Loading SolrClient JAAS config from Ranger audit config if present...");
- InMemoryJAASConfiguration.init(props);
- } catch (Exception e) {
- LOG.error("ERROR: Unable to load SolrClient JAAS config from Audit config file. Audit to Kerberized Solr will fail...", e);
- }
- finally {
+
+ InMemoryJAASConfiguration conf = InMemoryJAASConfiguration.init(props);
+
+ KerberosUser kerberosUser = new KerberosJAASConfigUser("Client", conf);
+
+ if (kerberosUser.getPrincipal() != null) {
+ this.kerberosUser = kerberosUser;
+ }
+ } catch (Exception e) {
+ LOG.error("ERROR: Unable to load SolrClient JAAS config from Audit config file. Audit to Kerberized Solr will fail...", e);
+ } finally {
String confFileName = System.getProperty(PROP_JAVA_SECURITY_AUTH_LOGIN_CONFIG);
LOG.info("In solrAuditDestination.init() (finally) : JAAS Configuration set as [" + confFileName + "]");
}
@@ -455,6 +450,27 @@ private SSLContext getSSLContext(KeyManager[] kmList, TrustManager[] tmList) {
return sslContext;
}
+ private UpdateResponse addDocsToSolr(final SolrClient solrClient, final Collection<SolrInputDocument> docs) throws Exception {
+ final UpdateResponse ret;
+
+ try {
+ final PrivilegedExceptionAction<UpdateResponse> action = () -> solrClient.add(docs);
+
+ if (kerberosUser != null) {
+ // execute the privileged action as the given keytab user
+ final KerberosAction<UpdateResponse> kerberosAction = new KerberosAction<>(kerberosUser, action, LOG);
+
+ ret = kerberosAction.execute();
+ } else {
+ ret = action.run();
+ }
+ } catch (Exception e) {
+ throw e;
+ }
+
+ return ret;
+ }
+
private InputStream getFileInputStream(String fileName) throws IOException {
InputStream in = null;
if (StringUtils.isNotEmpty(fileName)) {
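
The addDocsToSolr() helper above funnels every Solr write through a single PrivilegedExceptionAction, so the same lambda either runs inline or is re-executed under the configured keytab principal via KerberosAction. A minimal standalone sketch of that dispatch shape, with a hypothetical runAsKeytabUser() standing in for KerberosAction.execute():

    import java.security.PrivilegedExceptionAction;

    public class PrivilegedActionSketch {
        // Hypothetical stand-in for KerberosAction<T>.execute(), which in Ranger
        // performs the keytab login before running the action.
        static <T> T runAsKeytabUser(PrivilegedExceptionAction<T> action) throws Exception {
            return action.run();
        }

        public static void main(String[] args) throws Exception {
            // The work is defined once as an action...
            final PrivilegedExceptionAction<String> action = () -> "added 3 docs";

            boolean haveKerberosUser = false; // stand-in for (kerberosUser != null)

            // ...and dispatched either as the keytab user or on the calling thread.
            String result = haveKerberosUser ? runAsKeytabUser(action) : action.run();
            System.out.println(result);
        }
    }
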
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/entity/AuthzAuditEventDbObj.java b/agents-audit/src/main/java/org/apache/ranger/audit/entity/AuthzAuditEventDbObj.java
deleted file mode 100644
index 6830e9504d..0000000000
--- a/agents-audit/src/main/java/org/apache/ranger/audit/entity/AuthzAuditEventDbObj.java
+++ /dev/null
@@ -1,412 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
- package org.apache.ranger.audit.entity;
-
-import java.io.Serializable;
-import java.util.Date;
-import java.util.Properties;
-
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.Id;
-import javax.persistence.GeneratedValue;
-import javax.persistence.GenerationType;
-import javax.persistence.Table;
-import javax.persistence.Temporal;
-import javax.persistence.TemporalType;
-import javax.persistence.SequenceGenerator;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.ranger.audit.model.AuthzAuditEvent;
-import org.apache.ranger.audit.provider.MiscUtil;
-
-/**
- * Entity implementation class for Entity: AuthzAuditEventDbObj
- *
- */
-@Entity
-@Table(name="xa_access_audit")
-public class AuthzAuditEventDbObj implements Serializable {
-
- private static final Log LOG = LogFactory.getLog(AuthzAuditEventDbObj.class);
-
- private static final long serialVersionUID = 1L;
-
- static int MaxValueLengthAccessType = 255;
- static int MaxValueLengthAclEnforcer = 255;
- static int MaxValueLengthAgentId = 255;
- static int MaxValueLengthClientIp = 255;
- static int MaxValueLengthClientType = 255;
- static int MaxValueLengthRepoName = 255;
- static int MaxValueLengthResultReason = 255;
- static int MaxValueLengthSessionId = 255;
- static int MaxValueLengthRequestUser = 255;
- static int MaxValueLengthAction = 2000;
- static int MaxValueLengthRequestData = 4000;
- static int MaxValueLengthResourcePath = 4000;
- static int MaxValueLengthResourceType = 255;
-
- private long auditId;
- private int repositoryType;
- private String repositoryName;
- private String user;
- private Date timeStamp;
- private String accessType;
- private String resourcePath;
- private String resourceType;
- private String action;
- private int accessResult;
- private String agentId;
- private long policyId;
- private String resultReason;
- private String aclEnforcer;
- private String sessionId;
- private String clientType;
- private String clientIP;
- private String requestData;
- private long seqNum;
- private long eventCount;
- private long eventDurationMS;
- private String tags;
-
- public static void init(Properties props)
- {
- LOG.info("AuthzAuditEventDbObj.init()");
-
- final String AUDIT_DB_MAX_COLUMN_VALUE = "xasecure.audit.destination.db.max.column.length";
- MaxValueLengthAccessType = MiscUtil.getIntProperty(props, AUDIT_DB_MAX_COLUMN_VALUE + "." + "access_type", MaxValueLengthAccessType);
- logMaxColumnValue("access_type", MaxValueLengthAccessType);
-
- MaxValueLengthAclEnforcer = MiscUtil.getIntProperty(props, AUDIT_DB_MAX_COLUMN_VALUE + "." + "acl_enforcer", MaxValueLengthAclEnforcer);
- logMaxColumnValue("acl_enforcer", MaxValueLengthAclEnforcer);
-
- MaxValueLengthAction = MiscUtil.getIntProperty(props, AUDIT_DB_MAX_COLUMN_VALUE + "." + "action", MaxValueLengthAction);
- logMaxColumnValue("action", MaxValueLengthAction);
-
- MaxValueLengthAgentId = MiscUtil.getIntProperty(props, AUDIT_DB_MAX_COLUMN_VALUE + "." + "agent_id", MaxValueLengthAgentId);
- logMaxColumnValue("agent_id", MaxValueLengthAgentId);
-
- MaxValueLengthClientIp = MiscUtil.getIntProperty(props, AUDIT_DB_MAX_COLUMN_VALUE + "." + "client_id", MaxValueLengthClientIp);
- logMaxColumnValue("client_id", MaxValueLengthClientIp);
-
- MaxValueLengthClientType = MiscUtil.getIntProperty(props, AUDIT_DB_MAX_COLUMN_VALUE + "." + "client_type", MaxValueLengthClientType);
- logMaxColumnValue("client_type", MaxValueLengthClientType);
-
- MaxValueLengthRepoName = MiscUtil.getIntProperty(props, AUDIT_DB_MAX_COLUMN_VALUE + "." + "repo_name", MaxValueLengthRepoName);
- logMaxColumnValue("repo_name", MaxValueLengthRepoName);
-
- MaxValueLengthResultReason = MiscUtil.getIntProperty(props, AUDIT_DB_MAX_COLUMN_VALUE + "." + "result_reason", MaxValueLengthResultReason);
- logMaxColumnValue("result_reason", MaxValueLengthResultReason);
-
- MaxValueLengthSessionId = MiscUtil.getIntProperty(props, AUDIT_DB_MAX_COLUMN_VALUE + "." + "session_id", MaxValueLengthSessionId);
- logMaxColumnValue("session_id", MaxValueLengthSessionId);
-
- MaxValueLengthRequestUser = MiscUtil.getIntProperty(props, AUDIT_DB_MAX_COLUMN_VALUE + "." + "request_user", MaxValueLengthRequestUser);
- logMaxColumnValue("request_user", MaxValueLengthRequestUser);
-
- MaxValueLengthRequestData = MiscUtil.getIntProperty(props, AUDIT_DB_MAX_COLUMN_VALUE + "." + "request_data", MaxValueLengthRequestData);
- logMaxColumnValue("request_data", MaxValueLengthRequestData);
-
- MaxValueLengthResourcePath = MiscUtil.getIntProperty(props, AUDIT_DB_MAX_COLUMN_VALUE + "." + "resource_path", MaxValueLengthResourcePath);
- logMaxColumnValue("resource_path", MaxValueLengthResourcePath);
-
- MaxValueLengthResourceType = MiscUtil.getIntProperty(props, AUDIT_DB_MAX_COLUMN_VALUE + "." + "resource_type", MaxValueLengthResourceType);
- logMaxColumnValue("resource_type", MaxValueLengthResourceType);
- }
-
- public static void logMaxColumnValue(String columnName, int configuredMaxValueLength) {
- LOG.info("Setting max column value for column[" + columnName + "] to [" + configuredMaxValueLength + "].");
- if (configuredMaxValueLength == 0) {
- LOG.info("Max length of column[" + columnName + "] was 0! Column will NOT be emitted in the audit.");
- } else if (configuredMaxValueLength < 0) {
- LOG.info("Max length of column[" + columnName + "] was less than 0! Column value will never be truncated.");
- }
- }
-
-
- public AuthzAuditEventDbObj() {
- super();
- }
-
- public AuthzAuditEventDbObj(AuthzAuditEvent event) {
- super();
- Date utcDate=null;
- if(event.getEventTime()!=null){
- utcDate=MiscUtil.getUTCDateForLocalDate(event.getEventTime());
- }else{
- utcDate=MiscUtil.getUTCDate();
- }
- this.repositoryType = event.getRepositoryType();
- this.repositoryName = event.getRepositoryName();
- this.user = event.getUser();
- this.timeStamp = utcDate;
- this.accessType = event.getAccessType();
- this.resourcePath = event.getResourcePath();
- this.resourceType = event.getResourceType();
- this.action = event.getAction();
- this.accessResult = event.getAccessResult();
- this.agentId = event.getAgentId();
- this.policyId = event.getPolicyId();
- this.resultReason = event.getResultReason();
- this.aclEnforcer = event.getAclEnforcer();
- this.sessionId = event.getSessionId();
- this.clientType = event.getClientType();
- this.clientIP = event.getClientIP();
- this.requestData = event.getRequestData();
- this.seqNum = event.getSeqNum();
- this.eventCount = event.getEventCount();
- this.eventDurationMS= event.getEventDurationMS();
- this.tags = StringUtils.join(event.getTags(), ", ");
- }
-
- @Id
- @SequenceGenerator(name="XA_ACCESS_AUDIT_SEQ",sequenceName="XA_ACCESS_AUDIT_SEQ",allocationSize=1)
- @GeneratedValue(strategy=GenerationType.AUTO,generator="XA_ACCESS_AUDIT_SEQ")
- @Column(name = "id", unique = true, nullable = false)
- public long getAuditId() {
- return this.auditId;
- }
-
- public void setAuditId(long auditId) {
- this.auditId = auditId;
- }
-
- @Column(name = "repo_type")
- public int getRepositoryType() {
- return this.repositoryType;
- }
-
- public void setRepositoryType(int repositoryType) {
- this.repositoryType = repositoryType;
- }
-
- @Column(name = "repo_name")
- public String getRepositoryName() {
- return truncate(this.repositoryName, MaxValueLengthRepoName, "repo_name");
- }
-
- public void setRepositoryName(String repositoryName) {
- this.repositoryName = repositoryName;
- }
-
- @Column(name = "request_user")
- public String getUser() {
- return truncate(this.user, MaxValueLengthRequestUser, "request_user");
- }
-
- public void setUser(String user) {
- this.user = user;
- }
-
- @Temporal(TemporalType.TIMESTAMP)
- @Column(name = "event_time")
- public Date getTimeStamp() {
- return this.timeStamp;
- }
-
- public void setTimeStamp(Date timeStamp) {
- this.timeStamp = timeStamp;
- }
-
- @Column(name = "access_type")
- public String getAccessType() {
- return truncate(this.accessType, MaxValueLengthAccessType, "access_type");
- }
-
- public void setAccessType(String accessType) {
- this.accessType = accessType;
- }
-
- @Column(name = "resource_path")
- public String getResourcePath() {
- return truncate(this.resourcePath, MaxValueLengthResourcePath, "resource_path");
- }
-
- public void setResourcePath(String resourcePath) {
- this.resourcePath = resourcePath;
- }
-
- @Column(name = "resource_type")
- public String getResourceType() {
- return truncate(this.resourceType, MaxValueLengthResourceType, "resource_type");
- }
-
- public void setResourceType(String resourceType) {
- this.resourceType = resourceType;
- }
-
- @Column(name = "action")
- public String getAction() {
- return truncate(this.action, MaxValueLengthAction, "action");
- }
-
- public void setAction(String action) {
- this.action = action;
- }
-
- @Column(name = "access_result")
- public int getAccessResult() {
- return this.accessResult;
- }
-
- public void setAccessResult(int accessResult) {
- this.accessResult = accessResult;
- }
-
- @Column(name = "agent_id")
- public String getAgentId() {
- return truncate(this.agentId, MaxValueLengthAgentId, "agent_id");
- }
-
- public void setAgentId(String agentId) {
- this.agentId = agentId;
- }
-
- @Column(name = "policy_id")
- public long getPolicyId() {
- return this.policyId;
- }
-
- public void setPolicyId(long policyId) {
- this.policyId = policyId;
- }
-
- @Column(name = "result_reason")
- public String getResultReason() {
- return truncate(this.resultReason, MaxValueLengthResultReason, "result_reason");
- }
-
- public void setResultReason(String resultReason) {
- this.resultReason = resultReason;
- }
-
- @Column(name = "acl_enforcer")
- public String getAclEnforcer() {
- return truncate(this.aclEnforcer, MaxValueLengthAclEnforcer, "acl_enforcer");
- }
-
- public void setAclEnforcer(String aclEnforcer) {
- this.aclEnforcer = aclEnforcer;
- }
-
- @Column(name = "session_id")
- public String getSessionId() {
- return truncate(this.sessionId, MaxValueLengthSessionId, "session_id");
- }
-
- public void setSessionId(String sessionId) {
- this.sessionId = sessionId;
- }
-
- @Column(name = "client_type")
- public String getClientType() {
- return truncate(this.clientType, MaxValueLengthClientType, "client_type");
- }
-
- public void setClientType(String clientType) {
- this.clientType = clientType;
- }
-
- @Column(name = "client_ip")
- public String getClientIP() {
- return truncate(this.clientIP, MaxValueLengthClientIp, "client_ip");
- }
-
- public void setClientIP(String clientIP) {
- this.clientIP = clientIP;
- }
-
- @Column(name = "request_data")
- public String getRequestData() {
- return truncate(this.requestData, MaxValueLengthRequestData, "request_data");
- }
-
- public void setRequestData(String requestData) {
- this.requestData = requestData;
- }
-
- @Column(name = "seq_num")
- public long getSeqNum() { return this.seqNum; }
-
- public void setSeqNum(long seqNum) { this.seqNum = seqNum; }
-
- @Column(name = "event_count")
- public long getEventCount() { return this.eventCount; }
-
- public void setEventCount(long eventCount) { this.eventCount = eventCount; }
-
- @Column(name = "event_dur_ms")
- public long getEventDurationMS() { return this.eventDurationMS; }
-
- public void setEventDurationMS(long eventDurationMS) { this.eventDurationMS = eventDurationMS; }
-
- @Column(name = "tags")
- public String getTags() {
- return this.tags;
- }
-
- public void setTags(String tags) {
- this.tags = tags;
- }
-
- static final String TruncationMarker = "...";
- static final int TruncationMarkerLength = TruncationMarker.length();
-
- protected String truncate(String value, int limit, String columnName) {
- if (LOG.isDebugEnabled()) {
- LOG.debug(String.format("==> getTrunctedValue(%s, %d, %s)", value, limit, columnName));
- }
-
- String result = value;
- if (value != null) {
- if (limit < 0) {
- if (LOG.isDebugEnabled()) {
- LOG.debug(String.format("Truncation is suppressed for column[%s]: old value [%s], new value[%s]", columnName, value, result));
- }
- } else if (limit == 0) {
- if (LOG.isDebugEnabled()) {
- LOG.debug(String.format("Column[%s] is to be excluded from audit: old value [%s], new value[%s]", columnName, value, result));
- }
- result = null;
- } else {
- if (value.length() > limit) {
- if (limit <= TruncationMarkerLength) {
- // NOTE: If value is to be truncated to a size that is less than of equal to the Truncation Marker then we won't put the marker in!!
- result = value.substring(0, limit);
- } else {
- StringBuilder sb = new StringBuilder(value.substring(0, limit - TruncationMarkerLength));
- sb.append(TruncationMarker);
- result = sb.toString();
- }
- if (LOG.isDebugEnabled()) {
- LOG.debug(String.format("Truncating value for column[%s] to [%d] characters: old value [%s], new value[%s]", columnName, limit, value, result));
- }
- }
- }
- }
-
- if (LOG.isDebugEnabled()) {
- LOG.debug(String.format("<== getTrunctedValue(%s, %d, %s): %s", value, limit, columnName, result));
- }
- return result;
- }
-}
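
The deleted entity above carried the per-column truncation rule used when audits were written to the xa_access_audit table: a negative limit disables truncation, zero drops the column, and a positive limit cuts the value and appends a "..." marker when there is room for it. A self-contained sketch of that rule:

    public class TruncateSketch {
        static final String MARKER = "...";

        // Mirrors the deleted truncate(value, limit, column) behavior.
        static String truncate(String value, int limit) {
            if (value == null || limit < 0) {
                return value;                   // negative limit: never truncate
            }
            if (limit == 0) {
                return null;                    // zero limit: drop the column entirely
            }
            if (value.length() <= limit) {
                return value;
            }
            return limit <= MARKER.length()
                    ? value.substring(0, limit) // too small to fit the marker
                    : value.substring(0, limit - MARKER.length()) + MARKER;
        }

        public static void main(String[] args) {
            System.out.println(truncate("0123456789", -1)); // 0123456789
            System.out.println(truncate("0123456789", 0));  // null
            System.out.println(truncate("0123456789", 2));  // 01
            System.out.println(truncate("0123456789", 7));  // 0123...
        }
    }
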
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditEventBase.java b/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditEventBase.java
index b5791467af..84eaebec9c 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditEventBase.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditEventBase.java
@@ -21,15 +21,11 @@
import java.util.Date;
-import org.apache.ranger.audit.dao.DaoManager;
-
public abstract class AuditEventBase {
protected AuditEventBase() {
}
- public abstract void persist(DaoManager daoManager);
-
public abstract String getEventKey();
public abstract Date getEventTime ();
public abstract void setEventCount(long eventCount);
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditIndexRecord.java b/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditIndexRecord.java
new file mode 100644
index 0000000000..d0ea2b9087
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/model/AuditIndexRecord.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ranger.audit.model;
+
+import java.util.Date;
+
+public class AuditIndexRecord {
+ String id;
+ String filePath;
+ int linePosition = 0;
+ SPOOL_FILE_STATUS status = SPOOL_FILE_STATUS.write_inprogress;
+ Date fileCreateTime;
+ Date writeCompleteTime;
+ Date doneCompleteTime;
+ Date lastSuccessTime;
+ Date lastFailedTime;
+ int failedAttemptCount = 0;
+ boolean lastAttempt = false;
+
+ public String getId() {
+ return id;
+ }
+
+ public void setId(String id) {
+ this.id = id;
+ }
+
+ public String getFilePath() {
+ return filePath;
+ }
+
+ public void setFilePath(String filePath) {
+ this.filePath = filePath;
+ }
+
+ public int getLinePosition() {
+ return linePosition;
+ }
+
+ public void setLinePosition(int linePosition) {
+ this.linePosition = linePosition;
+ }
+
+ public SPOOL_FILE_STATUS getStatus() {
+ return status;
+ }
+
+ public void setStatus(SPOOL_FILE_STATUS status) {
+ this.status = status;
+ }
+
+ public Date getFileCreateTime() {
+ return fileCreateTime;
+ }
+
+ public void setFileCreateTime(Date fileCreateTime) {
+ this.fileCreateTime = fileCreateTime;
+ }
+
+ public Date getWriteCompleteTime() {
+ return writeCompleteTime;
+ }
+
+ public void setWriteCompleteTime(Date writeCompleteTime) {
+ this.writeCompleteTime = writeCompleteTime;
+ }
+
+ public Date getDoneCompleteTime() {
+ return doneCompleteTime;
+ }
+
+ public void setDoneCompleteTime(Date doneCompleteTime) {
+ this.doneCompleteTime = doneCompleteTime;
+ }
+
+ public Date getLastSuccessTime() {
+ return lastSuccessTime;
+ }
+
+ public void setLastSuccessTime(Date lastSuccessTime) {
+ this.lastSuccessTime = lastSuccessTime;
+ }
+
+ public Date getLastFailedTime() {
+ return lastFailedTime;
+ }
+
+ public void setLastFailedTime(Date lastFailedTime) {
+ this.lastFailedTime = lastFailedTime;
+ }
+
+ public int getFailedAttemptCount() {
+ return failedAttemptCount;
+ }
+
+ public void setFailedAttemptCount(int failedAttemptCount) {
+ this.failedAttemptCount = failedAttemptCount;
+ }
+
+ public boolean getLastAttempt() {
+ return lastAttempt;
+ }
+
+ public void setLastAttempt(boolean lastAttempt) {
+ this.lastAttempt = lastAttempt;
+ }
+
+ @Override
+ public String toString() {
+ return "AuditIndexRecord [id=" + id + ", filePath=" + filePath
+ + ", linePosition=" + linePosition + ", status=" + status
+ + ", fileCreateTime=" + fileCreateTime
+ + ", writeCompleteTime=" + writeCompleteTime
+ + ", doneCompleteTime=" + doneCompleteTime
+ + ", lastSuccessTime=" + lastSuccessTime
+ + ", lastFailedTime=" + lastFailedTime
+ + ", failedAttemptCount=" + failedAttemptCount
+ + ", lastAttempt=" + lastAttempt + "]";
+ }
+}
+
+
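
AuditIndexRecord above is a plain bean, so it serializes cleanly with the Jackson mapper this patch standardizes on; the spool machinery keeps one such record per spooled file (one JSON record per index-file line is an assumption here, not something this diff shows). A round-trip sketch:

    import com.fasterxml.jackson.databind.ObjectMapper;
    import org.apache.ranger.audit.model.AuditIndexRecord;
    import org.apache.ranger.audit.model.SPOOL_FILE_STATUS;

    public class IndexRecordSketch {
        public static void main(String[] args) throws Exception {
            AuditIndexRecord rec = new AuditIndexRecord();
            rec.setId("idx-0001");                                   // example values
            rec.setFilePath("/var/log/ranger/spool/audit-0001.log");
            rec.setStatus(SPOOL_FILE_STATUS.pending);

            ObjectMapper mapper = new ObjectMapper();
            String json = mapper.writeValueAsString(rec);            // serialize
            AuditIndexRecord back = mapper.readValue(json, AuditIndexRecord.class);
            System.out.println(back);                                // uses the toString() above
        }
    }
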
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/model/AuthzAuditEvent.java b/agents-audit/src/main/java/org/apache/ranger/audit/model/AuthzAuditEvent.java
index 28db58cd96..5ed88eb6ab 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/model/AuthzAuditEvent.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/model/AuthzAuditEvent.java
@@ -23,105 +23,112 @@
import java.util.HashSet;
import java.util.Set;
+import com.fasterxml.jackson.databind.annotation.JsonSerialize;
+import com.fasterxml.jackson.annotation.JsonIgnore;
import org.apache.commons.lang.StringUtils;
-import org.apache.ranger.audit.dao.DaoManager;
-import org.apache.ranger.audit.entity.AuthzAuditEventDbObj;
-import com.google.gson.annotations.SerializedName;
+import com.fasterxml.jackson.annotation.JsonProperty;
+@JsonSerialize
public class AuthzAuditEvent extends AuditEventBase {
protected static String FIELD_SEPARATOR = ";";
protected static final int MAX_ACTION_FIELD_SIZE = 1800;
protected static final int MAX_REQUEST_DATA_FIELD_SIZE = 1800;
- @SerializedName("repoType")
+ @JsonProperty("repoType")
protected int repositoryType = 0;
- @SerializedName("repo")
+ @JsonProperty("repo")
protected String repositoryName = null;
- @SerializedName("reqUser")
+ @JsonProperty("reqUser")
protected String user = null;
- @SerializedName("evtTime")
+ @JsonProperty("evtTime")
protected Date eventTime = new Date();
- @SerializedName("access")
+ @JsonProperty("access")
protected String accessType = null;
- @SerializedName("resource")
+ @JsonProperty("resource")
protected String resourcePath = null;
- @SerializedName("resType")
+ @JsonProperty("resType")
protected String resourceType = null;
- @SerializedName("action")
+ @JsonProperty("action")
protected String action = null;
- @SerializedName("result")
+ @JsonProperty("result")
protected short accessResult = 0; // 0 - DENIED; 1 - ALLOWED; HTTP return
// code
- @SerializedName("agent")
+ @JsonProperty("agent")
protected String agentId = null;
- @SerializedName("policy")
+ @JsonProperty("policy")
protected long policyId = 0;
- @SerializedName("reason")
+ @JsonProperty("reason")
protected String resultReason = null;
- @SerializedName("enforcer")
+ @JsonProperty("enforcer")
protected String aclEnforcer = null;
- @SerializedName("sess")
+ @JsonProperty("sess")
protected String sessionId = null;
- @SerializedName("cliType")
+ @JsonProperty("cliType")
protected String clientType = null;
- @SerializedName("cliIP")
+ @JsonProperty("cliIP")
protected String clientIP = null;
- @SerializedName("reqData")
+ @JsonProperty("reqData")
protected String requestData = null;
- @SerializedName("agentHost")
+ @JsonProperty("agentHost")
protected String agentHostname = null;
- @SerializedName("logType")
+ @JsonProperty("logType")
protected String logType = null;
- @SerializedName("id")
+ @JsonProperty("id")
protected String eventId = null;
/**
* This to ensure order within a session. Order not guaranteed across
* processes and hosts
*/
- @SerializedName("seq_num")
+ @JsonProperty("seq_num")
protected long seqNum = 0;
- @SerializedName("event_count")
+ @JsonProperty("event_count")
protected long eventCount = 1;
- @SerializedName("event_dur_ms")
+ @JsonProperty("event_dur_ms")
protected long eventDurationMS = 0;
- @SerializedName("tags")
+ @JsonProperty("tags")
 protected Set<String> tags = new HashSet<>();
- @SerializedName("additional_info")
+ @JsonProperty("datasets")
+ protected Set<String> datasets = null;
+
+ @JsonProperty("projects")
+ protected Set<String> projects = null;
+
+ @JsonProperty("additional_info")
protected String additionalInfo;
- @SerializedName("cluster_name")
+ @JsonProperty("cluster_name")
protected String clusterName;
- @SerializedName("zone_name")
+ @JsonProperty("zone_name")
protected String zoneName;
- @SerializedName("policy_version")
+ @JsonProperty("policy_version")
protected Long policyVersion;
public AuthzAuditEvent() {
@@ -474,18 +481,34 @@ public long getEventDurationMS() {
return eventDurationMS;
}
- public Set<String> getTags() {
- return tags;
- }
-
public void setEventDurationMS(long frequencyDurationMS) {
this.eventDurationMS = frequencyDurationMS;
}
+ public Set<String> getTags() {
+ return tags;
+ }
+
 public void setTags(Set<String> tags) {
this.tags = tags;
}
+ public Set<String> getDatasets() {
+ return datasets;
+ }
+
+ public void setDatasets(Set<String> datasets) {
+ this.datasets = datasets;
+ }
+
+ public Set<String> getProjects() {
+ return projects;
+ }
+
+ public void setProjects(Set<String> projects) {
+ this.projects = projects;
+ }
+
public String getClusterName() {
return clusterName;
}
@@ -514,6 +537,7 @@ public void setClusterName(String clusterName) {
public void setAdditionalInfo(String additionalInfo) { this.additionalInfo = additionalInfo; }
+ @JsonIgnore
@Override
public String getEventKey() {
String key = user + "^" + accessType + "^" + resourcePath + "^"
@@ -563,10 +587,9 @@ protected StringBuilder toString(StringBuilder sb) {
.append(FIELD_SEPARATOR).append("event_count=")
.append(eventCount).append(FIELD_SEPARATOR)
.append("event_dur_ms=").append(eventDurationMS)
- .append(FIELD_SEPARATOR)
- .append("tags=").append("[")
- .append(StringUtils.join(tags, ", "))
- .append("]")
+ .append(FIELD_SEPARATOR).append("tags=").append("[").append(StringUtils.join(tags, ", ")).append("]")
+ .append(FIELD_SEPARATOR).append("datasets=").append("[").append(datasets != null ? StringUtils.join(datasets, ", ") : "").append("]")
+ .append(FIELD_SEPARATOR).append("projects=").append("[").append(projects != null ? StringUtils.join(projects, ", ") : "").append("]")
.append(FIELD_SEPARATOR).append("clusterName=").append(clusterName)
.append(FIELD_SEPARATOR).append("zoneName=").append(zoneName)
.append(FIELD_SEPARATOR).append("policyVersion=").append(policyVersion)
@@ -574,10 +597,4 @@ protected StringBuilder toString(StringBuilder sb) {
return sb;
}
-
- @Override
- public void persist(DaoManager daoManager) {
- daoManager.getAuthzAuditEventDao().create(
- new AuthzAuditEventDbObj(this));
- }
}
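
The annotation swap above (Gson @SerializedName to Jackson @JsonProperty) changes the serializer while keeping the JSON wire names (repoType, reqUser, evtTime, ...) stable. A miniature equivalence check, using a hypothetical two-field class rather than the full event:

    import com.fasterxml.jackson.annotation.JsonProperty;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class JsonNameSketch {
        // Hypothetical miniature of AuthzAuditEvent: field names stay readable,
        // JSON keys keep the short legacy names originally chosen for Gson.
        public static class MiniEvent {
            @JsonProperty("repoType")
            public int repositoryType = 7;

            @JsonProperty("reqUser")
            public String user = "alice";
        }

        public static void main(String[] args) throws Exception {
            // Prints {"repoType":7,"reqUser":"alice"} - the same keys Gson emitted.
            System.out.println(new ObjectMapper().writeValueAsString(new MiniEvent()));
        }
    }
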
diff --git a/ugsync/src/main/java/org/apache/ranger/ldapusersync/process/PolicyMgrUserGroupBuilder.java b/agents-audit/src/main/java/org/apache/ranger/audit/model/SPOOL_FILE_STATUS.java
similarity index 71%
rename from ugsync/src/main/java/org/apache/ranger/ldapusersync/process/PolicyMgrUserGroupBuilder.java
rename to agents-audit/src/main/java/org/apache/ranger/audit/model/SPOOL_FILE_STATUS.java
index d27518119a..3765c9c923 100644
--- a/ugsync/src/main/java/org/apache/ranger/ldapusersync/process/PolicyMgrUserGroupBuilder.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/model/SPOOL_FILE_STATUS.java
@@ -17,13 +17,8 @@
* under the License.
*/
-package org.apache.ranger.ldapusersync.process;
+package org.apache.ranger.audit.model;
-public class PolicyMgrUserGroupBuilder extends org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder {
-
- public static void main(String[] args) throws Throwable {
- PolicyMgrUserGroupBuilder ugbuilder = new PolicyMgrUserGroupBuilder();
- ugbuilder.init();
-
- }
+public enum SPOOL_FILE_STATUS {
+ pending, write_inprogress, read_inprogress, done
}
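
The enum names the spool file's lifecycle: being written (write_inprogress), waiting for replay (pending), being replayed (read_inprogress), and retired (done). A sketch of the assumed happy-path progression; the real transition rules, including retries, live in the spool classes:

    import org.apache.ranger.audit.model.SPOOL_FILE_STATUS;

    public class SpoolStatusSketch {
        // Assumed happy-path ordering; the real spooler can also move a file
        // back for retry on failure.
        static SPOOL_FILE_STATUS next(SPOOL_FILE_STATUS s) {
            switch (s) {
                case write_inprogress: return SPOOL_FILE_STATUS.pending;
                case pending:          return SPOOL_FILE_STATUS.read_inprogress;
                default:               return SPOOL_FILE_STATUS.done;
            }
        }

        public static void main(String[] args) {
            SPOOL_FILE_STATUS s = SPOOL_FILE_STATUS.write_inprogress;
            while (s != SPOOL_FILE_STATUS.done) {
                System.out.println(s);
                s = next(s);
            }
            System.out.println(s);
        }
    }
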
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AsyncAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AsyncAuditProvider.java
index c74a3eac05..2a3ec77b6a 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AsyncAuditProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AsyncAuditProvider.java
@@ -25,14 +25,14 @@
import java.util.concurrent.atomic.AtomicLong;
import java.util.Properties;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.model.AuditEventBase;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class AsyncAuditProvider extends MultiDestAuditProvider implements
Runnable {
- private static final Log LOG = LogFactory.getLog(AsyncAuditProvider.class);
+ private static final Logger LOG = LoggerFactory.getLogger(AsyncAuditProvider.class);
private static int sThreadCount = 0;
@@ -120,7 +120,7 @@ public void stop() {
while (mThread.isAlive()) {
try {
LOG.info(String.format("Waiting for child thread of %s to exit. Sleeping for %d secs", mName, mStopLoopIntervalSecs));
- mThread.join(mStopLoopIntervalSecs * 1000);
+ mThread.join(mStopLoopIntervalSecs * 1000L);
} catch (InterruptedException e) {
LOG.warn("Interrupted while waiting for child thread to join! Proceeding with stop", e);
break;
@@ -255,7 +255,7 @@ public void waitToComplete(long maxWaitSeconds) {
&& (maxWaitSeconds <= 0 || maxWaitSeconds > waitTime); waitTime += mWaitToCompleteLoopIntervalSecs) {
try {
LOG.info(String.format("%d messages yet to be flushed by %s. Sleeoping for %d sec", mQueue.size(), mName, mWaitToCompleteLoopIntervalSecs));
- Thread.sleep(mWaitToCompleteLoopIntervalSecs * 1000);
+ Thread.sleep(mWaitToCompleteLoopIntervalSecs * 1000L);
} catch (InterruptedException excp) {
// someone really wants service to exit, abandon unwritten audits and exit.
LOG.warn("Caught interrupted exception! " + mQueue.size() + " messages still unflushed! Won't wait for queue to flush, exiting...", excp);
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditFileCacheProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditFileCacheProvider.java
index 8e3dd720d1..7957f2e10e 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditFileCacheProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditFileCacheProvider.java
@@ -18,10 +18,10 @@
*/
package org.apache.ranger.audit.provider;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.queue.AuditFileCacheProviderSpool;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.Properties;
@@ -31,7 +31,7 @@
*/
public class AuditFileCacheProvider extends BaseAuditHandler {
- private static final Log logger = LogFactory.getLog(AuditFileCacheProvider.class);
+ private static final Logger logger = LoggerFactory.getLogger(AuditFileCacheProvider.class);
AuditFileCacheProviderSpool fileSpooler = null;
AuditHandler consumer = null;
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditHandler.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditHandler.java
index 4ce31dd099..dd02255fc9 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditHandler.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditHandler.java
@@ -18,6 +18,7 @@
package org.apache.ranger.audit.provider;
+import java.io.File;
import java.util.Collection;
import java.util.Properties;
@@ -28,7 +29,8 @@ public interface AuditHandler {
 boolean log(Collection<AuditEventBase> events);
 boolean logJSON(String event);
- boolean logJSON(Collection<String> events);
+ boolean logJSON(Collection<String> events);
+ boolean logFile(File file);
void init(Properties prop);
void init(Properties prop, String basePropertyName);
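
logFile(File) joins the interface so a file-backed queue can hand an entire spool file to its consumer instead of replaying events one at a time. A skeletal implementer (illustrative only; Ranger's destinations define their own transfer logic):

    import java.io.File;

    public class FileHandlerSketch {
        // Illustrative consumer: a real destination would stream the file
        // contents to its backing store instead of printing.
        static boolean logFile(File file) {
            System.out.println("would ship " + file.getName() + " (" + file.length() + " bytes)");
            return true;
        }

        public static void main(String[] args) {
            logFile(new File("audit-0001.log"));
        }
    }
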
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProviderFactory.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProviderFactory.java
index 1be9c2f86a..c10dd9ffaa 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProviderFactory.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditProviderFactory.java
@@ -19,14 +19,13 @@
package org.apache.ranger.audit.provider;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.ranger.audit.destination.*;
import org.apache.ranger.audit.provider.hdfs.HdfsAuditProvider;
@@ -34,8 +33,11 @@
import org.apache.ranger.audit.provider.solr.SolrAuditProvider;
import org.apache.ranger.audit.queue.AuditAsyncQueue;
import org.apache.ranger.audit.queue.AuditBatchQueue;
+import org.apache.ranger.audit.queue.AuditFileQueue;
import org.apache.ranger.audit.queue.AuditQueue;
import org.apache.ranger.audit.queue.AuditSummaryQueue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/*
* TODO:
@@ -45,11 +47,10 @@
*/
public class AuditProviderFactory {
- private static final Log LOG = LogFactory
- .getLog(AuditProviderFactory.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(AuditProviderFactory.class);
public static final String AUDIT_IS_ENABLED_PROP = "xasecure.audit.is.enabled";
- public static final String AUDIT_DB_IS_ENABLED_PROP = "xasecure.audit.db.is.enabled";
public static final String AUDIT_HDFS_IS_ENABLED_PROP = "xasecure.audit.hdfs.is.enabled";
public static final String AUDIT_LOG4J_IS_ENABLED_PROP = "xasecure.audit.log4j.is.enabled";
public static final String AUDIT_KAFKA_IS_ENABLED_PROP = "xasecure.audit.kafka.is.enabled";
@@ -58,6 +59,8 @@ public class AuditProviderFactory {
public static final String AUDIT_DEST_BASE = "xasecure.audit.destination";
public static final String AUDIT_SHUTDOWN_HOOK_MAX_WAIT_SEC = "xasecure.audit.shutdown.hook.max.wait.seconds";
public static final String AUDIT_IS_FILE_CACHE_PROVIDER_ENABLE_PROP = "xasecure.audit.provider.filecache.is.enabled";
+ public static final String FILE_QUEUE_TYPE = "filequeue";
+ public static final String DEFAULT_QUEUE_TYPE = "memoryqueue";
public static final int AUDIT_SHUTDOWN_HOOK_MAX_WAIT_SEC_DEFAULT = 30;
public static final int AUDIT_ASYNC_MAX_QUEUE_SIZE_DEFAULT = 10 * 1024;
@@ -71,6 +74,7 @@ public class AuditProviderFactory {
private String componentAppType = "";
private boolean mInitDone = false;
private JVMShutdownHook jvmShutdownHook = null;
+ private ArrayList<String> hbaseAppTypes = new ArrayList<>(Arrays.asList("hbaseMaster", "hbaseRegional"));
public AuditProviderFactory() {
LOG.info("AuditProviderFactory: creating..");
@@ -128,8 +132,6 @@ public synchronized void init(Properties props, String appType) {
return;
}
- boolean isAuditToDbEnabled = MiscUtil.getBooleanProperty(props,
- AUDIT_DB_IS_ENABLED_PROP, false);
boolean isAuditToHdfsEnabled = MiscUtil.getBooleanProperty(props,
AUDIT_HDFS_IS_ENABLED_PROP, false);
boolean isAuditToLog4jEnabled = MiscUtil.getBooleanProperty(props,
@@ -200,13 +202,13 @@ public synchronized void init(Properties props, String appType) {
qProvider.init(props, queuePropPrefix);
providers.add(queueProvider);
} else {
- LOG.fatal("Provider queue doesn't extend AuditQueue. Destination="
+ LOG.error("Provider queue doesn't extend AuditQueue. Destination="
+ destName
+ " can't be created. queueName="
+ queueName);
}
} else {
- LOG.fatal("Queue provider for destination " + destName
+ LOG.error("Queue provider for destination " + destName
+ " can't be created. queueName=" + queueName);
}
} else {
@@ -278,7 +280,7 @@ public synchronized void init(Properties props, String appType) {
} else {
LOG.info("No v3 audit configuration found. Trying v2 audit configurations");
if (!isEnabled
- || !(isAuditToDbEnabled || isAuditToHdfsEnabled
+ || !(isAuditToHdfsEnabled
|| isAuditToKafkaEnabled || isAuditToLog4jEnabled
|| isAuditToSolrEnabled || providers.size() == 0)) {
LOG.info("AuditProviderFactory: Audit not enabled..");
@@ -288,31 +290,6 @@ public synchronized void init(Properties props, String appType) {
return;
}
- if (isAuditToDbEnabled) {
- LOG.info("DbAuditProvider is enabled");
- DbAuditProvider dbProvider = new DbAuditProvider();
-
- boolean isAuditToDbAsync = MiscUtil.getBooleanProperty(props,
- DbAuditProvider.AUDIT_DB_IS_ASYNC_PROP, false);
-
- if (isAuditToDbAsync) {
- int maxQueueSize = MiscUtil.getIntProperty(props,
- DbAuditProvider.AUDIT_DB_MAX_QUEUE_SIZE_PROP,
- AUDIT_ASYNC_MAX_QUEUE_SIZE_DEFAULT);
- int maxFlushInterval = MiscUtil.getIntProperty(props,
- DbAuditProvider.AUDIT_DB_MAX_FLUSH_INTERVAL_PROP,
- AUDIT_ASYNC_MAX_FLUSH_INTERVAL_DEFAULT);
-
- AsyncAuditProvider asyncProvider = new AsyncAuditProvider(
- "DbAuditProvider", maxQueueSize, maxFlushInterval,
- dbProvider);
-
- providers.add(asyncProvider);
- } else {
- providers.add(dbProvider);
- }
- }
-
if (isAuditToHdfsEnabled) {
LOG.info("HdfsAuditProvider is enabled");
@@ -411,7 +388,7 @@ public synchronized void init(Properties props, String appType) {
mProvider.start();
}
- installJvmSutdownHook(props);
+ installJvmShutdownHook(props);
}
private AuditHandler getProviderFromConfig(Properties props,
@@ -431,7 +408,7 @@ private AuditHandler getProviderFromConfig(Properties props,
.newInstance();
}
} catch (Exception e) {
- LOG.fatal("Can't instantiate audit class for providerName="
+ LOG.error("Can't instantiate audit class for providerName="
+ providerName + ", className=" + className
+ ", propertyPrefix=" + propPrefix, e);
}
@@ -444,14 +421,14 @@ private AuditHandler getProviderFromConfig(Properties props,
provider = new SolrAuditDestination();
} else if (providerName.equalsIgnoreCase("elasticsearch")) {
provider = new ElasticSearchAuditDestination();
+ } else if (providerName.equalsIgnoreCase("amazon_cloudwatch")) {
+ provider = new AmazonCloudWatchAuditDestination();
} else if (providerName.equalsIgnoreCase("kafka")) {
provider = new KafkaAuditProvider();
- } else if (providerName.equalsIgnoreCase("db")) {
- provider = new DBAuditDestination();
} else if (providerName.equalsIgnoreCase("log4j")) {
provider = new Log4JAuditDestination();
} else if (providerName.equalsIgnoreCase("batch")) {
- provider = new AuditBatchQueue(consumer);
+ provider = getAuditProvider(props, propPrefix, consumer);
} else if (providerName.equalsIgnoreCase("async")) {
provider = new AuditAsyncQueue(consumer);
} else {
@@ -461,7 +438,7 @@ private AuditHandler getProviderFromConfig(Properties props,
}
if (provider != null && provider instanceof AuditQueue) {
if (consumer == null) {
- LOG.fatal("consumer can't be null for AuditQueue. queue="
+ LOG.error("consumer can't be null for AuditQueue. queue="
+ provider.getName() + ", propertyPrefix=" + propPrefix);
provider = null;
}
@@ -469,14 +446,41 @@ private AuditHandler getProviderFromConfig(Properties props,
return provider;
}
+ private AuditHandler getAuditProvider(Properties props, String propPrefix, AuditHandler consumer) {
+ AuditHandler ret = null;
+ String queueType = MiscUtil.getStringProperty(props, propPrefix + "." + "queuetype", DEFAULT_QUEUE_TYPE);
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("==> AuditProviderFactory.getAuditProvider() propPerfix= " + propPrefix + ", " + " queueType= " + queueType);
+ }
+
+ if (FILE_QUEUE_TYPE.equalsIgnoreCase(queueType)) {
+ AuditFileQueue auditFileQueue = new AuditFileQueue(consumer);
+ String propPrefixFileQueue = propPrefix + "." + FILE_QUEUE_TYPE;
+ auditFileQueue.init(props, propPrefixFileQueue);
+ ret = new AuditBatchQueue(auditFileQueue);
+ } else {
+ ret = new AuditBatchQueue(consumer);
+ }
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("<== AuditProviderFactory.getAuditProvider()");
+ }
+
+ return ret;
+ }
+
private AuditHandler getDefaultProvider() {
return new DummyAuditProvider();
}
- private void installJvmSutdownHook(Properties props) {
+ private void installJvmShutdownHook(Properties props) {
int shutdownHookMaxWaitSeconds = MiscUtil.getIntProperty(props, AUDIT_SHUTDOWN_HOOK_MAX_WAIT_SEC, AUDIT_SHUTDOWN_HOOK_MAX_WAIT_SEC_DEFAULT);
jvmShutdownHook = new JVMShutdownHook(mProvider, shutdownHookMaxWaitSeconds);
- ShutdownHookManager.get().addShutdownHook(jvmShutdownHook, RANGER_AUDIT_SHUTDOWN_HOOK_PRIORITY);
+ String appType = this.componentAppType;
+ if (appType != null && !hbaseAppTypes.contains(appType)) {
+ ShutdownHookManager.get().addShutdownHook(jvmShutdownHook, RANGER_AUDIT_SHUTDOWN_HOOK_PRIORITY);
+ }
}
private static class RangerAsyncAuditCleanup implements Runnable {
@@ -498,7 +502,7 @@ public void run() {
try {
startCleanup.acquire();
} catch (InterruptedException e) {
- LOG.info("RangerAsyncAuditCleanup: Interrupted while waiting for audit startCleanup signal! Exiting the thread...", e);
+ LOG.error("RangerAsyncAuditCleanup: Interrupted while waiting for audit startCleanup signal! Exiting the thread...", e);
break;
}
LOG.info("RangerAsyncAuditCleanup: Starting cleanup");
@@ -543,7 +547,7 @@ public void run() {
LOG.warn("JVMShutdownHook: could not detect finishing of audit cleanup even after waiting for " + maxWait + " seconds!");
}
} catch (InterruptedException e) {
- LOG.info("JVMShutdownHook: Interrupted while waiting for completion of Async executor!", e);
+ LOG.error("JVMShutdownHook: Interrupted while waiting for completion of Async executor!", e);
}
LOG.info("JVMShutdownHook: Interrupting ranger async audit cleanup thread");
cleanupThread.interrupt();
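
getAuditProvider() above makes the durable queue a pure configuration choice: propPrefix + ".queuetype" set to "filequeue" wraps the consumer in an AuditFileQueue before the AuditBatchQueue; anything else (including the "memoryqueue" default) keeps the in-memory batch queue. A sketch of the assumed property layout (destination name "solr" and the spool-directory key are illustrative):

    import java.util.Properties;

    public class FileQueueConfigSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("xasecure.audit.is.enabled", "true");
            props.put("xasecure.audit.destination.solr", "true");
            // The queue's property prefix is <destination prefix>.batch, so:
            props.put("xasecure.audit.destination.solr.batch.queuetype", "filequeue");
            props.put("xasecure.audit.destination.solr.batch.filequeue.filespool.dir", "/var/log/ranger/spool");
            System.out.println(props);
        }
    }
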
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditWriterFactory.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditWriterFactory.java
new file mode 100644
index 0000000000..38844c95d3
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/AuditWriterFactory.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ranger.audit.provider;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.ranger.audit.utils.RangerAuditWriter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Map;
+import java.util.Properties;
+
+public class AuditWriterFactory {
+ private static final Logger logger = LoggerFactory.getLogger(AuditWriterFactory.class);
+ public static final String AUDIT_FILETYPE_DEFAULT = "json";
+ public static final String AUDIT_JSON_FILEWRITER_IMPL = "org.apache.ranger.audit.utils.RangerJSONAuditWriter";
+ public static final String AUDIT_ORC_FILEWRITER_IMPL = "org.apache.ranger.audit.utils.RangerORCAuditWriter";
+
+ public Map<String, String> auditConfigs = null;
+ public Properties props = null;
+ public String propPrefix = null;
+ public String auditProviderName = null;
+ public RangerAuditWriter auditWriter = null;
+ private static volatile AuditWriterFactory me = null;
+
+ public static AuditWriterFactory getInstance() {
+ AuditWriterFactory auditWriter = me;
+ if (auditWriter == null) {
+ synchronized (AuditWriterFactory.class) {
+ auditWriter = me;
+ if (auditWriter == null) {
+ me = auditWriter = new AuditWriterFactory();
+ }
+ }
+ }
+ return auditWriter;
+ }
+
+ public void init(Properties props, String propPrefix, String auditProviderName, Map<String, String> auditConfigs) throws Exception {
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> AuditWriterFactory.init()");
+ }
+ this.props = props;
+ this.propPrefix = propPrefix;
+ this.auditProviderName = auditProviderName;
+ this.auditConfigs = auditConfigs;
+ String auditFileType = MiscUtil.getStringProperty(props, propPrefix + ".batch.filequeue.filetype", AUDIT_FILETYPE_DEFAULT);
+ String writerClass = MiscUtil.getStringProperty(props, propPrefix + ".filewriter.impl");
+
+ auditWriter = StringUtils.isEmpty(writerClass) ? createWriter(getDefaultWriter(auditFileType)) : createWriter(writerClass);
+
+ if (auditWriter != null) {
+ auditWriter.init(props, propPrefix, auditProviderName, auditConfigs);
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== AuditWriterFactory.init() :" + auditWriter.getClass().getName());
+ }
+ }
+ }
+
+ public RangerAuditWriter createWriter(String writerClass) throws Exception {
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> AuditWriterFactory.createWriter()");
+ }
+ RangerAuditWriter ret = null;
+ try {
+ Class<? extends RangerAuditWriter> cls = Class.forName(writerClass).asSubclass(RangerAuditWriter.class);
+ ret = cls.newInstance();
+ } catch (Exception e) {
+ logger.error("AuditWriterFactory.createWriter(): failed to instantiate " + writerClass, e);
+ throw e;
+ }
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== AuditWriterFactory.createWriter()");
+ }
+ return ret;
+ }
+
+ public String getDefaultWriter(String auditFileType) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> AuditWriterFactory.getDefaultWriter()");
+ }
+ String ret = null;
+ switch (auditFileType) {
+ case "orc":
+ ret = AUDIT_ORC_FILEWRITER_IMPL;
+ break;
+ case "json":
+ ret = AUDIT_JSON_FILEWRITER_IMPL;
+ break;
+ }
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== AuditWriterFactory.getDefaultWriter() :" + ret);
+ }
+ return ret;
+ }
+
+ public RangerAuditWriter getAuditWriter(){
+ return this.auditWriter;
+ }
+}
\ No newline at end of file
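
AuditWriterFactory above resolves the writer in two steps: an explicit <prefix>.filewriter.impl class name wins; otherwise the <prefix>.batch.filequeue.filetype value picks the default ("json" -> RangerJSONAuditWriter, "orc" -> RangerORCAuditWriter). A small resolution sketch using only the public getDefaultWriter() hook (real writers need the full init() wiring before use):

    import org.apache.ranger.audit.provider.AuditWriterFactory;

    public class WriterFactorySketch {
        public static void main(String[] args) {
            AuditWriterFactory factory = AuditWriterFactory.getInstance();
            // filetype -> default writer implementation class name
            System.out.println(factory.getDefaultWriter("json")); // ...RangerJSONAuditWriter
            System.out.println(factory.getDefaultWriter("orc"));  // ...RangerORCAuditWriter
        }
    }
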
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditHandler.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditHandler.java
index 6138ca0eb7..94c6d754b0 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditHandler.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/BaseAuditHandler.java
@@ -18,13 +18,12 @@
*/
package org.apache.ranger.audit.provider;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.model.AuthzAuditEvent;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
-import com.google.gson.GsonBuilder;
-
+import java.io.File;
import java.util.*;
import java.util.concurrent.atomic.AtomicLong;
@@ -32,10 +31,14 @@
import javax.net.ssl.TrustManagerFactory;
public abstract class BaseAuditHandler implements AuditHandler {
- private static final Log LOG = LogFactory.getLog(BaseAuditHandler.class);
+ private static final Logger LOG = LoggerFactory.getLogger(BaseAuditHandler.class);
static final String AUDIT_LOG_FAILURE_REPORT_MIN_INTERVAL_PROP = "xasecure.audit.log.failure.report.min.interval.ms";
- protected static final String AUDIT_DB_CREDENTIAL_PROVIDER_FILE = "xasecure.audit.credential.provider.file";
+
+ static final String AUDIT_LOG_STATUS_LOG_ENABLED = "xasecure.audit.log.status.log.enabled";
+ static final String AUDIT_LOG_STATUS_LOG_INTERVAL_SEC = "xasecure.audit.log.status.log.interval.sec";
+ static final boolean DEFAULT_AUDIT_LOG_STATUS_LOG_ENABLED = false;
+ static final long DEFAULT_AUDIT_LOG_STATUS_LOG_INTERVAL_SEC = 5L * 60; // 5 minutes
public static final String RANGER_POLICYMGR_CLIENT_KEY_FILE = "xasecure.policymgr.clientssl.keystore";
public static final String RANGER_POLICYMGR_CLIENT_KEY_FILE_TYPE = "xasecure.policymgr.clientssl.keystore.type";
@@ -51,11 +54,11 @@ public abstract class BaseAuditHandler implements AuditHandler {
public static final String RANGER_SSL_KEYMANAGER_ALGO_TYPE = KeyManagerFactory.getDefaultAlgorithm();
public static final String RANGER_SSL_TRUSTMANAGER_ALGO_TYPE = TrustManagerFactory.getDefaultAlgorithm();
- public static final String RANGER_SSL_CONTEXT_ALGO_TYPE = "TLS";
+ public static final String RANGER_SSL_CONTEXT_ALGO_TYPE = "TLSv1.2";
public static final String PROP_CONFIG = "config";
-
- private int mLogFailureReportMinIntervalInMs = 60 * 1000;
+ public static final String FAILED_TO_LOG_AUDIT_EVENT = "failed to log audit event: {}";
+ private int mLogFailureReportMinIntervalInMs = 60 * 1000;
private AtomicLong mFailedLogLastReportTime = new AtomicLong(0);
private AtomicLong mFailedLogCountSinceLastReport = new AtomicLong(0);
@@ -89,11 +92,13 @@ public abstract class BaseAuditHandler implements AuditHandler {
long lastStashedCount = 0;
long lastDeferredCount = 0;
- long lastStatusLogTime = System.currentTimeMillis();
- long statusLogIntervalMS = 1 * 60 * 1000;
+ boolean statusLogEnabled = DEFAULT_AUDIT_LOG_STATUS_LOG_ENABLED;
+ long statusLogIntervalMS = DEFAULT_AUDIT_LOG_STATUS_LOG_INTERVAL_SEC * 1000;
+ long lastStatusLogTime = System.currentTimeMillis();
+ long nextStatusLogTime = lastStatusLogTime + statusLogIntervalMS;
protected Properties props = null;
- protected Map<String, String> configProps = new HashMap<String, String>();
+ protected Map<String, String> configProps = new HashMap<>();
@Override
public void init(Properties props) {
@@ -126,17 +131,22 @@ public void init(Properties props, String basePropertyName) {
}
LOG.info("providerName=" + getName());
- try {
- new GsonBuilder().setDateFormat("yyyyMMdd-HH:mm:ss.SSS-Z").create();
- } catch (Throwable excp) {
- LOG.warn(
- "Log4jAuditProvider.init(): failed to create GsonBuilder object. events will be formated using toString(), instead of Json",
- excp);
- }
-
mLogFailureReportMinIntervalInMs = MiscUtil.getIntProperty(props,
AUDIT_LOG_FAILURE_REPORT_MIN_INTERVAL_PROP, 60 * 1000);
+ boolean globalStatusLogEnabled = MiscUtil.getBooleanProperty(props, AUDIT_LOG_STATUS_LOG_ENABLED, DEFAULT_AUDIT_LOG_STATUS_LOG_ENABLED);
+ long globalStatusLogIntervalSec = MiscUtil.getLongProperty(props, AUDIT_LOG_STATUS_LOG_INTERVAL_SEC, DEFAULT_AUDIT_LOG_STATUS_LOG_INTERVAL_SEC);
+
+ statusLogEnabled = MiscUtil.getBooleanProperty(props, basePropertyName + ".status.log.enabled", globalStatusLogEnabled);
+ statusLogIntervalMS = MiscUtil.getLongProperty(props, basePropertyName + ".status.log.interval.sec", globalStatusLogIntervalSec) * 1000;
+
+ nextStatusLogTime = lastStatusLogTime + statusLogIntervalMS;
+
+ LOG.info(AUDIT_LOG_STATUS_LOG_ENABLED + "=" + globalStatusLogEnabled);
+ LOG.info(AUDIT_LOG_STATUS_LOG_INTERVAL_SEC + "=" + globalStatusLogIntervalSec);
+ LOG.info(basePropertyName + ".status.log.enabled=" + statusLogEnabled);
+ LOG.info(basePropertyName + ".status.log.interval.sec=" + (statusLogIntervalMS / 1000));
+
String configPropsNamePrefix = propPrefix + "." + PROP_CONFIG + ".";
for (Object propNameObj : props.keySet()) {
String propName = propNameObj.toString();
@@ -185,13 +195,18 @@ public boolean logJSON(String event) {
*/
@Override
 public boolean logJSON(Collection<String> events) {
- List<AuthzAuditEvent> eventList = new ArrayList<AuthzAuditEvent>(events.size());
+ List<AuthzAuditEvent> eventList = new ArrayList<>(events.size());
for (String event : events) {
eventList.add(MiscUtil.fromJson(event, AuthzAuditEvent.class));
}
return log(eventList);
}
+ @Override
+ public boolean logFile(File file) {
+ // no-op by default; handlers that can ship a whole spool file override this
+ return false;
+ }
+
public String getParentPath() {
return parentPath;
}
@@ -269,9 +284,10 @@ public long getLastDeferredCount() {
return lastDeferredCount;
}
+ public boolean isStatusLogEnabled() { return statusLogEnabled; }
+
public void logStatusIfRequired() {
- long currTime = System.currentTimeMillis();
- if ((currTime - lastStatusLogTime) > statusLogIntervalMS) {
+ if (System.currentTimeMillis() > nextStatusLogTime) {
logStatus();
}
}
@@ -279,9 +295,10 @@ public void logStatusIfRequired() {
public void logStatus() {
try {
long currTime = System.currentTimeMillis();
-
long diffTime = currTime - lastStatusLogTime;
+
lastStatusLogTime = currTime;
+ nextStatusLogTime = currTime + statusLogIntervalMS;
long diffCount = totalCount - lastIntervalCount;
long diffSuccess = totalSuccessCount - lastIntervalSuccessCount;
@@ -300,45 +317,50 @@ public void logStatus() {
lastStashedCount = totalStashedCount;
lastDeferredCount = totalDeferredCount;
- String finalPath = "";
- String tFinalPath = getFinalPath();
- if (!getName().equals(tFinalPath)) {
- finalPath = ", finalDestination=" + tFinalPath;
- }
+ if (statusLogEnabled) {
+ String finalPath = "";
+ String tFinalPath = getFinalPath();
+ if (!getName().equals(tFinalPath)) {
+ finalPath = ", finalDestination=" + tFinalPath;
+ }
- String msg = "Audit Status Log: name="
- + getName()
- + finalPath
- + ", interval="
- + formatIntervalForLog(diffTime)
- + ", events="
- + diffCount
- + (diffSuccess > 0 ? (", succcessCount=" + diffSuccess)
- : "")
- + (diffFailed > 0 ? (", failedCount=" + diffFailed) : "")
- + (diffStashed > 0 ? (", stashedCount=" + diffStashed) : "")
- + (diffDeferred > 0 ? (", deferredCount=" + diffDeferred)
- : "")
- + ", totalEvents="
- + totalCount
- + (totalSuccessCount > 0 ? (", totalSuccessCount=" + totalSuccessCount)
- : "")
- + (totalFailedCount > 0 ? (", totalFailedCount=" + totalFailedCount)
- : "")
- + (totalStashedCount > 0 ? (", totalStashedCount=" + totalStashedCount)
- : "")
- + (totalDeferredCount > 0 ? (", totalDeferredCount=" + totalDeferredCount)
- : "");
- LOG.info(msg);
- } catch (Throwable t) {
+ logAuditStatus(diffTime, diffCount, diffSuccess, diffFailed, diffStashed, diffDeferred, finalPath);
+ }
+ } catch (Exception t) {
 LOG.error("Error while printing stats. auditProvider=" + getName(), t);
}
}
-
- public void logError(String msg) {
+ private void logAuditStatus(long diffTime, long diffCount, long diffSuccess, long diffFailed, long diffStashed, long diffDeferred, String finalPath) {
+ String msg = "Audit Status Log: name="
+ + getName()
+ + finalPath
+ + ", interval="
+ + formatIntervalForLog(diffTime)
+ + ", events="
+ + diffCount
+ + (diffSuccess > 0 ? (", succcessCount=" + diffSuccess)
+ : "")
+ + (diffFailed > 0 ? (", failedCount=" + diffFailed) : "")
+ + (diffStashed > 0 ? (", stashedCount=" + diffStashed) : "")
+ + (diffDeferred > 0 ? (", deferredCount=" + diffDeferred)
+ : "")
+ + ", totalEvents="
+ + totalCount
+ + (totalSuccessCount > 0 ? (", totalSuccessCount=" + totalSuccessCount)
+ : "")
+ + (totalFailedCount > 0 ? (", totalFailedCount=" + totalFailedCount)
+ : "")
+ + (totalStashedCount > 0 ? (", totalStashedCount=" + totalStashedCount)
+ : "")
+ + (totalDeferredCount > 0 ? (", totalDeferredCount=" + totalDeferredCount)
+ : "");
+ LOG.info(msg);
+ }
+
+ public void logError(String msg, Object arg) {
long currTimeMS = System.currentTimeMillis();
if (currTimeMS - lastErrorLogMS > errorLogIntervalMS) {
- LOG.error(msg);
+ LOG.error(msg, arg);
lastErrorLogMS = currTimeMS;
}
}
@@ -391,19 +413,13 @@ public void logFailedEvent(AuditEventBase event, Throwable excp) {
mFailedLogCountSinceLastReport.set(0);
if (excp != null) {
- LOG.warn(
- "failed to log audit event: "
- + MiscUtil.stringify(event), excp);
+ LOG.warn(FAILED_TO_LOG_AUDIT_EVENT, MiscUtil.stringify(event), excp);
} else {
- LOG.warn("failed to log audit event: "
- + MiscUtil.stringify(event));
+ LOG.warn(FAILED_TO_LOG_AUDIT_EVENT, MiscUtil.stringify(event));
}
if (countLifeTime > 1) { // no stats to print for the 1st failure
- LOG.warn("Log failure count: " + countSinceLastReport
- + " in past "
- + formatIntervalForLog(timeSinceLastReport) + "; "
- + countLifeTime + " during process lifetime");
+ LOG.warn("Log failure count: {} in past {}; {} during process lifetime", countSinceLastReport, formatIntervalForLog(timeSinceLastReport), countLifeTime);
}
}
}
@@ -430,14 +446,10 @@ public void logFailedEvent(AuditEventBase event, String message) {
mFailedLogLastReportTime.set(now);
mFailedLogCountSinceLastReport.set(0);
- LOG.warn("failed to log audit event: " + MiscUtil.stringify(event)
- + ", errorMessage=" + message);
+ LOG.warn("failed to log audit event: {} , errorMessage={}", MiscUtil.stringify(event), message);
if (countLifeTime > 1) { // no stats to print for the 1st failure
- LOG.warn("Log failure count: " + countSinceLastReport
- + " in past "
- + formatIntervalForLog(timeSinceLastReport) + "; "
- + countLifeTime + " during process lifetime");
+ LOG.warn("Log failure count: {} in past {}; {} during process lifetime", countSinceLastReport, formatIntervalForLog(timeSinceLastReport), countLifeTime);
}
}
}
@@ -462,16 +474,13 @@ public void logFailedEventJSON(String event, Throwable excp) {
mFailedLogCountSinceLastReport.set(0);
if (excp != null) {
- LOG.warn("failed to log audit event: " + event, excp);
+ LOG.warn(FAILED_TO_LOG_AUDIT_EVENT, event, excp);
} else {
- LOG.warn("failed to log audit event: " + event);
+ LOG.warn(FAILED_TO_LOG_AUDIT_EVENT, event);
}
if (countLifeTime > 1) { // no stats to print for the 1st failure
- LOG.warn("Log failure count: " + countSinceLastReport
- + " in past "
- + formatIntervalForLog(timeSinceLastReport) + "; "
- + countLifeTime + " during process lifetime");
+ LOG.warn("Log failure count: {} in past {}; {} during process lifetime", countSinceLastReport, formatIntervalForLog(timeSinceLastReport), countLifeTime);
}
}
}
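
The rewritten warnings above lean on two SLF4J behaviors: {} placeholders defer message construction until the level is enabled, and a trailing Throwable beyond the placeholder count is logged as a stack trace rather than formatted into the message. That is why FAILED_TO_LOG_AUDIT_EVENT, with a single {}, can take both the event and the exception. A compact demonstration:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Slf4jSketch {
        private static final Logger LOG = LoggerFactory.getLogger(Slf4jSketch.class);

        public static void main(String[] args) {
            String event = "{resource:/data/file1}";

            // One placeholder, two arguments: the Throwable is printed as a
            // stack trace, not substituted into the message.
            LOG.warn("failed to log audit event: {}", event, new RuntimeException("solr down"));

            // No string concatenation happens here unless DEBUG is enabled.
            LOG.debug("failure count: {} in past {}", 3, "5 min");
        }
    }
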
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/DbAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/DbAuditProvider.java
deleted file mode 100644
index 95909a9c69..0000000000
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/DbAuditProvider.java
+++ /dev/null
@@ -1,370 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ranger.audit.provider;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Map;
-import java.util.Properties;
-
-import javax.persistence.EntityManager;
-import javax.persistence.EntityManagerFactory;
-import javax.persistence.EntityTransaction;
-import javax.persistence.Persistence;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.ranger.audit.dao.DaoManager;
-import org.apache.ranger.audit.destination.AuditDestination;
-import org.apache.ranger.audit.entity.AuthzAuditEventDbObj;
-import org.apache.ranger.audit.model.AuditEventBase;
-import org.apache.ranger.audit.model.AuthzAuditEvent;
-import org.apache.ranger.authorization.hadoop.utils.RangerCredentialProvider;
-
-
-/*
- * NOTE:
- * - Instances of this class are not thread-safe.
- */
-public class DbAuditProvider extends AuditDestination {
-
- private static final Log LOG = LogFactory.getLog(DbAuditProvider.class);
-
- public static final String AUDIT_DB_IS_ASYNC_PROP = "xasecure.audit.db.is.async";
- public static final String AUDIT_DB_MAX_QUEUE_SIZE_PROP = "xasecure.audit.db.async.max.queue.size";
- public static final String AUDIT_DB_MAX_FLUSH_INTERVAL_PROP = "xasecure.audit.db.async.max.flush.interval.ms";
-
- private static final String AUDIT_DB_BATCH_SIZE_PROP = "xasecure.audit.db.batch.size";
- private static final String AUDIT_DB_RETRY_MIN_INTERVAL_PROP = "xasecure.audit.db.config.retry.min.interval.ms";
- private static final String AUDIT_JPA_CONFIG_PROP_PREFIX = "xasecure.audit.jpa.";
- private static final String AUDIT_DB_CREDENTIAL_PROVIDER_FILE = "xasecure.audit.credential.provider.file";
- private static final String AUDIT_DB_CREDENTIAL_PROVIDER_ALIAS = "auditDBCred";
- private static final String AUDIT_JPA_JDBC_PASSWORD = "javax.persistence.jdbc.password";
-
- private EntityManagerFactory entityManagerFactory;
- private DaoManager daoManager;
-
- private int mCommitBatchSize = 1;
- private int mDbRetryMinIntervalMs = 60 * 1000;
- private ArrayList<AuditEventBase> mUncommitted = new ArrayList<AuditEventBase>();
- private Map<String, String> mDbProperties = null;
- private long mLastDbFailedTime = 0;
-
- public DbAuditProvider() {
- LOG.info("DbAuditProvider: creating..");
- }
-
- @Override
- public void init(Properties props) {
- LOG.info("DbAuditProvider.init()");
-
- super.init(props);
-
- mDbProperties = MiscUtil.getPropertiesWithPrefix(props, AUDIT_JPA_CONFIG_PROP_PREFIX);
- mCommitBatchSize = MiscUtil.getIntProperty(props, AUDIT_DB_BATCH_SIZE_PROP, 1000);
- mDbRetryMinIntervalMs = MiscUtil.getIntProperty(props, AUDIT_DB_RETRY_MIN_INTERVAL_PROP, 15 * 1000);
-
- boolean isAsync = MiscUtil.getBooleanProperty(props, AUDIT_DB_IS_ASYNC_PROP, false);
-
- if(! isAsync) {
- mCommitBatchSize = 1; // Batching not supported in sync mode
- }
-
- String jdbcPassword = getCredentialString(MiscUtil.getStringProperty(props, AUDIT_DB_CREDENTIAL_PROVIDER_FILE), AUDIT_DB_CREDENTIAL_PROVIDER_ALIAS);
-
- if(jdbcPassword != null && !jdbcPassword.isEmpty()) {
- mDbProperties.put(AUDIT_JPA_JDBC_PASSWORD, jdbcPassword);
- }
-
- // initialize the database related classes
- AuthzAuditEventDbObj.init(props);
- }
-
- @Override
- public boolean log(AuditEventBase event) {
- LOG.debug("DbAuditProvider.log()");
-
- boolean isSuccess = false;
-
- try {
- if(preCreate()) {
- DaoManager daoMgr = daoManager;
-
- if(daoMgr != null) {
- event.persist(daoMgr);
-
- isSuccess = postCreate(event);
- }
- }
- } catch(Exception excp) {
- logDbError("DbAuditProvider.log(): failed", excp);
- } finally {
- if(! isSuccess) {
- logFailedEvent(event);
- }
- }
- LOG.debug("<== DbAuditProvider.log()");
- return isSuccess;
- }
-
- @Override
- public boolean log(Collection<AuditEventBase> events) {
- boolean ret = true;
- for (AuditEventBase event : events) {
- ret = log(event);
- if(!ret) {
- break;
- }
- }
- return ret;
- }
-
- @Override
- public boolean logJSON(String event) {
- AuditEventBase eventObj = MiscUtil.fromJson(event,
- AuthzAuditEvent.class);
- return log(eventObj);
- }
-
- @Override
- public boolean logJSON(Collection<String> events) {
- boolean ret = true;
- for (String event : events) {
- ret = logJSON(event);
- if( !ret ) {
- break;
- }
- }
- return ret;
- }
-
- @Override
- public void start() {
- LOG.info("DbAuditProvider.start()");
-
- init();
- }
-
- @Override
- public void stop() {
- LOG.info("DbAuditProvider.stop()");
-
- cleanUp();
- }
-
- @Override
- public void flush() {
- if(mUncommitted.size() > 0) {
- boolean isSuccess = commitTransaction();
-
- if(! isSuccess) {
- for(AuditEventBase evt : mUncommitted) {
- logFailedEvent(evt);
- }
- }
-
- mUncommitted.clear();
- }
- }
-
- private synchronized boolean init() {
- long now = System.currentTimeMillis();
-
- if((now - mLastDbFailedTime) < mDbRetryMinIntervalMs) {
- return false;
- }
-
- LOG.info("DbAuditProvider: init()");
- LOG.info("java.library.path:"+System.getProperty("java.library.path"));
- try {
- entityManagerFactory = Persistence.createEntityManagerFactory("xa_server", mDbProperties);
-
- daoManager = new DaoManager();
- daoManager.setEntityManagerFactory(entityManagerFactory);
-
- daoManager.getEntityManager(); // this forces the connection to be made to DB
- } catch(Exception excp) {
- logDbError("DbAuditProvider: DB initalization failed", excp);
-
- cleanUp();
-
- return false;
- }
-
- return true;
- }
-
- private synchronized void cleanUp() {
- LOG.info("DbAuditProvider: cleanUp()");
-
- try {
- if(entityManagerFactory != null && entityManagerFactory.isOpen()) {
- entityManagerFactory.close();
- }
- } catch(Exception excp) {
- LOG.error("DbAuditProvider.cleanUp(): failed", excp);
- } finally {
- entityManagerFactory = null;
- daoManager = null;
- }
- }
-
- private boolean isDbConnected() {
- EntityManager em = getEntityManager();
-
- return em != null && em.isOpen();
- }
-
- private EntityManager getEntityManager() {
- DaoManager daoMgr = daoManager;
-
- if(daoMgr != null) {
- try {
- return daoMgr.getEntityManager();
- } catch(Exception excp) {
- logDbError("DbAuditProvider.getEntityManager(): failed", excp);
-
- cleanUp();
- }
- }
-
- return null;
- }
-
- private void clearEntityManager() {
- try {
- EntityManager em = getEntityManager();
-
- if(em != null) {
- em.clear();
- }
- } catch(Exception excp) {
- LOG.warn("DbAuditProvider.clearEntityManager(): failed", excp);
- }
- }
-
- private EntityTransaction getTransaction() {
- EntityManager em = getEntityManager();
-
- return em != null ? em.getTransaction() : null;
- }
-
- private boolean isInTransaction() {
- EntityTransaction trx = getTransaction();
-
- return trx != null && trx.isActive();
- }
-
- private boolean beginTransaction() {
- EntityTransaction trx = getTransaction();
-
- if(trx != null && !trx.isActive()) {
- trx.begin();
- }
-
- if(trx == null) {
- LOG.warn("DbAuditProvider.beginTransaction(): trx is null");
- }
-
- return trx != null;
- }
-
- private boolean commitTransaction() {
- boolean ret = false;
- EntityTransaction trx = null;
-
- try {
- trx = getTransaction();
-
- if(trx != null && trx.isActive()) {
- trx.commit();
-
- ret =true;
- } else {
- throw new Exception("trx is null or not active");
- }
- } catch(Exception excp) {
- logDbError("DbAuditProvider.commitTransaction(): failed", excp);
-
- cleanUp(); // so that next insert will try to init()
- } finally {
- clearEntityManager();
- }
-
- return ret;
- }
-
- private boolean preCreate() {
- boolean ret = true;
-
- if(!isDbConnected()) {
- ret = init();
- }
-
- if(ret) {
- if(! isInTransaction()) {
- ret = beginTransaction();
- }
- }
-
- return ret;
- }
-
- private boolean postCreate(AuditEventBase event) {
- boolean ret = true;
-
- if(mCommitBatchSize <= 1) {
- ret = commitTransaction();
- } else {
- mUncommitted.add(event);
-
- if((mUncommitted.size() % mCommitBatchSize) == 0) {
- ret = commitTransaction();
-
- if(! ret) {
- for(AuditEventBase evt : mUncommitted) {
- if(evt != event) {
- logFailedEvent(evt);
- }
- }
- }
-
- mUncommitted.clear();
- }
- }
- return ret;
- }
-
- private void logDbError(String msg, Exception excp) {
- long now = System.currentTimeMillis();
-
- if((now - mLastDbFailedTime) > mDbRetryMinIntervalMs) {
- mLastDbFailedTime = now;
- }
-
- LOG.warn(msg, excp);
- }
-
- private String getCredentialString(String url,String alias) {
- if(url != null && alias != null) {
- return RangerCredentialProvider.getInstance().getCredentialString(url,alias);
- }
- return null;
- }
-}
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/DummyAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/DummyAuditProvider.java
index 05f882ff32..cbd25ab7c7 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/DummyAuditProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/DummyAuditProvider.java
@@ -17,6 +17,7 @@
*/
package org.apache.ranger.audit.provider;
+import java.io.File;
import java.util.Collection;
import java.util.Properties;
@@ -103,4 +104,12 @@ public String getName() {
return this.getClass().getName();
}
+ /* (non-Javadoc)
+ * @see org.apache.ranger.audit.provider.AuditProvider#logFile(java.io.File)
+ */
+ @Override
+ public boolean logFile(File file) {
+ // dummy provider: discard the file and report success
+ return true;
+ }
+
}
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/LocalFileLogBuffer.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/LocalFileLogBuffer.java
index 769b86c632..d720ebcccb 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/LocalFileLogBuffer.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/LocalFileLogBuffer.java
@@ -32,7 +32,7 @@
import java.io.OutputStreamWriter;
import java.io.UnsupportedEncodingException;
import java.io.Writer;
-import java.security.PrivilegedAction;
+import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.Comparator;
import java.util.TreeSet;
@@ -244,16 +244,11 @@ private synchronized void openFile() {
if(ostream != null) {
mWriter = createWriter(ostream);
-
- if(mWriter != null) {
- mLogger.debug("LocalFileLogBuffer.openFile(): opened file " + mBufferFilename);
-
- mNextFlushTime = System.currentTimeMillis() + (mFlushIntervalSeconds * 1000L);
- } else {
- mLogger.warn("LocalFileLogBuffer.openFile(): failed to open file for write " + mBufferFilename);
-
- mBufferFilename = null;
- }
+ mLogger.debug("LocalFileLogBuffer.openFile(): opened file " + mBufferFilename);
+ mNextFlushTime = System.currentTimeMillis() + (mFlushIntervalSeconds * 1000L);
+ } else {
+ mLogger.warn("LocalFileLogBuffer.openFile(): failed to open file for write " + mBufferFilename);
+ mBufferFilename = null;
}
} finally {
if(mWriter == null) {
@@ -387,7 +382,7 @@ public void addLogfile(String filename) {
if(filename != null) {
synchronized(mCompletedLogfiles) {
mCompletedLogfiles.add(filename);
- mCompletedLogfiles.notify();
+ mCompletedLogfiles.notifyAll();
}
}
@@ -420,14 +415,15 @@ public void run() {
return;
}
- loginUser.doAs(new PrivilegedAction<Integer>() {
- @Override
- public Integer run() {
+ try {
+ loginUser.doAs((PrivilegedExceptionAction<Integer>) () -> {
doRun();
return 0;
- }
- });
+ });
+ } catch (Exception excp) {
+ mLogger.error("DestinationDispatcherThread.run(): failed", excp);
+ }
}
private void doRun() {
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jAuditProvider.java
index 353c809633..0a874ef7df 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jAuditProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jAuditProvider.java
@@ -21,17 +21,17 @@
import java.util.Collection;
import java.util.Properties;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.destination.AuditDestination;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.model.AuthzAuditEvent;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class Log4jAuditProvider extends AuditDestination {
- private static final Log LOG = LogFactory.getLog(Log4jAuditProvider.class);
- private static final Log AUDITLOG = LogFactory.getLog("xaaudit." + Log4jAuditProvider.class.getName());
+ private static final Logger LOG = LoggerFactory.getLogger(Log4jAuditProvider.class);
+ private static final Logger AUDITLOG = LoggerFactory.getLogger("xaaudit." + Log4jAuditProvider.class.getName());
public static final String AUDIT_LOG4J_IS_ASYNC_PROP = "xasecure.audit.log4j.is.async";
public static final String AUDIT_LOG4J_MAX_QUEUE_SIZE_PROP = "xasecure.audit.log4j.async.max.queue.size";
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jTracer.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jTracer.java
index bdb1a4774d..cd5befcd2d 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jTracer.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/Log4jTracer.java
@@ -16,12 +16,12 @@
*/
package org.apache.ranger.audit.provider;
-import org.apache.commons.logging.Log;
+import org.slf4j.Logger;
public class Log4jTracer implements DebugTracer {
- private Log mLogger = null;
+ private Logger mLogger = null;
- public Log4jTracer(Log logger) {
+ public Log4jTracer(Logger logger) {
mLogger = logger;
}
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/MiscUtil.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/MiscUtil.java
index e2b74489b8..8004b75dde 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/MiscUtil.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/MiscUtil.java
@@ -24,6 +24,9 @@
import java.security.PrivilegedAction;
import java.security.PrivilegedExceptionAction;
import java.text.SimpleDateFormat;
+import java.time.LocalDateTime;
+import java.time.ZoneId;
+import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collections;
@@ -46,23 +49,23 @@
import javax.security.auth.login.Configuration;
import javax.security.auth.login.LoginContext;
+import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.commons.lang.ArrayUtils;
import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
-import org.apache.log4j.helpers.LogLog;
import org.apache.ranger.authorization.hadoop.utils.RangerCredentialProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
+import com.fasterxml.jackson.core.JsonParser;
+import static com.fasterxml.jackson.databind.DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES;
import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
public class MiscUtil {
- private static final Log logger = LogFactory.getLog(MiscUtil.class);
+ private static final Logger logger = LoggerFactory.getLogger(MiscUtil.class);
public static final String TOKEN_START = "%";
public static final String TOKEN_END = "%";
@@ -78,7 +81,21 @@ public class MiscUtil {
public static String LINE_SEPARATOR = System.getProperty("line.separator");
- private static Gson sGsonBuilder = null;
+ static private final ThreadLocal<ObjectMapper> MAPPER = new ThreadLocal<ObjectMapper>() {
+ @Override
+ protected ObjectMapper initialValue() {
+ ObjectMapper objectMapper = new ObjectMapper();
+ SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
+ objectMapper.setDateFormat(dateFormat);
+ objectMapper.configure(FAIL_ON_UNKNOWN_PROPERTIES, false);
+ objectMapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
+ return objectMapper;
+ }
+ };
+
+ static public ObjectMapper getMapper() {
+ return MAPPER.get();
+ }
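+ // NOTE on MAPPER/getMapper(): a configured ObjectMapper is safe for concurrent
+ // use, but the thread-local avoids sharing the non-thread-safe SimpleDateFormat
+ // and any cross-thread contention. Illustrative usage (auditEvent is hypothetical):
+ //   String json = MiscUtil.getMapper().writeValueAsString(auditEvent);
+ //   AuthzAuditEvent evt = MiscUtil.getMapper().readValue(json, AuthzAuditEvent.class);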
private static String sApplicationType = null;
private static UserGroupInformation ugiLoginUser = null;
private static Subject subjectLoginUser = null;
@@ -88,15 +105,6 @@ public class MiscUtil {
private static int logInterval = 30000; // 30 seconds
static {
- try {
- sGsonBuilder = new GsonBuilder().setDateFormat(
- "yyyy-MM-dd HH:mm:ss.SSS").create();
- } catch (Throwable excp) {
- LogLog.warn(
- "failed to create GsonBuilder object. stringify() will return obj.toString(), instead of Json",
- excp);
- }
-
initLocalHost();
}
@@ -203,7 +211,7 @@ public static String getSystemProperty(String propertyName) {
ret = propertyName != null ? System.getProperty(propertyName)
: null;
} catch (Exception excp) {
- LogLog.warn("getSystemProperty(" + propertyName + ") failed", excp);
+ logger.warn("getSystemProperty(" + propertyName + ") failed", excp);
}
return ret;
@@ -215,7 +223,7 @@ public static String getEnv(String envName) {
try {
ret = envName != null ? System.getenv(envName) : null;
} catch (Exception excp) {
- LogLog.warn("getenv(" + envName + ") failed", excp);
+ logger.warn("getenv(" + envName + ") failed", excp);
}
return ret;
@@ -229,7 +237,7 @@ public static String getFormattedTime(long time, String format) {
ret = sdf.format(time);
} catch (Exception excp) {
- LogLog.warn("SimpleDateFormat.format() failed: " + format, excp);
+ logger.warn("SimpleDateFormat.format() failed: " + format, excp);
}
return ret;
@@ -244,7 +252,7 @@ public static void createParents(File file) {
if (!parentDir.exists()) {
if (!parentDir.mkdirs()) {
- LogLog.warn("createParents(): failed to create "
+ logger.warn("createParents(): failed to create "
+ parentDir.getAbsolutePath());
}
}
@@ -312,8 +320,13 @@ public static <T> String stringify(T log) {
if (log != null) {
if (log instanceof String) {
ret = (String) log;
- } else if (MiscUtil.sGsonBuilder != null) {
- ret = MiscUtil.sGsonBuilder.toJson(log);
+ } else if (getMapper() != null) {
+ try {
+ ret = getMapper().writeValueAsString(log);
+ } catch (Exception e) {
+ logger.error("Error occurred while processing JSOn object " + log, e);
+ ret = log.toString(); // Fallback to default toString() method
+ }
} else {
ret = log.toString();
}
@@ -323,7 +336,12 @@ public static String stringify(T log) {
}
static public <T> T fromJson(String jsonStr, Class<T> clazz) {
- return sGsonBuilder.fromJson(jsonStr, clazz);
+ try {
+ return getMapper().readValue(jsonStr, clazz);
+ } catch (Exception exception) {
+ logger.error("Error occurred while processing JSOn object " + jsonStr, exception);
+ }
+ return null;
}
public static String getStringProperty(Properties props, String propName) {
@@ -339,6 +357,19 @@ public static String getStringProperty(Properties props, String propName) {
return ret;
}
+ public static String getStringProperty(Properties props, String propName, String defValue) {
+ String ret = defValue;
+
+ if (props != null && propName != null) {
+ String val = props.getProperty(propName);
+ if (val != null) {
+ ret = val;
+ }
+ }
+
+ return ret;
+ }
+
public static boolean getBooleanProperty(Properties props, String propName,
boolean defValue) {
boolean ret = defValue;
@@ -461,7 +492,7 @@ public static UserGroupInformation createUGIFromSubject(Subject subject)
logger.info("Default UGI before using new Subject:"
+ UserGroupInformation.getLoginUser());
} catch (Throwable t) {
- logger.error(t);
+ logger.error("failed to get login user", t);
}
ugi = UserGroupInformation.getUGIFromSubject(subject);
logger.info("SUBJECT.UGI.NAME=" + ugi.getUserName() + ", ugi="
@@ -600,7 +631,7 @@ static public Set<String> getGroupsForRequestUser(String userName) {
return Collections.emptySet();
}
- static public boolean logErrorMessageByInterval(Log useLogger,
+ static public boolean logErrorMessageByInterval(Logger useLogger,
String message) {
return logErrorMessageByInterval(useLogger, message, null);
}
@@ -610,7 +641,7 @@ static public boolean logErrorMessageByInterval(Log useLogger,
* @param message
* @param e
*/
- static public boolean logErrorMessageByInterval(Log useLogger,
+ static public boolean logErrorMessageByInterval(Logger useLogger,
String message, Throwable e) {
if (message == null) {
return false;
@@ -771,6 +802,45 @@ public static void authWithKerberos(String keytab, String principal,
}
+ public static void loginWithKeyTab(String keytab, String principal, String nameRules) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> MiscUtil.loginWithKeyTab() keytab= " + keytab + "principal= " + principal + "nameRules= " + nameRules);
+ }
+
+ if (keytab == null || principal == null) {
+ logger.error("Failed to login as keytab or principal is null!");
+ return;
+ }
+
+ String[] spnegoPrincipals;
+ UserGroupInformation ugi;
+
+ try {
+ if (principal.equals("*")) {
+ spnegoPrincipals = KerberosUtil.getPrincipalNames(keytab, Pattern.compile("HTTP/.*"));
+ if (spnegoPrincipals.length == 0) {
+ logger.error("No principals found in keytab= " + keytab);
+ }
+ } else {
+ spnegoPrincipals = new String[] { principal };
+ }
+
+ if (nameRules != null) {
+ KerberosName.setRules(nameRules);
+ }
+
+ logger.info("Creating UGI from keytab directly. keytab= " + keytab + ", principal= " + spnegoPrincipals[0]);
+ ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(spnegoPrincipals[0], keytab);
+ MiscUtil.setUGILoginUser(ugi, null);
+ } catch (Exception e) {
+ logger.error("Failed to login with given keytab= " + keytab + "principal= " + principal + "nameRules= " + nameRules, e);
+ }
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== MiscUtil.loginWithKeyTab()");
+ }
+ }
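+
+ // Illustrative call (keytab path, principal and realm are placeholders):
+ //   MiscUtil.loginWithKeyTab("/etc/security/keytabs/ranger.service.keytab",
+ //           "rangeradmin/host1.example.com@EXAMPLE.COM", null);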
+
static class LogHistory {
long lastLogTime = 0;
int counter = 0;
@@ -842,7 +912,7 @@ private static void initLocalHost() {
try {
local_hostname = InetAddress.getLocalHost().getHostName();
} catch (Throwable excp) {
- LogLog.warn("getHostname()", excp);
+ logger.warn("getHostname()", excp);
}
if ( logger.isDebugEnabled() ) {
logger.debug("<== MiscUtil.initLocalHost()");
@@ -872,4 +942,73 @@ private static class RandomHolder {
static final Random random = new Random();
}
+ // Utility methods
+ public static int toInt(Object value) {
+ if (value == null) {
+ return 0;
+ }
+ if (value instanceof Integer) {
+ return (Integer) value;
+ }
+ if (value.toString().isEmpty()) {
+ return 0;
+ }
+ try {
+ return Integer.valueOf(value.toString());
+ } catch (Throwable t) {
+ logger.error("Error converting value to integer. Value = " + value, t);
+ }
+ return 0;
+ }
+
+ public static long toLong(Object value) {
+ if (value == null) {
+ return 0;
+ }
+ if (value instanceof Long) {
+ return (Long) value;
+ }
+ if (value.toString().isEmpty()) {
+ return 0;
+ }
+ try {
+ return Long.valueOf(value.toString());
+ } catch (Throwable t) {
+ logger.error("Error converting value to long. Value = " + value, t);
+ }
+ return 0;
+ }
+
+ public static Date toDate(Object value) {
+ if (value == null) {
+ return null;
+ }
+ if (value instanceof Date) {
+ return (Date) value;
+ }
+ try {
+ // TODO: Do proper parsing based on Solr response value
+ return new Date(value.toString());
+ } catch (Throwable t) {
+ logger.error("Error converting value to date. Value = " + value, t);
+ }
+ return null;
+ }
+
+ public static Date toLocalDate(Object value) {
+ if (value == null) {
+ return null;
+ }
+ if (value instanceof Date) {
+ return (Date) value;
+ }
+ try {
+ LocalDateTime localDateTime = LocalDateTime.parse(value.toString(), DateTimeFormatter.ISO_DATE_TIME);
+ return Date.from(localDateTime.atZone(ZoneId.systemDefault()).toInstant());
+ } catch (Throwable t) {
+ logger.error("Error converting value to date. Value = " + value, t);
+ }
+ return null;
+ }
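+ // e.g. toLocalDate("2023-01-15T10:30:00") parses with DateTimeFormatter.ISO_DATE_TIME
+ // and yields the same instant as LocalDateTime.of(2023, 1, 15, 10, 30)
+ // .atZone(ZoneId.systemDefault()), converted to java.util.Date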
+
}
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/MultiDestAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/MultiDestAuditProvider.java
index 282f5abfa0..5ac8c0ee03 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/MultiDestAuditProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/MultiDestAuditProvider.java
@@ -17,19 +17,20 @@
*/
package org.apache.ranger.audit.provider;
+import java.io.File;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.model.AuditEventBase;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class MultiDestAuditProvider extends BaseAuditHandler {
- private static final Log LOG = LogFactory
- .getLog(MultiDestAuditProvider.class);
+ private static final Logger LOG = LoggerFactory
+ .getLogger(MultiDestAuditProvider.class);
protected List<AuditHandler> mProviders = new ArrayList<AuditHandler>();
static final String DEFAULT_NAME = "multi_dest";
@@ -86,8 +87,10 @@ public void setName(String name) {
public void addAuditProvider(AuditHandler provider) {
if (provider != null) {
- LOG.info("MultiDestAuditProvider.addAuditProvider(providerType="
- + provider.getClass().getCanonicalName() + ")");
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("MultiDestAuditProvider.addAuditProvider(providerType="
+ + provider.getClass().getCanonicalName() + ")");
+ }
mProviders.add(provider);
if (provider instanceof BaseAuditHandler) {
@@ -155,6 +158,19 @@ public boolean logJSON(Collection<String> events) {
return true;
}
+
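+ // fans the file out to every registered provider; like logJSON() above, it
+ // always reports success and routes per-provider failures to logFailedEventJSON()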
+ @Override
+ public boolean logFile(File file) {
+ for (AuditHandler provider : mProviders) {
+ try {
+ provider.logFile(file);
+ } catch (Throwable excp) {
+ logFailedEventJSON(file.getAbsolutePath(), excp);
+ }
+ }
+ return true;
+ }
+
@Override
public void start() {
for (AuditHandler provider : mProviders) {
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/StandAloneAuditProviderFactory.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/StandAloneAuditProviderFactory.java
index 4306b24c0e..5ed77da538 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/StandAloneAuditProviderFactory.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/StandAloneAuditProviderFactory.java
@@ -18,11 +18,11 @@
package org.apache.ranger.audit.provider;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class StandAloneAuditProviderFactory extends AuditProviderFactory {
- private static final Log LOG = LogFactory.getLog(StandAloneAuditProviderFactory.class);
+ private static final Logger LOG = LoggerFactory.getLogger(StandAloneAuditProviderFactory.class);
private volatile static StandAloneAuditProviderFactory sFactory = null;
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/hdfs/HdfsAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/hdfs/HdfsAuditProvider.java
index 65429ad67c..83ff017081 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/hdfs/HdfsAuditProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/hdfs/HdfsAuditProvider.java
@@ -19,17 +19,17 @@
import java.util.Map;
import java.util.Properties;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.provider.BufferedAuditProvider;
import org.apache.ranger.audit.provider.DebugTracer;
import org.apache.ranger.audit.provider.LocalFileLogBuffer;
import org.apache.ranger.audit.provider.Log4jTracer;
import org.apache.ranger.audit.provider.MiscUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class HdfsAuditProvider extends BufferedAuditProvider {
- private static final Log LOG = LogFactory.getLog(HdfsAuditProvider.class);
+ private static final Logger LOG = LoggerFactory.getLogger(HdfsAuditProvider.class);
public static final String AUDIT_HDFS_IS_ASYNC_PROP = "xasecure.audit.hdfs.is.async";
public static final String AUDIT_HDFS_MAX_QUEUE_SIZE_PROP = "xasecure.audit.hdfs.async.max.queue.size";
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/kafka/KafkaAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/kafka/KafkaAuditProvider.java
index a0c25427a8..3a452c22a6 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/kafka/KafkaAuditProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/kafka/KafkaAuditProvider.java
@@ -16,7 +16,7 @@
*/
package org.apache.ranger.audit.provider.kafka;
-import java.security.PrivilegedAction;
+import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
@@ -24,16 +24,16 @@
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.destination.AuditDestination;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.model.AuthzAuditEvent;
import org.apache.ranger.audit.provider.MiscUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class KafkaAuditProvider extends AuditDestination {
- private static final Log LOG = LogFactory.getLog(KafkaAuditProvider.class);
+ private static final Logger LOG = LoggerFactory.getLogger(KafkaAuditProvider.class);
public static final String AUDIT_MAX_QUEUE_SIZE_PROP = "xasecure.audit.kafka.async.max.queue.size";
public static final String AUDIT_MAX_FLUSH_INTERVAL_PROP = "xasecure.audit.kafka.async.max.flush.interval.ms";
@@ -74,18 +74,12 @@ public void init(Properties props) {
LOG.info("Connecting to Kafka producer using properties:"
+ kakfaProps.toString());
- producer = MiscUtil.executePrivilegedAction(new PrivilegedAction<Producer<String, String>>() {
- @Override
- public Producer<String, String> run(){
- Producer<String, String> producer = new KafkaProducer<String, String>(kakfaProps);
- return producer;
- };
- });
+ producer = MiscUtil.executePrivilegedAction((PrivilegedExceptionAction<Producer<String, String>>) () -> new KafkaProducer<>(kakfaProps));
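+ // MiscUtil.executePrivilegedAction() is expected to run the lambda under the
+ // current login Subject (e.g. Kerberos) when one is present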
initDone = true;
}
} catch (Throwable t) {
- LOG.fatal("Error initializing kafka:", t);
+ LOG.error("Error initializing kafka:", t);
}
}
@@ -115,12 +109,9 @@ public boolean log(AuditEventBase event) {
final ProducerRecord<String, String> keyedMessage = new ProducerRecord<String, String>(
topic, message);
- MiscUtil.executePrivilegedAction(new PrivilegedAction<Void>() {
- @Override
- public Void run(){
- producer.send(keyedMessage);
- return null;
- };
+ MiscUtil.executePrivilegedAction((PrivilegedExceptionAction<Void>) () -> {
+ producer.send(keyedMessage);
+ return null;
});
} else {
@@ -169,12 +160,9 @@ public void stop() {
LOG.info("stop() called");
if (producer != null) {
try {
- MiscUtil.executePrivilegedAction(new PrivilegedAction<Void>() {
- @Override
- public Void run() {
- producer.close();
- return null;
- };
+ MiscUtil.executePrivilegedAction((PrivilegedExceptionAction<Void>) () -> {
+ producer.close();
+ return null;
});
} catch (Throwable t) {
LOG.error("Error closing Kafka producer");
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/provider/solr/SolrAuditProvider.java b/agents-audit/src/main/java/org/apache/ranger/audit/provider/solr/SolrAuditProvider.java
index dac006c6e3..691cef0021 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/provider/solr/SolrAuditProvider.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/provider/solr/SolrAuditProvider.java
@@ -19,14 +19,13 @@
package org.apache.ranger.audit.provider.solr;
+import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.Properties;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.destination.AuditDestination;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.model.AuthzAuditEvent;
@@ -36,9 +35,11 @@
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.UpdateResponse;
import org.apache.solr.common.SolrInputDocument;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class SolrAuditProvider extends AuditDestination {
- private static final Log LOG = LogFactory.getLog(SolrAuditProvider.class);
+ private static final Logger LOG = LoggerFactory.getLogger(SolrAuditProvider.class);
public static final String AUDIT_MAX_QUEUE_SIZE_PROP = "xasecure.audit.solr.async.max.queue.size";
public static final String AUDIT_MAX_FLUSH_INTERVAL_PROP = "xasecure.audit.solr.async.max.flush.interval.ms";
@@ -88,7 +89,7 @@ void connect() {
lastConnectTime = new Date();
if (solrURL == null || solrURL.isEmpty()) {
- LOG.fatal("Solr URL for Audit is empty");
+ LOG.error("Solr URL for Audit is empty");
return;
}
@@ -108,7 +109,7 @@ public SolrClient run() throws Exception {
me = solrClient;
} catch (Throwable t) {
- LOG.fatal("Can't connect to Solr server. URL="
+ LOG.error("Can't connect to Solr server. URL="
+ solrURL, t);
}
}
@@ -229,8 +230,16 @@ public void start() {
*/
@Override
public void stop() {
- // TODO Auto-generated method stub
-
+ LOG.info("SolrAuditProvider.stop() called..");
+ try {
+ if (solrClient != null) {
+ solrClient.close();
+ }
+ } catch (IOException ioe) {
+ LOG.error("Error while stopping slor!", ioe);
+ } finally {
+ solrClient = null;
+ }
}
/*
@@ -281,6 +290,8 @@ SolrInputDocument toSolrDoc(AuthzAuditEvent auditEvent) {
doc.addField("action", auditEvent.getAction());
doc.addField("evtTime", auditEvent.getEventTime());
doc.addField("tags", auditEvent.getTags());
+ doc.addField("datasets", auditEvent.getDatasets());
+ doc.addField("projects", auditEvent.getProjects());
doc.addField("cluster", auditEvent.getClusterName());
doc.addField("zone", auditEvent.getZoneName());
doc.addField("agentHost", auditEvent.getAgentHostname());
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditAsyncQueue.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditAsyncQueue.java
index f31772ae54..b226b4e201 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditAsyncQueue.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditAsyncQueue.java
@@ -23,17 +23,17 @@
import java.util.Collection;
import java.util.concurrent.LinkedBlockingQueue;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.log4j.MDC;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.provider.AuditHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
/**
* This is a non-blocking queue with no limit on capacity.
*/
public class AuditAsyncQueue extends AuditQueue implements Runnable {
- private static final Log logger = LogFactory.getLog(AuditAsyncQueue.class);
+ private static final Logger logger = LoggerFactory.getLogger(AuditAsyncQueue.class);
LinkedBlockingQueue<AuditEventBase> queue = new LinkedBlockingQueue<AuditEventBase>();
Thread consumerThread = null;
@@ -56,8 +56,13 @@ public AuditAsyncQueue(AuditHandler consumer) {
*/
@Override
public boolean log(AuditEventBase event) {
+ logStatusIfRequired();
+
+ addTotalCount(1);
+
// Add to the queue and return ASAP
if (queue.size() >= getMaxQueueSize()) {
+ addFailedCount(1);
return false;
}
queue.add(event);
@@ -130,10 +135,21 @@ public void run() {
MDC.clear();
runLogAudit();
} catch (Throwable t) {
- logger.fatal("Exited thread abnormaly. queue=" + getName(), t);
+ logger.error("Exited thread abnormaly. queue=" + getName(), t);
}
}
+ @Override
+ public void logStatus() {
+ super.logStatus();
+
+ if (isStatusLogEnabled()) {
+ logger.info("AuditAsyncQueue.log(name={}): totalCount={}, currentQueueLength={}", getName(), getTotalCount(), queue.size());
+ }
+ }
+
+ public int size() { return queue.size(); }
+
public void runLogAudit() {
while (true) {
try {
@@ -150,6 +166,8 @@ public void runLogAudit() {
eventList.add(event);
queue.drainTo(eventList, MAX_DRAIN - 1);
consumer.log(eventList);
+
+ logStatusIfRequired();
}
} catch (InterruptedException e) {
logger.info("Caught exception in consumer thread. Shutdown might be in progress");
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditBatchQueue.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditBatchQueue.java
index 113a230167..103f926566 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditBatchQueue.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditBatchQueue.java
@@ -26,14 +26,14 @@
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.log4j.MDC;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.provider.AuditHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
public class AuditBatchQueue extends AuditQueue implements Runnable {
- private static final Log logger = LogFactory.getLog(AuditBatchQueue.class);
+ private static final Logger logger = LoggerFactory.getLogger(AuditBatchQueue.class);
private BlockingQueue<AuditEventBase> queue = null;
private Collection<AuditEventBase> localBatchBuffer = new ArrayList<AuditEventBase>();
@@ -56,8 +56,13 @@ public AuditBatchQueue(AuditHandler consumer) {
*/
@Override
public boolean log(AuditEventBase event) {
- // Add to batchQueue. Block if full
- queue.add(event);
+ try {
+ // Add to batchQueue. Block if full
+ queue.put(event);
+ } catch (InterruptedException ex) {
+ // restore the interrupt status before surfacing the failure, so callers
+ // still observe the interruption
+ Thread.currentThread().interrupt();
+ throw new RuntimeException(ex);
+ }
+
return true;
}
@@ -214,7 +219,7 @@ public void run() {
MDC.clear();
runLogAudit();
} catch (Throwable t) {
- logger.fatal("Exited thread abnormaly. queue=" + getName(), t);
+ logger.error("Exited thread abnormaly. queue=" + getName(), t);
}
}
@@ -232,15 +237,12 @@ public void runLogAudit() {
boolean fileSpoolDrain = false;
try {
if (fileSpoolerEnabled && fileSpooler.isPending()) {
- int percentUsed = queue.size() * 100
- / getMaxQueueSize();
- long lastAttemptDelta = fileSpooler
- .getLastAttemptTimeDelta();
+ int percentUsed = queue.size() * 100 / getMaxQueueSize();
+ long lastAttemptDelta = fileSpooler.getLastAttemptTimeDelta();
fileSpoolDrain = lastAttemptDelta > fileSpoolMaxWaitTime;
// If we should even read from queue?
- if (!isDrain() && !fileSpoolDrain
- && percentUsed < fileSpoolDrainThresholdPercent) {
+ if (!isDrain() && !fileSpoolDrain && percentUsed < fileSpoolDrainThresholdPercent) {
// Since some files are still under progress and it is
// not in drain mode, lets wait and retry
if (nextDispatchDuration > 0) {
@@ -254,10 +256,8 @@ public void runLogAudit() {
AuditEventBase event = null;
- if (!isToSpool && !isDrain() && !fileSpoolDrain
- && nextDispatchDuration > 0) {
- event = queue.poll(nextDispatchDuration,
- TimeUnit.MILLISECONDS);
+ if (!isToSpool && !isDrain() && !fileSpoolDrain && nextDispatchDuration > 0) {
+ event = queue.poll(nextDispatchDuration, TimeUnit.MILLISECONDS);
} else {
// For poll() is non blocking
event = queue.poll();
@@ -266,15 +266,11 @@ public void runLogAudit() {
if (event != null) {
localBatchBuffer.add(event);
if (getMaxBatchSize() >= localBatchBuffer.size()) {
- queue.drainTo(localBatchBuffer, getMaxBatchSize()
- - localBatchBuffer.size());
+ queue.drainTo(localBatchBuffer, getMaxBatchSize() - localBatchBuffer.size());
}
} else {
// poll returned due to timeout, so reseting clock
- nextDispatchDuration = lastDispatchTime
- - System.currentTimeMillis()
- + getMaxBatchInterval();
-
+ nextDispatchDuration = lastDispatchTime - System.currentTimeMillis() + getMaxBatchInterval();
lastDispatchTime = System.currentTimeMillis();
}
} catch (InterruptedException e) {
@@ -288,8 +284,7 @@ public void runLogAudit() {
if (localBatchBuffer.size() > 0 && isToSpool) {
// Let spool to the file directly
if (isDestActive) {
- logger.info("Switching to file spool. Queue=" + getName()
- + ", dest=" + consumer.getName());
+ logger.info("Switching to file spool. Queue = {}, dest = {}", getName(), consumer.getName());
}
isDestActive = false;
// Just before stashing
@@ -297,20 +292,18 @@ public void runLogAudit() {
fileSpooler.stashLogs(localBatchBuffer);
addStashedCount(localBatchBuffer.size());
localBatchBuffer.clear();
- } else if (localBatchBuffer.size() > 0
- && (isDrain()
- || localBatchBuffer.size() >= getMaxBatchSize() || nextDispatchDuration <= 0)) {
+ } else if (localBatchBuffer.size() > 0 &&
+ (isDrain() || localBatchBuffer.size() >= getMaxBatchSize() || nextDispatchDuration <= 0)) {
if (fileSpoolerEnabled && !isDestActive) {
- logger.info("Switching to writing to destination. Queue="
- + getName() + ", dest=" + consumer.getName());
+ logger.info("Switching to writing to the destination. Queue = {}, dest = {}",
+ getName(), consumer.getName());
}
// Reset time just before sending the logs
lastDispatchTime = System.currentTimeMillis();
boolean ret = consumer.log(localBatchBuffer);
if (!ret) {
if (fileSpoolerEnabled) {
- logger.info("Switching to file spool. Queue="
- + getName() + ", dest=" + consumer.getName());
+ logger.info("Switching to file spool. Queue = {}, dest = {}", getName(), consumer.getName());
// Transient error. Stash and move on
fileSpooler.stashLogs(localBatchBuffer);
isDestActive = false;
@@ -329,9 +322,8 @@ public void runLogAudit() {
if (isDrain()) {
if (!queue.isEmpty() || localBatchBuffer.size() > 0) {
- logger.info("Queue is not empty. Will retry. queue.size)="
- + queue.size() + ", localBatchBuffer.size()="
- + localBatchBuffer.size());
+ logger.info("Queue is not empty. Will retry. queue.size = {}, localBatchBuffer.size = {}",
+ queue.size(), localBatchBuffer.size());
} else {
break;
}
@@ -344,12 +336,10 @@ public void runLogAudit() {
}
}
- logger.info("Exiting consumerThread. Queue=" + getName() + ", dest="
- + consumer.getName());
+ logger.info("Exiting consumerThread. Queue = {}, dest = {}", getName(), consumer.getName());
try {
// Call stop on the consumer
- logger.info("Calling to stop consumer. name=" + getName()
- + ", consumer.name=" + consumer.getName());
+ logger.info("Calling to stop consumer. name = {}, consumer.name = {}", getName(), consumer.getName());
consumer.stop();
if (fileSpoolerEnabled) {
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileCacheProviderSpool.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileCacheProviderSpool.java
index 41513ba409..c61d99af58 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileCacheProviderSpool.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileCacheProviderSpool.java
@@ -19,18 +19,32 @@
package org.apache.ranger.audit.queue;
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.log4j.MDC;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.provider.AuditHandler;
import org.apache.ranger.audit.model.AuthzAuditEvent;
import org.apache.ranger.audit.provider.MiscUtil;
-
-import java.io.*;
-import java.util.*;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileFilter;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Date;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
@@ -42,7 +56,7 @@
*/
public class AuditFileCacheProviderSpool implements Runnable {
- private static final Log logger = LogFactory.getLog(AuditFileCacheProviderSpool.class);
+ private static final Logger logger = LoggerFactory.getLogger(AuditFileCacheProviderSpool.class);
public enum SPOOL_FILE_STATUS {
pending, write_inprogress, read_inprogress, done
@@ -56,10 +70,10 @@ public enum SPOOL_FILE_STATUS {
public static final String PROP_FILE_SPOOL_FILE_ROLLOVER = "filespool.file.rollover.sec";
public static final String PROP_FILE_SPOOL_INDEX_FILE = "filespool.index.filename";
public static final String PROP_FILE_SPOOL_DEST_RETRY_MS = "filespool.destination.retry.ms";
+ public static final String PROP_FILE_SPOOL_BATCH_SIZE = "filespool.buffer.size";
public static final String AUDIT_IS_FILE_CACHE_PROVIDER_ENABLE_PROP = "xasecure.audit.provider.filecache.is.enabled";
public static final String FILE_CACHE_PROVIDER_NAME = "AuditFileCacheProviderSpool";
- public static final int AUDIT_BATCH_SIZE_DEFAULT = 1000;
AuditHandler consumerProvider = null;
@@ -79,6 +93,7 @@ public enum SPOOL_FILE_STATUS {
int fileRolloverSec = 24 * 60 * 60; // In seconds
int maxArchiveFiles = 100;
int errorLogIntervalMS = 30 * 1000; // Every 30 seconds
+ int auditBatchSize = 1000;
long lastErrorLogMS = 0;
boolean isAuditFileCacheProviderEnabled = false;
boolean closeFile = false;
@@ -98,8 +113,6 @@ public enum SPOOL_FILE_STATUS {
boolean isDestDown = false;
boolean isSpoolingSuccessful = true;
- private Gson gson = null;
-
public AuditFileCacheProviderSpool(AuditHandler consumerProvider) {
this.consumerProvider = consumerProvider;
}
@@ -123,8 +136,6 @@ public boolean init(Properties props, String basePropertyName) {
}
try {
- gson = new GsonBuilder().setDateFormat("yyyy-MM-dd HH:mm:ss.SSS")
- .create();
// Initial folder and file properties
String logFolderProp = MiscUtil.getStringProperty(props, propPrefix
+ "." + PROP_FILE_SPOOL_LOCAL_DIR);
@@ -151,7 +162,7 @@ public boolean init(Properties props, String basePropertyName) {
+ FILE_CACHE_PROVIDER_NAME);
if (logFolderProp == null || logFolderProp.isEmpty()) {
- logger.fatal("Audit spool folder is not configured. Please set "
+ logger.error("Audit spool folder is not configured. Please set "
+ propPrefix
+ "."
+ PROP_FILE_SPOOL_LOCAL_DIR
@@ -162,7 +173,7 @@ public boolean init(Properties props, String basePropertyName) {
if (!logFolder.isDirectory()) {
boolean result = logFolder.mkdirs();
if (!logFolder.isDirectory() || !result) {
- logger.fatal("File Spool folder not found and can't be created. folder="
+ logger.error("File Spool folder not found and can't be created. folder="
+ logFolder.getAbsolutePath()
+ ", queueName="
+ FILE_CACHE_PROVIDER_NAME);
@@ -212,7 +223,7 @@ public boolean init(Properties props, String basePropertyName) {
if (!indexFile.exists()) {
boolean ret = indexFile.createNewFile();
if (!ret) {
- logger.fatal("Error creating index file. fileName="
+ logger.error("Error creating index file. fileName="
+ indexFile.getPath());
return false;
}
@@ -230,7 +241,7 @@ public boolean init(Properties props, String basePropertyName) {
if (!indexDoneFile.exists()) {
boolean ret = indexDoneFile.createNewFile();
if (!ret) {
- logger.fatal("Error creating index done file. fileName="
+ logger.error("Error creating index done file. fileName="
+ indexDoneFile.getPath());
return false;
}
@@ -271,10 +282,14 @@ public boolean init(Properties props, String basePropertyName) {
}
} catch (Throwable t) {
- logger.fatal("Error initializing File Spooler. queue="
+ logger.error("Error initializing File Spooler. queue="
+ FILE_CACHE_PROVIDER_NAME, t);
return false;
}
+
+ auditBatchSize = MiscUtil.getIntProperty(props, propPrefix
+ + "." + PROP_FILE_SPOOL_BATCH_SIZE, auditBatchSize);
+
initDone = true;
logger.debug("<== AuditFileCacheProviderSpool.init()");
@@ -576,9 +591,13 @@ void loadIndexFile() throws IOException {
String line;
while ((line = br.readLine()) != null) {
if (!line.isEmpty() && !line.startsWith("#")) {
- AuditIndexRecord record = gson.fromJson(line,
- AuditIndexRecord.class);
- indexRecords.add(record);
+ try {
+ AuditIndexRecord record = MiscUtil.fromJson(line,
+ AuditIndexRecord.class);
+ indexRecords.add(record);
+ } catch (Exception e) {
+ logger.error("Error parsing following JSON: "+line, e);
+ }
}
}
} finally {
@@ -624,7 +643,7 @@ synchronized void removeIndexRecord(AuditIndexRecord indexRecord)
synchronized void saveIndexFile() throws FileNotFoundException, IOException {
PrintWriter out = new PrintWriter(indexFile,"UTF-8");
for (AuditIndexRecord auditIndexRecord : indexRecords) {
- out.println(gson.toJson(auditIndexRecord));
+ out.println(MiscUtil.stringify(auditIndexRecord));
}
out.close();
// printIndex();
@@ -636,7 +655,7 @@ void appendToDoneFile(AuditIndexRecord indexRecord)
logger.info("Moving to done file. " + indexRecord.filePath
+ ", queueName=" + FILE_CACHE_PROVIDER_NAME + ", consumer="
+ consumerProvider.getName());
- String line = gson.toJson(indexRecord);
+ String line = MiscUtil.stringify(indexRecord);
PrintWriter out = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream(
indexDoneFile, true),"UTF-8")));
out.println(line);
@@ -684,7 +703,8 @@ public boolean accept(File pathname) {
int filesDeletedCount = 0;
while ((line = br.readLine()) != null) {
if (!line.isEmpty() && !line.startsWith("#")) {
- AuditIndexRecord record = gson.fromJson(line,
+ try {
+ AuditIndexRecord record = MiscUtil.fromJson(line,
AuditIndexRecord.class);
logFile = new File(record.filePath);
String fileName = logFile.getName();
@@ -704,6 +724,9 @@ public boolean accept(File pathname) {
break;
}
}
+ } catch (Exception e) {
+ logger.error("Error parsing following JSON: "+line, e);
+ }
}
}
} finally {
@@ -765,7 +788,7 @@ public void run() {
MDC.clear();
runLogAudit();
} catch (Throwable t) {
- logger.fatal("Exited thread without abnormaly. queue="
+ logger.error("Exited thread without abnormaly. queue="
+ consumerProvider.getName(), t);
}
}
@@ -824,7 +847,7 @@ public void runLogAudit() {
AuditEventBase event = MiscUtil.fromJson(line, AuthzAuditEvent.class);
events.add(event);
- if (events.size() == AUDIT_BATCH_SIZE_DEFAULT) {
+ if (events.size() == auditBatchSize) {
boolean ret = sendEvent(events,
currentConsumerIndexRecord, currLine);
if (!ret) {
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileQueue.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileQueue.java
new file mode 100644
index 0000000000..a4e0683665
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileQueue.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.ranger.audit.queue;
+
+import org.apache.ranger.audit.model.AuditEventBase;
+import org.apache.ranger.audit.provider.AuditHandler;
+import org.apache.ranger.audit.provider.BaseAuditHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collection;
+import java.util.Properties;
+
+/*
+ AuditFileQueue stashes audit logs in the local filesystem before sending them on to the AuditBatchQueue consumer
+*/
+
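+/*
+ Minimal wiring sketch (illustrative only; the destination and property prefix
+ below are assumptions, not part of this change):
+
+   AuditHandler dest = new SolrAuditDestination();
+   AuditFileQueue fileQueue = new AuditFileQueue(dest);
+   fileQueue.init(props, "xasecure.audit.solr.batch");
+   fileQueue.start();
+   fileQueue.log(auditEvent);
+*/
+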
+public class AuditFileQueue extends BaseAuditHandler {
+ private static final Logger logger = LoggerFactory.getLogger(AuditFileQueue.class);
+
+ AuditFileQueueSpool fileSpooler = null;
+ AuditHandler consumer = null;
+
+ static final String DEFAULT_NAME = "batch";
+
+ public AuditFileQueue(AuditHandler consumer) {
+ this.consumer = consumer;
+ }
+
+ public void init(Properties prop, String basePropertyName) {
+ String propPrefix = "xasecure.audit.batch";
+ if (basePropertyName != null) {
+ propPrefix = basePropertyName;
+ }
+ super.init(prop, propPrefix);
+
+ // init the AuditFileQueueSpool thread that ships local logs to the destination
+ fileSpooler = new AuditFileQueueSpool(consumer);
+ fileSpooler.init(prop,propPrefix);
+ }
+
+ @Override
+ public boolean log(AuditEventBase event) {
+ boolean ret = false;
+ if ( event != null) {
+ fileSpooler.stashLogs(event);
+ if (fileSpooler.isSpoolingSuccessful()) {
+ ret = true;
+ }
+ }
+ return ret;
+ }
+
+ @Override
+ public boolean log(Collection<AuditEventBase> events) {
+ boolean ret = true;
+ if ( events != null) {
+ for (AuditEventBase event : events) {
+ // report failure if any event fails, not only the last one
+ if (!log(event)) {
+ ret = false;
+ }
+ }
+ }
+ return ret;
+ }
+
+
+ @Override
+ public void start() {
+ // Start the consumer thread
+ if (consumer != null) {
+ consumer.start();
+ }
+ if (fileSpooler != null) {
+ // start the AuditFileQueueSpool thread
+ fileSpooler.start();
+ }
+ }
+
+ @Override
+ public void stop() {
+ logger.info("Stop called. name=" + getName());
+ if (consumer != null) {
+ consumer.stop();
+ }
+ }
+
+ @Override
+ public void waitToComplete() {
+ logger.info("waitToComplete called. name=" + getName());
+ if ( consumer != null) {
+ consumer.waitToComplete();
+ }
+ }
+
+ @Override
+ public void waitToComplete(long timeout) {
+ logger.info("waitToComplete called. name=" + getName());
+ if ( consumer != null) {
+ consumer.waitToComplete(timeout);
+ }
+ }
+
+ @Override
+ public void flush() {
+ logger.info("waitToComplete. name=" + getName());
+ if ( consumer != null) {
+ consumer.flush();
+ }
+ }
+
+}
\ No newline at end of file
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileQueueSpool.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileQueueSpool.java
new file mode 100644
index 0000000000..f87ec55ab1
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileQueueSpool.java
@@ -0,0 +1,987 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied. See the License for the
+* specific language governing permissions and limitations
+* under the License.
+*/
+
+package org.apache.ranger.audit.queue;
+
+import org.apache.ranger.audit.model.AuditEventBase;
+import org.apache.ranger.audit.model.AuditIndexRecord;
+import org.apache.ranger.audit.model.AuthzAuditEvent;
+import org.apache.ranger.audit.model.SPOOL_FILE_STATUS;
+import org.apache.ranger.audit.provider.AuditHandler;
+import org.apache.ranger.audit.provider.MiscUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileFilter;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Date;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * This class temporarily stores logs in the local file system before dispatching each log file to the AuditBatchQueue consumer.
+ * It is instantiated only when AuditFileCacheProvider is enabled (xasecure.audit.provider.filecache.is.enabled).
+ * When AuditFileCacheProvider is enabled, all logs are stored in the local file system before being sent to the destination.
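+ * <p>
+ * Flow (sketch, inferred from the implementation): stashLogs() appends events to a rolling local spool file
+ * tracked in the index file; the destination thread replays pending files to the consumer and moves
+ * completed entries to the done-index file.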
+ */
+
+public class AuditFileQueueSpool implements Runnable {
+ private static final Logger logger = LoggerFactory.getLogger(AuditFileQueueSpool.class);
+
+ public static final String PROP_FILE_SPOOL_LOCAL_DIR = "filespool.dir";
+ public static final String PROP_FILE_SPOOL_LOCAL_FILE_NAME = "filespool.filename.format";
+ public static final String PROP_FILE_SPOOL_ARCHIVE_DIR = "filespool.archive.dir";
+ public static final String PROP_FILE_SPOOL_ARCHIVE_MAX_FILES_COUNT = "filespool.archive.max.files";
+ public static final String PROP_FILE_SPOOL_FILENAME_PREFIX = "filespool.file.prefix";
+ public static final String PROP_FILE_SPOOL_FILE_ROLLOVER = "filespool.file.rollover.sec";
+ public static final String PROP_FILE_SPOOL_INDEX_FILE = "filespool.index.filename";
+ public static final String PROP_FILE_SPOOL_DEST_RETRY_MS = "filespool.destination.retry.ms";
+ public static final String PROP_FILE_SPOOL_BATCH_SIZE = "filespool.buffer.size";
+ public static final String FILE_QUEUE_PROVIDER_NAME = "AuditFileQueueSpool";
+ public static final String DEFAULT_AUDIT_FILE_TYPE = "json";
+
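+ // Downstream audit handler that receives the replayed events, plus the queue
+ // and in-memory list of spool-file index records tracked by this spooler.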
+ AuditHandler consumerProvider = null;
+ BlockingQueue<AuditIndexRecord> indexQueue = new LinkedBlockingQueue<>();
+ List<AuditIndexRecord> indexRecords = new ArrayList<>();
+
+ // Folder and File attributes
+ File logFolder = null;
+ String logFileNameFormat = null;
+ File archiveFolder = null;
+ String fileNamePrefix = null;
+ String indexFileName = null;
+ File indexFile = null;
+ String indexDoneFileName = null;
+ String auditFileType = null;
+ File indexDoneFile = null;
+ int retryDestinationMS = 30 * 1000; // Default 30 seconds
+ int fileRolloverSec = 24 * 60 * 60; // In seconds
+ int maxArchiveFiles = 100;
+ int errorLogIntervalMS = 30 * 1000; // Every 30 seconds
+ long lastErrorLogMS = 0;
+ boolean isAuditFileCacheProviderEnabled = false;
+ boolean closeFile = false;
+ boolean isPending = false;
+ long lastAttemptTime = 0;
+ long bufferSize = 1000;
+ boolean initDone = false;
+
+ PrintWriter logWriter = null;
+ AuditIndexRecord currentWriterIndexRecord = null;
+ AuditIndexRecord currentConsumerIndexRecord = null;
+
+ BufferedReader logReader = null;
+ Thread destinationThread = null;
+
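+ // Runtime state flags: isWriting marks an in-progress write (checked by stop()
+ // before closing the file), isDrain is set once stop() is called, isDestDown
+ // tracks destination availability, and isSpoolingSuccessful records the outcome
+ // of the last stashLogs() call.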
+ boolean isWriting = true;
+ boolean isDrain = false;
+ boolean isDestDown = false;
+ boolean isSpoolingSuccessful = true;
+
+ public AuditFileQueueSpool(AuditHandler consumerProvider) {
+ this.consumerProvider = consumerProvider;
+ }
+
+ public void init(Properties prop) {
+ init(prop, null);
+ }
+
+ public boolean init(Properties props, String basePropertyName) {
+ logger.debug("==> AuditFileQueueSpool.init()");
+
+ if (initDone) {
+ logger.error("init() called more than once. queueProvider="
+ + "" + ", consumerProvider="
+ + consumerProvider.getName());
+ return true;
+ }
+ String propPrefix = "xasecure.audit.filespool";
+ if (basePropertyName != null) {
+ propPrefix = basePropertyName;
+ }
+
+ try {
+ // Initial folder and file properties
+ String logFolderProp = MiscUtil.getStringProperty(props, propPrefix
+ + "." + PROP_FILE_SPOOL_LOCAL_DIR);
+ logFileNameFormat = MiscUtil.getStringProperty(props,
+ propPrefix + "." + PROP_FILE_SPOOL_LOCAL_FILE_NAME);
+ String archiveFolderProp = MiscUtil.getStringProperty(props,
+ propPrefix + "." + PROP_FILE_SPOOL_ARCHIVE_DIR);
+ fileNamePrefix = MiscUtil.getStringProperty(props, propPrefix + "."
+ + PROP_FILE_SPOOL_FILENAME_PREFIX);
+ indexFileName = MiscUtil.getStringProperty(props, propPrefix + "."
+ + PROP_FILE_SPOOL_INDEX_FILE);
+ retryDestinationMS = MiscUtil.getIntProperty(props, propPrefix
+ + "." + PROP_FILE_SPOOL_DEST_RETRY_MS, retryDestinationMS);
+ fileRolloverSec = MiscUtil.getIntProperty(props, propPrefix + "."
+ + PROP_FILE_SPOOL_FILE_ROLLOVER, fileRolloverSec);
+ maxArchiveFiles = MiscUtil.getIntProperty(props, propPrefix + "."
+ + PROP_FILE_SPOOL_ARCHIVE_MAX_FILES_COUNT, maxArchiveFiles);
+ logger.info("retryDestinationMS=" + retryDestinationMS
+ + ", queueName=" + FILE_QUEUE_PROVIDER_NAME);
+ logger.info("fileRolloverSec=" + fileRolloverSec + ", queueName="
+ + FILE_QUEUE_PROVIDER_NAME);
+ logger.info("maxArchiveFiles=" + maxArchiveFiles + ", queueName="
+ + FILE_QUEUE_PROVIDER_NAME);
+
+ if (logFolderProp == null || logFolderProp.isEmpty()) {
+ logger.error("Audit spool folder is not configured. Please set "
+ + propPrefix
+ + "."
+ + PROP_FILE_SPOOL_LOCAL_DIR
+ + ". queueName=" + FILE_QUEUE_PROVIDER_NAME);
+ return false;
+ }
+ logFolder = new File(logFolderProp);
+ if (!logFolder.isDirectory()) {
+ boolean result = logFolder.mkdirs();
+ if (!logFolder.isDirectory() || !result) {
+ logger.error("File Spool folder not found and can't be created. folder="
+ + logFolder.getAbsolutePath()
+ + ", queueName="
+ + FILE_QUEUE_PROVIDER_NAME);
+ return false;
+ }
+ }
+ logger.info("logFolder=" + logFolder + ", queueName="
+ + FILE_QUEUE_PROVIDER_NAME);
+
+ if (logFileNameFormat == null || logFileNameFormat.isEmpty()) {
+ logFileNameFormat = "spool_" + "%app-type%" + "_"
+ + "%time:yyyyMMdd-HHmm.ss%.log";
+ }
+ logger.info("logFileNameFormat=" + logFileNameFormat
+ + ", queueName=" + FILE_QUEUE_PROVIDER_NAME);
+
+ if (archiveFolderProp == null || archiveFolderProp.isEmpty()) {
+ archiveFolder = new File(logFolder, "archive");
+ } else {
+ archiveFolder = new File(archiveFolderProp);
+ }
+ if (!archiveFolder.isDirectory()) {
+ boolean result = archiveFolder.mkdirs();
+ if (!archiveFolder.isDirectory() || !result) {
+ logger.error("File Spool archive folder not found and can't be created. folder="
+ + archiveFolder.getAbsolutePath()
+ + ", queueName="
+ + FILE_QUEUE_PROVIDER_NAME);
+ return false;
+ }
+ }
+ logger.info("archiveFolder=" + archiveFolder + ", queueName="
+ + FILE_QUEUE_PROVIDER_NAME);
+
+ if (indexFileName == null || indexFileName.isEmpty()) {
+ if (fileNamePrefix == null || fileNamePrefix.isEmpty()) {
+ fileNamePrefix = FILE_QUEUE_PROVIDER_NAME + "_"
+ + consumerProvider.getName();
+ }
+ indexFileName = "index_" + fileNamePrefix + "_" + "%app-type%"
+ + ".json";
+ indexFileName = MiscUtil.replaceTokens(indexFileName,
+ System.currentTimeMillis());
+ }
+
+ indexFile = new File(logFolder, indexFileName);
+ if (!indexFile.exists()) {
+ boolean ret = indexFile.createNewFile();
+ if (!ret) {
+ logger.error("Error creating index file. fileName="
+ + indexFile.getPath());
+ return false;
+ }
+ }
+ logger.info("indexFile=" + indexFile + ", queueName="
+ + FILE_QUEUE_PROVIDER_NAME);
+
+ int lastDot = indexFileName.lastIndexOf('.');
+ if (lastDot < 0) {
+ lastDot = indexFileName.length() - 1;
+ }
+ indexDoneFileName = indexFileName.substring(0, lastDot)
+ + "_closed.json";
+ indexDoneFile = new File(logFolder, indexDoneFileName);
+ if (!indexDoneFile.exists()) {
+ boolean ret = indexDoneFile.createNewFile();
+ if (!ret) {
+ logger.error("Error creating index done file. fileName="
+ + indexDoneFile.getPath());
+ return false;
+ }
+ }
+ logger.info("indexDoneFile=" + indexDoneFile + ", queueName="
+ + FILE_QUEUE_PROVIDER_NAME);
+
+ // Load index file
+ loadIndexFile();
+ for (AuditIndexRecord auditIndexRecord : indexRecords) {
+ if (!auditIndexRecord.getStatus().equals(SPOOL_FILE_STATUS.done)) {
+ isPending = true;
+ }
+ if (auditIndexRecord.getStatus()
+ .equals(SPOOL_FILE_STATUS.write_inprogress)) {
+ currentWriterIndexRecord = auditIndexRecord;
+ logger.info("currentWriterIndexRecord="
+ + currentWriterIndexRecord.getFilePath()
+ + ", queueName=" + FILE_QUEUE_PROVIDER_NAME);
+ }
+ if (auditIndexRecord.getStatus()
+ .equals(SPOOL_FILE_STATUS.read_inprogress)) {
+ indexQueue.add(auditIndexRecord);
+ }
+ }
+ printIndex();
+ for (int i = 0; i < indexRecords.size(); i++) {
+ AuditIndexRecord auditIndexRecord = indexRecords.get(i);
+ if (auditIndexRecord.getStatus().equals(SPOOL_FILE_STATUS.pending)) {
+ File consumerFile = new File(auditIndexRecord.getFilePath());
+ if (!consumerFile.exists()) {
+ logger.error("INIT: Consumer file="
+ + consumerFile.getPath() + " not found.");
+ } else {
+ indexQueue.add(auditIndexRecord);
+ }
+ }
+ }
+
+ auditFileType = MiscUtil.getStringProperty(props, propPrefix + ".filetype", DEFAULT_AUDIT_FILE_TYPE);
+ if (auditFileType == null) {
+ auditFileType = DEFAULT_AUDIT_FILE_TYPE;
+ }
+
+ } catch (Throwable t) {
+ logger.error("Error initializing File Spooler. queue="
+ + FILE_QUEUE_PROVIDER_NAME, t);
+ return false;
+ }
+
+ bufferSize = MiscUtil.getLongProperty(props, propPrefix
+ + "." + PROP_FILE_SPOOL_BATCH_SIZE, bufferSize);
+
+ initDone = true;
+
+ logger.debug("<== AuditFileQueueSpool.init()");
+ return true;
+ }
+
+ /**
+ * Start looking for outstanding logs and update status accordingly.
+ */
+ public void start() {
+ if (!initDone) {
+ logger.error("Cannot start Audit File Spooler. Initilization not done yet. queueName="
+ + FILE_QUEUE_PROVIDER_NAME);
+ return;
+ }
+
+ logger.info("Starting writerThread, queueName="
+ + FILE_QUEUE_PROVIDER_NAME + ", consumer="
+ + consumerProvider.getName());
+
+ // Let's start the thread to read
+ destinationThread = new Thread(this, FILE_QUEUE_PROVIDER_NAME + "_"
+ + consumerProvider.getName() + "_destWriter");
+ destinationThread.setDaemon(true);
+ destinationThread.start();
+ }
+
+ public void stop() {
+ if (!initDone) {
+ logger.error("Cannot stop Audit File Spooler. Initilization not done. queueName="
+ + FILE_QUEUE_PROVIDER_NAME);
+ return;
+ }
+ logger.info("Stop called, queueName=" + FILE_QUEUE_PROVIDER_NAME
+ + ", consumer=" + consumerProvider.getName());
+
+ isDrain = true;
+ flush();
+
+ PrintWriter out = getOpenLogFileStream();
+ if (out != null) {
+ // If write is still going on, then let's give it enough time to
+ // complete
+ for (int i = 0; i < 3; i++) {
+ if (isWriting) {
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ continue;
+ }
+ try {
+ logger.info("Closing open file, queueName="
+ + FILE_QUEUE_PROVIDER_NAME + ", consumer="
+ + consumerProvider.getName());
+
+ out.flush();
+ out.close();
+ break;
+ } catch (Throwable t) {
+ logger.debug("Error closing spool out file.", t);
+ }
+ }
+ }
+ try {
+ if (destinationThread != null) {
+ destinationThread.interrupt();
+ }
+ destinationThread = null;
+ } catch (Throwable e) {
+ // ignore
+ }
+ }
+
+ public void flush() {
+ if (!initDone) {
+ logger.error("Cannot flush Audit File Spooler. Initilization not done. queueName="
+ + FILE_QUEUE_PROVIDER_NAME);
+ return;
+ }
+ PrintWriter out = getOpenLogFileStream();
+ if (out != null) {
+ out.flush();
+ }
+ }
+
+ /**
+ * Reports whether any spooled files are still unprocessed, which also covers
+ * the case where the destination is not reachable.
+ *
+ * @return true if spooled files are still pending
+ */
+ public boolean isPending() {
+ if (!initDone) {
+ logError("isPending(): File Spooler not initialized. queueName="
+ + FILE_QUEUE_PROVIDER_NAME);
+ return false;
+ }
+
+ return isPending;
+ }
+
+ /**
+ * Milliseconds elapsed since the last attempt, or 0 if no attempt has been made yet.
+ *
+ * @return elapsed time in milliseconds
+ */
+ public long getLastAttemptTimeDelta() {
+ if (lastAttemptTime == 0) {
+ return 0;
+ }
+ return System.currentTimeMillis() - lastAttemptTime;
+ }
+
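+ /**
+ * Serializes the event to JSON and appends it to the current spool file.
+ */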
+ public synchronized void stashLogs(AuditEventBase event) {
+
+ if (isDrain) {
+ // Stop has been called, so this method shouldn't be called
+ logger.error("stashLogs() is called after stop is called. event="
+ + event);
+ return;
+ }
+ try {
+ isWriting = true;
+ PrintWriter logOut = getLogFileStream();
+ // Convert event to json
+ String jsonStr = MiscUtil.stringify(event);
+ logOut.println(jsonStr);
+ logOut.flush();
+ isPending = true;
+ isSpoolingSuccessful = true;
+ } catch (Throwable t) {
+ isSpoolingSuccessful = false;
+ logger.error("Error writing to file. event=" + event, t);
+ } finally {
+ isWriting = false;
+ }
+
+ }
+
+ public synchronized void stashLogs(Collection<AuditEventBase> events) {
+ for (AuditEventBase event : events) {
+ stashLogs(event);
+ }
+ flush();
+ }
+
+ public synchronized void stashLogsString(String event) {
+ if (isDrain) {
+ // Stop has been called, so this method shouldn't be called
+ logger.error("stashLogs() is called after stop is called. event="
+ + event);
+ return;
+ }
+ try {
+ isWriting = true;
+ PrintWriter logOut = getLogFileStream();
+ logOut.println(event);
+ } catch (Exception ex) {
+ logger.error("Error writing to file. event=" + event, ex);
+ } finally {
+ isWriting = false;
+ }
+
+ }
+
+ public synchronized boolean isSpoolingSuccessful() {
+ return isSpoolingSuccessful;
+ }
+
+ public synchronized void stashLogsString(Collection<String> events) {
+ for (String event : events) {
+ stashLogsString(event);
+ }
+ flush();
+ }
+
+ /**
+ * Returns the writer for the currently open output file, or null if no
+ * output file is open.
+ *
+ * @return the open PrintWriter, or null
+ */
+ private synchronized PrintWriter getOpenLogFileStream() {
+ return logWriter;
+ }
+
+ /**
+ * Returns the writer for the current spool file, creating a new file or
+ * reopening the in-progress one as needed.
+ *
+ * @return the PrintWriter for the active spool file
+ * @throws Exception if the file cannot be opened
+ */
+ private synchronized PrintWriter getLogFileStream() throws Exception {
+ closeFileIfNeeded();
+ // Either there is no open log file or the previous one has been rolled
+ // over
+ if (currentWriterIndexRecord == null) {
+ Date currentTime = new Date();
+ // Create a new file
+ String fileName = MiscUtil.replaceTokens(logFileNameFormat,
+ currentTime.getTime());
+ String newFileName = fileName;
+ File outLogFile = null;
+ int i = 0;
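+ // If a file with the generated name already exists in the spool or archive
+ // folder, append an increasing numeric suffix until the name is unique.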
+ while (true) {
+ outLogFile = new File(logFolder, newFileName);
+ File archiveLogFile = new File(archiveFolder, newFileName);
+ if (!outLogFile.exists() && !archiveLogFile.exists()) {
+ break;
+ }
+ i++;
+ int lastDot = fileName.lastIndexOf('.');
+ String baseName = fileName.substring(0, lastDot);
+ String extension = fileName.substring(lastDot);
+ newFileName = baseName + "." + i + extension;
+ }
+ fileName = newFileName;
+ logger.info("Creating new file. queueName="
+ + FILE_QUEUE_PROVIDER_NAME + ", fileName=" + fileName);
+ // Open the file
+ logWriter = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream(
+ outLogFile),"UTF-8")));
+
+ AuditIndexRecord tmpIndexRecord = new AuditIndexRecord();
+
+ tmpIndexRecord.setId(MiscUtil.generateUniqueId());
+ tmpIndexRecord.setFilePath(outLogFile.getPath());
+ tmpIndexRecord.setStatus(SPOOL_FILE_STATUS.write_inprogress);
+ tmpIndexRecord.setFileCreateTime(currentTime);
+ tmpIndexRecord.setLastAttempt(true);
+ currentWriterIndexRecord = tmpIndexRecord;
+ indexRecords.add(currentWriterIndexRecord);
+ saveIndexFile();
+
+ } else {
+ if (logWriter == null) {
+ // This means the process just started. We need to open the file
+ // in append mode.
+ logger.info("Opening existing file for append. queueName="
+ + FILE_QUEUE_PROVIDER_NAME + ", fileName="
+ + currentWriterIndexRecord.getFilePath());
+ logWriter = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream(
+ currentWriterIndexRecord.getFilePath(), true),"UTF-8")));
+ }
+ }
+ return logWriter;
+ }
+
+ private synchronized void closeFileIfNeeded() throws IOException {
+ // If a file is open for writing, check whether it is due for rollover and,
+ // if so, close it and queue it for the consumer
+ if (currentWriterIndexRecord != null) {
+ // Check whether the file needs to be rolled
+ rollOverSpoolFileByTime();
+
+ if (closeFile) {
+ // Roll the file
+ if (logWriter != null) {
+ logWriter.flush();
+ logWriter.close();
+ logWriter = null;
+ closeFile = false;
+ }
+ currentWriterIndexRecord.setStatus(SPOOL_FILE_STATUS.pending);
+ currentWriterIndexRecord.setWriteCompleteTime(new Date());
+ saveIndexFile();
+ logger.info("Adding file to queue. queueName="
+ + FILE_QUEUE_PROVIDER_NAME + ", fileName="
+ + currentWriterIndexRecord.getFilePath());
+ indexQueue.add(currentWriterIndexRecord);
+ currentWriterIndexRecord = null;
+ }
+ }
+ }
+
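+ // Marks the current spool file for closing once it has been open longer
+ // than fileRolloverSec.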
+ private void rollOverSpoolFileByTime() {
+ if (System.currentTimeMillis()
+ - currentWriterIndexRecord.getFileCreateTime().getTime() > fileRolloverSec * 1000L) {
+ closeFile = true;
+ logger.info("Closing file. Rolling over. queueName="
+ + FILE_QUEUE_PROVIDER_NAME + ", fileName="
+ + currentWriterIndexRecord.getFilePath());
+ }
+ }
+
+ /**
+ * Load the index file
+ *
+ * @throws IOException
+ */
+ void loadIndexFile() throws IOException {
+ logger.info("Loading index file. fileName=" + indexFile.getPath());
+ BufferedReader br = null;
+ try {
+ br = new BufferedReader(new InputStreamReader(new FileInputStream(indexFile), "UTF-8"));
+ indexRecords.clear();
+ String line;
+ while ((line = br.readLine()) != null) {
+ if (!line.isEmpty() && !line.startsWith("#")) {
+ try {
+ AuditIndexRecord record = MiscUtil.fromJson(line,
+ AuditIndexRecord.class);
+ indexRecords.add(record);
+ } catch (Exception e) {
+ logger.error("Error parsing following JSON: "+line, e);
+ }
+ }
+ }
+ } finally {
+ if (br != null) {
+ br.close();
+ }
+ }
+ }
+
+ synchronized void printIndex() {
+ logger.info("INDEX printIndex() ==== START");
+ Iterator<AuditIndexRecord> iter = indexRecords.iterator();
+ while (iter.hasNext()) {
+ AuditIndexRecord record = iter.next();
+ logger.info("INDEX=" + record + ", isFileExist="
+ + (new File(record.getFilePath()).exists()));
+ }
+ logger.info("INDEX printIndex() ==== END");
+ }
+
+ synchronized void removeIndexRecord(AuditIndexRecord indexRecord)
+ throws IOException {
+ Iterator<AuditIndexRecord> iter = indexRecords.iterator();
+ while (iter.hasNext()) {
+ AuditIndexRecord record = iter.next();
+ if (record.getId().equals(indexRecord.getId())) {
+ logger.info("Removing file from index. file=" + record.getFilePath()
+ + ", queueName=" + FILE_QUEUE_PROVIDER_NAME
+ + ", consumer=" + consumerProvider.getName());
+
+ iter.remove();
+ appendToDoneFile(record);
+ }
+ }
+ saveIndexFile();
+ // If there are no more files in the index, then let's assume the
+ // destination is now available
+ if (indexRecords.size() == 0) {
+ isPending = false;
+ }
+ }
+
+ synchronized void saveIndexFile() throws IOException {
+ try (PrintWriter out = new PrintWriter(indexFile, "UTF-8")) {
+ for (AuditIndexRecord auditIndexRecord : indexRecords) {
+ out.println(MiscUtil.stringify(auditIndexRecord));
+ }
+ }
+ }
+
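+ // Appends the record to the "done" index file, flushes the consumer, moves the
+ // spool file to the archive folder, and prunes archives beyond maxArchiveFiles.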
+ void appendToDoneFile(AuditIndexRecord indexRecord) throws IOException {
+ logger.info("Moving to done file. " + indexRecord.getFilePath()
+ + ", queueName=" + FILE_QUEUE_PROVIDER_NAME + ", consumer="
+ + consumerProvider.getName());
+ String line = MiscUtil.stringify(indexRecord);
+ try (PrintWriter out = new PrintWriter(new BufferedWriter(new OutputStreamWriter(new FileOutputStream(
+ indexDoneFile, true), "UTF-8")))) {
+ out.println(line);
+ out.flush();
+ }
+
+ // After each file is read and its audit events are pushed into the pipe, flush so they reach the destination immediately.
+ consumerProvider.flush();
+
+ // Move to archive folder
+ File logFile = null;
+ File archiveFile = null;
+ try {
+ logFile = new File(indexRecord.getFilePath());
+ String fileName = logFile.getName();
+ archiveFile = new File(archiveFolder, fileName);
+ logger.info("Moving logFile " + logFile + " to " + archiveFile);
+ boolean result = logFile.renameTo(archiveFile);
+ if (!result) {
+ logger.error("Error moving log file to archive folder. Unable to rename"
+ + logFile + " to archiveFile=" + archiveFile);
+ }
+ } catch (Throwable t) {
+ logger.error("Error moving log file to archive folder. logFile="
+ + logFile + ", archiveFile=" + archiveFile, t);
+ }
+
+ // After archiving the file flush the pipe
+ consumerProvider.flush();
+
+ archiveFile = null;
+ try {
+ // Remove old files
+ File[] logFiles = archiveFolder.listFiles(new FileFilter() {
+ public boolean accept(File pathname) {
+ return pathname.getName().toLowerCase().endsWith(".log");
+ }
+ });
+
+ if (logFiles != null && logFiles.length > maxArchiveFiles) {
+ int filesToDelete = logFiles.length - maxArchiveFiles;
+ BufferedReader br = new BufferedReader(new FileReader(
+ indexDoneFile));
+ try {
+ int filesDeletedCount = 0;
+ while ((line = br.readLine()) != null) {
+ if (!line.isEmpty() && !line.startsWith("#")) {
+ try {
+ AuditIndexRecord record = MiscUtil.fromJson(line,
+ AuditIndexRecord.class);
+ logFile = new File(record.getFilePath());
+ String fileName = logFile.getName();
+ archiveFile = new File(archiveFolder, fileName);
+ if (archiveFile.exists()) {
+ logger.info("Deleting archive file "
+ + archiveFile);
+ boolean ret = archiveFile.delete();
+ if (!ret) {
+ logger.error("Error deleting archive file. archiveFile="
+ + archiveFile);
+ }
+ filesDeletedCount++;
+ if (filesDeletedCount >= filesToDelete) {
+ logger.info("Deleted " + filesDeletedCount
+ + " files");
+ break;
+ }
+ }
+ } catch (Exception e) {
+ logger.error("Error parsing following JSON: "+line, e);
+ }
+ }
+ }
+ } finally {
+ br.close();
+ }
+ }
+ } catch (Throwable t) {
+ logger.error("Error deleting older archive file. archiveFile="
+ + archiveFile, t);
+ }
+
+ }
+
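+ // Rate-limited error logging: emits at most one message per errorLogIntervalMS.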
+ void logError(String msg) {
+ long currTimeMS = System.currentTimeMillis();
+ if (currTimeMS - lastErrorLogMS > errorLogIntervalMS) {
+ logger.error(msg);
+ lastErrorLogMS = currTimeMS;
+ }
+ }
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see java.lang.Runnable#run()
+ */
+ @Override
+ public void run() {
+ try {
+ //This is done to clear the MDC context to avoid issue with Ranger Auditing for Knox
+ MDC.clear();
+ runLogAudit();
+ } catch (Throwable t) {
+ logger.error("Exited thread without abnormaly. queue="
+ + consumerProvider.getName(), t);
+ }
+ }
+
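+ // Consumer loop: polls the index queue for pending spool files, replays each one
+ // to the consumer, and backs off for retryDestinationMS while the destination is down.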
+ public void runLogAudit() {
+ // boolean isResumed = false;
+ while (true) {
+ try {
+ if (isDestDown) {
+ logger.info("Destination is down. sleeping for "
+ + retryDestinationMS
+ + " milli seconds. indexQueue=" + indexQueue.size()
+ + ", queueName=" + FILE_QUEUE_PROVIDER_NAME
+ + ", consumer=" + consumerProvider.getName());
+ Thread.sleep(retryDestinationMS);
+ }
+ // Let's pause between each iteration
+ if (currentConsumerIndexRecord == null) {
+ currentConsumerIndexRecord = indexQueue.poll(
+ retryDestinationMS, TimeUnit.MILLISECONDS);
+ } else {
+ Thread.sleep(retryDestinationMS);
+ }
+
+ if (isDrain) {
+ // Need to exit
+ break;
+ }
+ if (currentConsumerIndexRecord == null) {
+ closeFileIfNeeded();
+ continue;
+ }
+
+ boolean isRemoveIndex = false;
+ File consumerFile = new File(
+ currentConsumerIndexRecord.getFilePath());
+ if (!consumerFile.exists()) {
+ logger.error("Consumer file=" + consumerFile.getPath()
+ + " not found.");
+ printIndex();
+ isRemoveIndex = true;
+ } else {
+ // Let's open the file to read
+ BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(
+ currentConsumerIndexRecord.getFilePath()),"UTF-8"));
+ try {
+ if (auditFileType.equalsIgnoreCase(DEFAULT_AUDIT_FILE_TYPE)) {
+ // If the audit file format is JSON, each audit file in the local spool location
+ // will be copied to the HDFS location as JSON
+ File srcFile = new File(currentConsumerIndexRecord.getFilePath());
+ logFile(srcFile);
+ } else {
+ // If the audit file format is ORC, each record in the audit files in the local
+ // spool location will be read, converted into ORC format, and pushed into an ORC file.
+ logEvent(br);
+ }
+ logger.info("Done reading file. file="
+ + currentConsumerIndexRecord.getFilePath()
+ + ", queueName=" + FILE_QUEUE_PROVIDER_NAME
+ + ", consumer=" + consumerProvider.getName());
+ // The entire file is read
+ currentConsumerIndexRecord.setStatus(SPOOL_FILE_STATUS.done);
+ currentConsumerIndexRecord.setDoneCompleteTime(new Date());
+ currentConsumerIndexRecord.setLastAttempt(true);
+
+ isRemoveIndex = true;
+ } catch (Exception ex) {
+ isDestDown = true;
+ logError("Destination down. queueName="
+ + FILE_QUEUE_PROVIDER_NAME + ", consumer="
+ + consumerProvider.getName());
+ lastAttemptTime = System.currentTimeMillis();
+ // Update the index file
+ currentConsumerIndexRecord.setLastFailedTime(new Date());
+ currentConsumerIndexRecord.setFailedAttemptCount(currentConsumerIndexRecord.getFailedAttemptCount() + 1);
+ currentConsumerIndexRecord.setLastAttempt(false);
+ saveIndexFile();
+ } finally {
+ br.close();
+ }
+ }
+ if (isRemoveIndex) {
+ // Remove this entry from index
+ removeIndexRecord(currentConsumerIndexRecord);
+ currentConsumerIndexRecord = null;
+ closeFileIfNeeded();
+ }
+ } catch (InterruptedException e) {
+ logger.info("Caught exception in consumer thread. Shutdown might be in progress");
+ break;
+ } catch (Throwable t) {
+ logger.error("Exception in destination writing thread.", t);
+ }
+ }
+ logger.info("Exiting file spooler. provider=" + FILE_QUEUE_PROVIDER_NAME
+ + ", consumer=" + consumerProvider.getName());
+ }
+
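+ // Reads events line by line starting from the last saved line position,
+ // batching up to bufferSize events per sendEvent() call.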
+ private void logEvent(BufferedReader br) throws Exception {
+ String line;
+ int currLine = 0;
+ int startLine = currentConsumerIndexRecord.getLinePosition();
+ List<AuditEventBase> events = new ArrayList<>();
+ while ((line = br.readLine()) != null) {
+ currLine++;
+ if (currLine < startLine) {
+ continue;
+ }
+ AuditEventBase event = MiscUtil.fromJson(line, AuthzAuditEvent.class);
+ events.add(event);
+
+ if (events.size() == bufferSize) {
+ boolean ret = sendEvent(events,
+ currentConsumerIndexRecord, currLine);
+ if (!ret) {
+ throw new Exception("Destination down");
+ }
+ events.clear();
+ }
+ }
+ if (events.size() > 0) {
+ boolean ret = sendEvent(events,
+ currentConsumerIndexRecord, currLine);
+ if (!ret) {
+ throw new Exception("Destination down");
+ }
+ events.clear();
+ }
+ }
+
+ private boolean sendEvent(List<AuditEventBase> events, AuditIndexRecord indexRecord,
+ int currLine) {
+ boolean ret = true;
+ try {
+ ret = consumerProvider.log(events);
+ if (!ret) {
+ // Need to log error after fixed interval
+ logError("Error sending logs to consumer. provider="
+ + FILE_QUEUE_PROVIDER_NAME + ", consumer="
+ + consumerProvider.getName());
+ } else {
+ // Update index and save
+ indexRecord.setLinePosition(currLine);
+ indexRecord.setStatus(SPOOL_FILE_STATUS.read_inprogress);
+ indexRecord.setLastSuccessTime(new Date());
+ indexRecord.setLastAttempt(true);
+ saveIndexFile();
+
+ if (isDestDown) {
+ isDestDown = false;
+ logger.info("Destination up now. " + indexRecord.getFilePath()
+ + ", queueName=" + FILE_QUEUE_PROVIDER_NAME
+ + ", consumer=" + consumerProvider.getName());
+ }
+ }
+ } catch (Throwable t) {
+ logger.error("Error while sending logs to consumer. provider="
+ + FILE_QUEUE_PROVIDER_NAME + ", consumer="
+ + consumerProvider.getName() + ", log=" + events, t);
+ }
+
+ return ret;
+ }
+
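+ // Hands the whole spool file to the consumer; used when the audit file type is
+ // JSON, so the file can be shipped as-is.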
+ private void logFile(File file) throws Exception {
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> AuditFileQueueSpool.logFile()");
+ }
+ int currLine = 0;
+ int startLine = currentConsumerIndexRecord.getLinePosition();
+
+ if (currLine < startLine) {
+ currLine++;
+ }
+
+ boolean ret = sendFile(file,currentConsumerIndexRecord, currLine);
+ if (!ret) {
+ throw new Exception("Destination down");
+ }
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== AuditFileQueueSpool.logFile()");
+ }
+ }
+
+ private boolean sendFile(File file, AuditIndexRecord indexRecord,
+ int currLine) {
+ boolean ret = true;
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> AuditFileQueueSpool.sendFile()");
+ }
+
+ try {
+ ret = consumerProvider.logFile(file);
+ if (!ret) {
+ // Need to log error after fixed interval
+ logError("Error sending log file to consumer. provider="
+ + FILE_QUEUE_PROVIDER_NAME + ", consumer="
+ + consumerProvider.getName()+ ", logFile=" + file.getName());
+ } else {
+ // Update index and save
+ indexRecord.setLinePosition(currLine);
+ indexRecord.setStatus(SPOOL_FILE_STATUS.read_inprogress);
+ indexRecord.setLastSuccessTime(new Date());
+ indexRecord.setLastAttempt(true);
+ saveIndexFile();
+
+ if (isDestDown) {
+ isDestDown = false;
+ logger.info("Destination up now. " + indexRecord.getFilePath()
+ + ", queueName=" + FILE_QUEUE_PROVIDER_NAME
+ + ", consumer=" + consumerProvider.getName());
+ }
+ }
+ } catch (Throwable t) {
+ logger.error("Error sending log file to consumer. provider="
+ + FILE_QUEUE_PROVIDER_NAME + ", consumer="
+ + consumerProvider.getName() + ", logFile=" + file.getName(), t);
+ }
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== AuditFileQueueSpool.sendFile() " + ret );
+ }
+ return ret;
+ }
+}
\ No newline at end of file
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileSpool.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileSpool.java
index cbd819dda6..0e550ae8cb 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileSpool.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditFileSpool.java
@@ -22,7 +22,6 @@
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
-import java.io.FileFilter;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.FileWriter;
@@ -38,26 +37,22 @@
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.log4j.MDC;
import org.apache.ranger.audit.model.AuditEventBase;
+import org.apache.ranger.audit.model.AuditIndexRecord;
+import org.apache.ranger.audit.model.SPOOL_FILE_STATUS;
import org.apache.ranger.audit.provider.AuditHandler;
import org.apache.ranger.audit.provider.MiscUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
-import com.google.gson.Gson;
-import com.google.gson.GsonBuilder;
+import org.slf4j.MDC;
/**
* This class temporarily stores logs in file system if the destination is
* overloaded or down
*/
public class AuditFileSpool implements Runnable {
- private static final Log logger = LogFactory.getLog(AuditFileSpool.class);
-
- public enum SPOOL_FILE_STATUS {
- pending, write_inprogress, read_inprogress, done
- }
+ private static final Logger logger = LoggerFactory.getLogger(AuditFileSpool.class);
public static final String PROP_FILE_SPOOL_LOCAL_DIR = "filespool.dir";
public static final String PROP_FILE_SPOOL_LOCAL_FILE_NAME = "filespool.filename.format";
@@ -66,14 +61,13 @@ public enum SPOOL_FILE_STATUS {
public static final String PROP_FILE_SPOOL_FILENAME_PREFIX = "filespool.file.prefix";
public static final String PROP_FILE_SPOOL_FILE_ROLLOVER = "filespool.file.rollover.sec";
public static final String PROP_FILE_SPOOL_INDEX_FILE = "filespool.index.filename";
- // public static final String PROP_FILE_SPOOL_INDEX_DONE_FILE =
- // "filespool.index.done_filename";
public static final String PROP_FILE_SPOOL_DEST_RETRY_MS = "filespool.destination.retry.ms";
+ public static final String CONSUMER = ", consumer=";
AuditQueue queueProvider = null;
AuditHandler consumerProvider = null;
- BlockingQueue<AuditIndexRecord> indexQueue = new LinkedBlockingQueue<AuditIndexRecord>();
+ BlockingQueue<AuditIndexRecord> indexQueue = new LinkedBlockingQueue<>();
// Folder and File attributes
File logFolder = null;
@@ -91,7 +85,7 @@ public enum SPOOL_FILE_STATUS {
int errorLogIntervalMS = 30 * 1000; // Every 30 seconds
long lastErrorLogMS = 0;
- List<AuditIndexRecord> indexRecords = new ArrayList<AuditIndexRecord>();
+ List<AuditIndexRecord> indexRecords = new ArrayList<>();
boolean isPending = false;
long lastAttemptTime = 0;
@@ -109,8 +103,6 @@ public enum SPOOL_FILE_STATUS {
boolean isDrain = false;
boolean isDestDown = false;
- private Gson gson = null;
-
public AuditFileSpool(AuditQueue queueProvider,
AuditHandler consumerProvider) {
this.queueProvider = queueProvider;
@@ -123,9 +115,7 @@ public void init(Properties prop) {
public boolean init(Properties props, String basePropertyName) {
if (initDone) {
- logger.error("init() called more than once. queueProvider="
- + queueProvider.getName() + ", consumerProvider="
- + consumerProvider.getName());
+ logger.error("init() called more than once. queueProvider={}, consumerProvider={}", queueProvider.getName(), consumerProvider.getName());
return true;
}
String propPrefix = "xasecure.audit.filespool";
@@ -134,9 +124,6 @@ public boolean init(Properties props, String basePropertyName) {
}
try {
- gson = new GsonBuilder().setDateFormat("yyyy-MM-dd HH:mm:ss.SSS")
- .create();
-
// Initial folder and file properties
String logFolderProp = MiscUtil.getStringProperty(props, propPrefix
+ "." + PROP_FILE_SPOOL_LOCAL_DIR);
@@ -155,41 +142,29 @@ public boolean init(Properties props, String basePropertyName) {
maxArchiveFiles = MiscUtil.getIntProperty(props, propPrefix + "."
+ PROP_FILE_SPOOL_ARCHIVE_MAX_FILES_COUNT, maxArchiveFiles);
- logger.info("retryDestinationMS=" + retryDestinationMS
- + ", queueName=" + queueProvider.getName());
- logger.info("fileRolloverSec=" + fileRolloverSec + ", queueName="
- + queueProvider.getName());
- logger.info("maxArchiveFiles=" + maxArchiveFiles + ", queueName="
- + queueProvider.getName());
+ logger.info("retryDestinationMS={}, queueName={}", retryDestinationMS, queueProvider.getName());
+ logger.info("fileRolloverSec={}, queueName={}", fileRolloverSec, queueProvider.getName());
+ logger.info("maxArchiveFiles={}, queueName={}", maxArchiveFiles, queueProvider.getName());
if (logFolderProp == null || logFolderProp.isEmpty()) {
- logger.fatal("Audit spool folder is not configured. Please set "
- + propPrefix
- + "."
- + PROP_FILE_SPOOL_LOCAL_DIR
- + ". queueName=" + queueProvider.getName());
+ logger.error("Audit spool folder is not configured. Please set {}.{}. queueName={}", propPrefix, PROP_FILE_SPOOL_LOCAL_DIR, queueProvider.getName());
return false;
}
logFolder = new File(logFolderProp);
if (!logFolder.isDirectory()) {
logFolder.mkdirs();
if (!logFolder.isDirectory()) {
- logger.fatal("File Spool folder not found and can't be created. folder="
- + logFolder.getAbsolutePath()
- + ", queueName="
- + queueProvider.getName());
+ logger.error("File Spool folder not found and can't be created. folder={}, queueName={}", logFolder.getAbsolutePath(), queueProvider.getName());
return false;
}
}
- logger.info("logFolder=" + logFolder + ", queueName="
- + queueProvider.getName());
+ logger.info("logFolder={}, queueName={}", logFolder, queueProvider.getName());
if (logFileNameFormat == null || logFileNameFormat.isEmpty()) {
logFileNameFormat = "spool_" + "%app-type%" + "_"
+ "%time:yyyyMMdd-HHmm.ss%.log";
}
- logger.info("logFileNameFormat=" + logFileNameFormat
- + ", queueName=" + queueProvider.getName());
+ logger.info("logFileNameFormat={}, queueName={}", logFileNameFormat, queueProvider.getName());
if (archiveFolderProp == null || archiveFolderProp.isEmpty()) {
archiveFolder = new File(logFolder, "archive");
@@ -199,15 +174,11 @@ public boolean init(Properties props, String basePropertyName) {
if (!archiveFolder.isDirectory()) {
archiveFolder.mkdirs();
if (!archiveFolder.isDirectory()) {
- logger.error("File Spool archive folder not found and can't be created. folder="
- + archiveFolder.getAbsolutePath()
- + ", queueName="
- + queueProvider.getName());
+ logger.error("File Spool archive folder not found and can't be created. folder={}, queueName={}", archiveFolder.getAbsolutePath(), queueProvider.getName());
return false;
}
}
- logger.info("archiveFolder=" + archiveFolder + ", queueName="
- + queueProvider.getName());
+ logger.info("archiveFolder={}, queueName={}", archiveFolder, queueProvider.getName());
if (indexFileName == null || indexFileName.isEmpty()) {
if (fileNamePrefix == null || fileNamePrefix.isEmpty()) {
@@ -224,13 +195,11 @@ public boolean init(Properties props, String basePropertyName) {
if (!indexFile.exists()) {
boolean ret = indexFile.createNewFile();
if (!ret) {
- logger.fatal("Error creating index file. fileName="
- + indexDoneFile.getPath());
+ logger.error("Error creating index file. fileName={}", indexDoneFile.getPath());
return false;
}
}
- logger.info("indexFile=" + indexFile + ", queueName="
- + queueProvider.getName());
+ logger.info("indexFile={}, queueName={}", indexFile, queueProvider.getName());
int lastDot = indexFileName.lastIndexOf('.');
if (lastDot < 0) {
@@ -242,39 +211,34 @@ public boolean init(Properties props, String basePropertyName) {
if (!indexDoneFile.exists()) {
boolean ret = indexDoneFile.createNewFile();
if (!ret) {
- logger.fatal("Error creating index done file. fileName="
- + indexDoneFile.getPath());
+ logger.error("Error creating index done file. fileName={}", indexDoneFile.getPath());
return false;
}
}
- logger.info("indexDoneFile=" + indexDoneFile + ", queueName="
- + queueProvider.getName());
+ logger.info("indexDoneFile={}, queueName={}", indexDoneFile, queueProvider.getName());
// Load index file
loadIndexFile();
for (AuditIndexRecord auditIndexRecord : indexRecords) {
- if (!auditIndexRecord.status.equals(SPOOL_FILE_STATUS.done)) {
+ if (!auditIndexRecord.getStatus().equals(SPOOL_FILE_STATUS.done)) {
isPending = true;
}
- if (auditIndexRecord.status
+ if (auditIndexRecord.getStatus()
.equals(SPOOL_FILE_STATUS.write_inprogress)) {
currentWriterIndexRecord = auditIndexRecord;
- logger.info("currentWriterIndexRecord="
- + currentWriterIndexRecord.filePath
- + ", queueName=" + queueProvider.getName());
+ logger.info("currentWriterIndexRecord={}, queueName={}", currentWriterIndexRecord.getFilePath(), queueProvider.getName());
}
- if (auditIndexRecord.status
+ if (auditIndexRecord.getStatus()
.equals(SPOOL_FILE_STATUS.read_inprogress)) {
indexQueue.add(auditIndexRecord);
}
}
printIndex();
for (AuditIndexRecord auditIndexRecord : indexRecords) {
- if (auditIndexRecord.status.equals(SPOOL_FILE_STATUS.pending)) {
- File consumerFile = new File(auditIndexRecord.filePath);
+ if (auditIndexRecord.getStatus().equals(SPOOL_FILE_STATUS.pending)) {
+ File consumerFile = new File(auditIndexRecord.getFilePath());
if (!consumerFile.exists()) {
- logger.error("INIT: Consumer file="
- + consumerFile.getPath() + " not found.");
+ logger.error("INIT: Consumer file={} not found.", consumerFile.getPath());
} else {
indexQueue.add(auditIndexRecord);
}
@@ -282,7 +246,7 @@ public boolean init(Properties props, String basePropertyName) {
}
} catch (Throwable t) {
- logger.fatal("Error initializing File Spooler. queue="
+ logger.error("Error initializing File Spooler. queue="
+ queueProvider.getName(), t);
return false;
}
@@ -295,14 +259,11 @@ public boolean init(Properties props, String basePropertyName) {
*/
public void start() {
if (!initDone) {
- logger.error("Cannot start Audit File Spooler. Initilization not done yet. queueName="
- + queueProvider.getName());
+ logger.error("Cannot start Audit File Spooler. Initilization not done yet. queueName={}", queueProvider.getName());
return;
}
- logger.info("Starting writerThread, queueName="
- + queueProvider.getName() + ", consumer="
- + consumerProvider.getName());
+ logger.info("Starting writerThread, queueName={}, consumer={}", queueProvider.getName(), consumerProvider.getName());
// Let's start the thread to read
destinationThread = new Thread(this, queueProvider.getName() + "_"
@@ -313,12 +274,10 @@ public void start() {
public void stop() {
if (!initDone) {
- logger.error("Cannot stop Audit File Spooler. Initilization not done. queueName="
- + queueProvider.getName());
+ logger.error("Cannot stop Audit File Spooler. Initilization not done. queueName={}", queueProvider.getName());
return;
}
- logger.info("Stop called, queueName=" + queueProvider.getName()
- + ", consumer=" + consumerProvider.getName());
+ logger.info("Stop called, queueName={}, consumer={}", queueProvider.getName(), consumerProvider.getName());
isDrain = true;
flush();
@@ -337,9 +296,7 @@ public void stop() {
continue;
}
try {
- logger.info("Closing open file, queueName="
- + queueProvider.getName() + ", consumer="
- + consumerProvider.getName());
+ logger.info("Closing open file, queueName={}, consumer={}", queueProvider.getName(), consumerProvider.getName());
out.flush();
out.close();
@@ -354,15 +311,14 @@ public void stop() {
destinationThread.interrupt();
}
destinationThread = null;
- } catch (Throwable e) {
+ } catch (Exception e) {
// ignore
}
}
public void flush() {
if (!initDone) {
- logger.error("Cannot flush Audit File Spooler. Initilization not done. queueName="
- + queueProvider.getName());
+ logger.error("Cannot flush Audit File Spooler. Initilization not done. queueName={}", queueProvider.getName());
return;
}
PrintWriter out = getOpenLogFileStream();
@@ -379,8 +335,7 @@ public void flush() {
*/
public boolean isPending() {
if (!initDone) {
- logError("isPending(): File Spooler not initialized. queueName="
- + queueProvider.getName());
+ logError("isPending(): File Spooler not initialized. queueName={}", queueProvider.getName());
return false;
}
@@ -399,11 +354,10 @@ public long getLastAttemptTimeDelta() {
return System.currentTimeMillis() - lastAttemptTime;
}
- synchronized public void stashLogs(AuditEventBase event) {
+ public synchronized void stashLogs(AuditEventBase event) {
if (isDrain) {
// Stop has been called, so this method shouldn't be called
- logger.error("stashLogs() is called after stop is called. event="
- + event);
+ logger.error("stashLogs() is called after stop is called. event={}", event);
return;
}
try {
@@ -414,25 +368,24 @@ synchronized public void stashLogs(AuditEventBase event) {
logOut.println(jsonStr);
isPending = true;
} catch (Exception ex) {
- logger.error("Error writing to file. event=" + event, ex);
+ logger.error("Error writing to file. event={}", event, ex);
} finally {
isWriting = false;
}
}
- synchronized public void stashLogs(Collection<AuditEventBase> events) {
+ public synchronized void stashLogs(Collection<AuditEventBase> events) {
for (AuditEventBase event : events) {
stashLogs(event);
}
flush();
}
- synchronized public void stashLogsString(String event) {
+ public synchronized void stashLogsString(String event) {
if (isDrain) {
// Stop has been called, so this method shouldn't be called
- logger.error("stashLogs() is called after stop is called. event="
- + event);
+ logger.error("stashLogs() is called after stop is called. event={}", event);
return;
}
try {
@@ -440,14 +393,14 @@ synchronized public void stashLogsString(String event) {
PrintWriter logOut = getLogFileStream();
logOut.println(event);
} catch (Exception ex) {
- logger.error("Error writing to file. event=" + event, ex);
+ logger.error("Error writing to file. event={}", event, ex);
} finally {
isWriting = false;
}
}
- synchronized public void stashLogsString(Collection<String> events) {
+ public synchronized void stashLogsString(Collection<String> events) {
for (String event : events) {
stashLogsString(event);
}
@@ -461,7 +414,7 @@ synchronized public void stashLogsString(Collection events) {
* @return
* @throws Exception
*/
- synchronized private PrintWriter getOpenLogFileStream() {
+ private synchronized PrintWriter getOpenLogFileStream() {
return logWriter;
}
@@ -469,7 +422,7 @@ synchronized private PrintWriter getOpenLogFileStream() {
* @return
* @throws Exception
*/
- synchronized private PrintWriter getLogFileStream() throws Exception {
+ private synchronized PrintWriter getLogFileStream() throws Exception {
closeFileIfNeeded();
// Either there are no open log file or the previous one has been rolled
@@ -495,19 +448,18 @@ synchronized private PrintWriter getLogFileStream() throws Exception {
newFileName = baseName + "." + i + extension;
}
fileName = newFileName;
- logger.info("Creating new file. queueName="
- + queueProvider.getName() + ", fileName=" + fileName);
+ logger.info("Creating new file. queueName={}, filename={}",queueProvider.getName(), fileName);
// Open the file
logWriter = new PrintWriter(new BufferedWriter(new FileWriter(
outLogFile)));
AuditIndexRecord tmpIndexRecord = new AuditIndexRecord();
- tmpIndexRecord.id = MiscUtil.generateUniqueId();
- tmpIndexRecord.filePath = outLogFile.getPath();
- tmpIndexRecord.status = SPOOL_FILE_STATUS.write_inprogress;
- tmpIndexRecord.fileCreateTime = currentTime;
- tmpIndexRecord.lastAttempt = true;
+ tmpIndexRecord.setId(MiscUtil.generateUniqueId());
+ tmpIndexRecord.setFilePath(outLogFile.getPath());
+ tmpIndexRecord.setStatus(SPOOL_FILE_STATUS.write_inprogress);
+ tmpIndexRecord.setFileCreateTime(currentTime);
+ tmpIndexRecord.setLastAttempt(true);
currentWriterIndexRecord = tmpIndexRecord;
indexRecords.add(currentWriterIndexRecord);
saveIndexFile();
@@ -516,18 +468,15 @@ synchronized private PrintWriter getLogFileStream() throws Exception {
if (logWriter == null) {
// This means the process just started. We need to open the file
// in append mode.
- logger.info("Opening existing file for append. queueName="
- + queueProvider.getName() + ", fileName="
- + currentWriterIndexRecord.filePath);
+ logger.info("Opening existing file for append. queueName={}, filename={}", queueProvider.getName(), currentWriterIndexRecord.getFilePath());
logWriter = new PrintWriter(new BufferedWriter(new FileWriter(
- currentWriterIndexRecord.filePath, true)));
+ currentWriterIndexRecord.getFilePath(), true)));
}
}
return logWriter;
}
- synchronized private void closeFileIfNeeded() throws FileNotFoundException,
- IOException {
+ private synchronized void closeFileIfNeeded() throws IOException {
// Is there file open to write or there are no pending file, then close
// the active file
if (currentWriterIndexRecord != null) {
@@ -537,13 +486,11 @@ synchronized private void closeFileIfNeeded() throws FileNotFoundException,
closeFile = true;
logger.info("Closing file. Only one open file. queueName="
+ queueProvider.getName() + ", fileName="
- + currentWriterIndexRecord.filePath);
+ + currentWriterIndexRecord.getFilePath());
} else if (System.currentTimeMillis()
- - currentWriterIndexRecord.fileCreateTime.getTime() > fileRolloverSec * 1000) {
+ - currentWriterIndexRecord.getFileCreateTime().getTime() > fileRolloverSec * 1000) {
closeFile = true;
- logger.info("Closing file. Rolling over. queueName="
- + queueProvider.getName() + ", fileName="
- + currentWriterIndexRecord.filePath);
+ logger.info("Closing file. Only one open file. queueName={}, filename={}",queueProvider.getName(), currentWriterIndexRecord.getFilePath());
}
if (closeFile) {
// Roll the file
@@ -552,12 +499,10 @@ synchronized private void closeFileIfNeeded() throws FileNotFoundException,
logWriter.close();
logWriter = null;
}
- currentWriterIndexRecord.status = SPOOL_FILE_STATUS.pending;
- currentWriterIndexRecord.writeCompleteTime = new Date();
+ currentWriterIndexRecord.setStatus(SPOOL_FILE_STATUS.pending);
+ currentWriterIndexRecord.setWriteCompleteTime(new Date());
saveIndexFile();
- logger.info("Adding file to queue. queueName="
- + queueProvider.getName() + ", fileName="
- + currentWriterIndexRecord.filePath);
+ logger.info("Adding file to queue. queueName={}, filename={}", queueProvider.getName(), currentWriterIndexRecord.getFilePath());
indexQueue.add(currentWriterIndexRecord);
currentWriterIndexRecord = null;
}
@@ -570,18 +515,17 @@ synchronized private void closeFileIfNeeded() throws FileNotFoundException,
* @throws IOException
*/
void loadIndexFile() throws IOException {
- logger.info("Loading index file. fileName=" + indexFile.getPath());
- BufferedReader br = new BufferedReader(new FileReader(indexFile));
- indexRecords.clear();
- String line;
- while ((line = br.readLine()) != null) {
- if (!line.isEmpty() && !line.startsWith("#")) {
- AuditIndexRecord record = gson.fromJson(line,
- AuditIndexRecord.class);
- indexRecords.add(record);
+ logger.info("Loading index file. fileName={}", indexFile.getPath());
+ try (BufferedReader br = new BufferedReader(new FileReader(indexFile))) {
+ indexRecords.clear();
+ String line;
+ while ((line = br.readLine()) != null) {
+ if (!line.isEmpty() && !line.startsWith("#")) {
+ AuditIndexRecord record = MiscUtil.fromJson(line, AuditIndexRecord.class);
+ indexRecords.add(record);
+ }
}
}
- br.close();
}
synchronized void printIndex() {
@@ -589,8 +533,7 @@ synchronized void printIndex() {
Iterator<AuditIndexRecord> iter = indexRecords.iterator();
while (iter.hasNext()) {
AuditIndexRecord record = iter.next();
- logger.info("INDEX=" + record + ", isFileExist="
- + (new File(record.filePath).exists()));
+ logger.info("INDEX={}, isFileExist={}", record, (new File(record.getFilePath()).exists()));
}
logger.info("INDEX printIndex() ==== END");
}
@@ -600,10 +543,8 @@ synchronized void removeIndexRecord(AuditIndexRecord indexRecord)
Iterator<AuditIndexRecord> iter = indexRecords.iterator();
while (iter.hasNext()) {
AuditIndexRecord record = iter.next();
- if (record.id.equals(indexRecord.id)) {
- logger.info("Removing file from index. file=" + record.filePath
- + ", queueName=" + queueProvider.getName()
- + ", consumer=" + consumerProvider.getName());
+ if (record.getId().equals(indexRecord.getId())) {
+ logger.info("Removing file from index. file={}, queueName={}, consumer={}", record.getFilePath(), queueProvider.getName(), consumerProvider.getName());
iter.remove();
appendToDoneFile(record);
@@ -612,27 +553,23 @@ synchronized void removeIndexRecord(AuditIndexRecord indexRecord)
saveIndexFile();
// If there are no more files in the index, then let's assume the
// destination is now available
- if (indexRecords.size() == 0) {
+ if (indexRecords.isEmpty()) {
isPending = false;
}
}
- synchronized void saveIndexFile() throws FileNotFoundException, IOException {
- PrintWriter out = new PrintWriter(indexFile);
- for (AuditIndexRecord auditIndexRecord : indexRecords) {
- out.println(gson.toJson(auditIndexRecord));
+ synchronized void saveIndexFile() throws IOException {
+ try (PrintWriter out = new PrintWriter(indexFile)) {
+ for (AuditIndexRecord auditIndexRecord : indexRecords) {
+ out.println(MiscUtil.stringify(auditIndexRecord));
+ }
}
- out.close();
- // printIndex();
-
}
void appendToDoneFile(AuditIndexRecord indexRecord)
- throws FileNotFoundException, IOException {
- logger.info("Moving to done file. " + indexRecord.filePath
- + ", queueName=" + queueProvider.getName() + ", consumer="
- + consumerProvider.getName());
- String line = gson.toJson(indexRecord);
+ throws IOException {
+ logger.info("Moving to done file. {}, queueName={}, consumer={}", indexRecord.getFilePath(), queueProvider.getName(), consumerProvider.getName());
+ String line = MiscUtil.stringify(indexRecord);
PrintWriter out = new PrintWriter(new BufferedWriter(new FileWriter(
indexDoneFile, true)));
out.println(line);
@@ -643,102 +580,69 @@ void appendToDoneFile(AuditIndexRecord indexRecord)
File logFile = null;
File archiveFile = null;
try {
- logFile = new File(indexRecord.filePath);
+ logFile = new File(indexRecord.getFilePath());
String fileName = logFile.getName();
archiveFile = new File(archiveFolder, fileName);
- logger.info("Moving logFile " + logFile + " to " + archiveFile);
- logFile.renameTo(archiveFile);
- } catch (Throwable t) {
- logger.error("Error moving log file to archive folder. logFile="
- + logFile + ", archiveFile=" + archiveFile, t);
+ logger.info("Moving logFile{} to {}", logFile, archiveFile);
+ boolean filedRenamed = logFile.renameTo(archiveFile);
+ if(logger.isDebugEnabled()) {
+ logger.debug("logFile renamed to archiveFile {}{}", archiveFile, filedRenamed );
+ }
+ } catch (Exception t) {
+ logger.error("Error moving log file to archive folder. logFile={}, archiveFile={}", logFile, archiveFile, t);
}
archiveFile = null;
try {
// Remove old files
- File[] logFiles = archiveFolder.listFiles(new FileFilter() {
- public boolean accept(File pathname) {
- return pathname.getName().toLowerCase().endsWith(".log");
- }
- });
+ File[] logFiles = archiveFolder.listFiles(pathname -> pathname.getName().toLowerCase().endsWith(".log"));
if (logFiles != null && logFiles.length > maxArchiveFiles) {
int filesToDelete = logFiles.length - maxArchiveFiles;
- BufferedReader br = new BufferedReader(new FileReader(
- indexDoneFile));
- try {
+ try (BufferedReader br = new BufferedReader(new FileReader(indexDoneFile))) {
int filesDeletedCount = 0;
while ((line = br.readLine()) != null) {
if (!line.isEmpty() && !line.startsWith("#")) {
- AuditIndexRecord record = gson.fromJson(line,
- AuditIndexRecord.class);
- logFile = new File(record.filePath);
+ try {
+ AuditIndexRecord record = MiscUtil.fromJson(line,
+ AuditIndexRecord.class);
+ logFile = new File(record.getFilePath());
String fileName = logFile.getName();
archiveFile = new File(archiveFolder, fileName);
if (archiveFile.exists()) {
- logger.info("Deleting archive file "
- + archiveFile);
+ logger.info("Deleting archive file {}", archiveFile);
boolean ret = archiveFile.delete();
if (!ret) {
- logger.error("Error deleting archive file. archiveFile="
- + archiveFile);
+ logger.error("Error deleting archive file. archiveFile={}", archiveFile);
}
filesDeletedCount++;
if (filesDeletedCount >= filesToDelete) {
- logger.info("Deleted " + filesDeletedCount
- + " files");
+ logger.info("Deleted {} files", filesDeletedCount);
break;
}
}
+ } catch (Exception e) {
+ logger.error("Error parsing following JSON: "+line, e);
+ }
}
}
- } finally {
- br.close();
}
}
- } catch (Throwable t) {
+ } catch (Exception t) {
logger.error("Error deleting older archive file. archiveFile="
+ archiveFile, t);
}
}
- void logError(String msg) {
+ void logError(String msg, Object... arguments) {
long currTimeMS = System.currentTimeMillis();
if (currTimeMS - lastErrorLogMS > errorLogIntervalMS) {
- logger.error(msg);
+ logger.error(msg, arguments);
lastErrorLogMS = currTimeMS;
}
}
- class AuditIndexRecord {
- String id;
- String filePath;
- int linePosition = 0;
- SPOOL_FILE_STATUS status = SPOOL_FILE_STATUS.write_inprogress;
- Date fileCreateTime;
- Date writeCompleteTime;
- Date doneCompleteTime;
- Date lastSuccessTime;
- Date lastFailedTime;
- int failedAttemptCount = 0;
- boolean lastAttempt = false;
-
- @Override
- public String toString() {
- return "AuditIndexRecord [id=" + id + ", filePath=" + filePath
- + ", linePosition=" + linePosition + ", status=" + status
- + ", fileCreateTime=" + fileCreateTime
- + ", writeCompleteTime=" + writeCompleteTime
- + ", doneCompleteTime=" + doneCompleteTime
- + ", lastSuccessTime=" + lastSuccessTime
- + ", lastFailedTime=" + lastFailedTime
- + ", failedAttemptCount=" + failedAttemptCount
- + ", lastAttempt=" + lastAttempt + "]";
- }
-
- }
-
class AuditFileSpoolAttempt {
Date attemptTime;
String status;
@@ -755,22 +659,18 @@ public void run() {
//This is done to clear the MDC context to avoid issue with Ranger Auditing for Knox
MDC.clear();
runLogAudit();
- } catch (Throwable t) {
- logger.fatal("Exited thread without abnormaly. queue="
+ } catch (Exception t) {
+ logger.error("Exited thread without abnormaly. queue="
+ consumerProvider.getName(), t);
}
}
public void runLogAudit() {
- // boolean isResumed = false;
while (true) {
try {
if (isDestDown) {
- logger.info("Destination is down. sleeping for "
- + retryDestinationMS
- + " milli seconds. indexQueue=" + indexQueue.size()
- + ", queueName=" + queueProvider.getName()
- + ", consumer=" + consumerProvider.getName());
+ logger.info("Destination is down. sleeping for {} milli seconds. indexQueue={}, queueName={}, consumer={}",
+ retryDestinationMS, indexQueue.size(), queueProvider.getName(), consumerProvider.getName());
Thread.sleep(retryDestinationMS);
}
@@ -793,21 +693,18 @@ public void runLogAudit() {
boolean isRemoveIndex = false;
File consumerFile = new File(
- currentConsumerIndexRecord.filePath);
+ currentConsumerIndexRecord.getFilePath());
if (!consumerFile.exists()) {
- logger.error("Consumer file=" + consumerFile.getPath()
- + " not found.");
+ logger.error("Consumer file={} not found.", consumerFile.getPath());
printIndex();
isRemoveIndex = true;
} else {
// Let's open the file to write
- BufferedReader br = new BufferedReader(new FileReader(
- currentConsumerIndexRecord.filePath));
- try {
- int startLine = currentConsumerIndexRecord.linePosition;
+ try (BufferedReader br = new BufferedReader(new FileReader(currentConsumerIndexRecord.getFilePath()))) {
+ int startLine = currentConsumerIndexRecord.getLinePosition();
String line;
int currLine = 0;
- List<String> lines = new ArrayList<String>();
+ List<String> lines = new ArrayList<>();
while ((line = br.readLine()) != null) {
currLine++;
if (currLine < startLine) {
@@ -823,7 +720,7 @@ public void runLogAudit() {
lines.clear();
}
}
- if (lines.size() > 0) {
+ if (!lines.isEmpty()) {
boolean ret = sendEvent(lines,
currentConsumerIndexRecord, currLine);
if (!ret) {
@@ -831,29 +728,22 @@ public void runLogAudit() {
}
lines.clear();
}
- logger.info("Done reading file. file="
- + currentConsumerIndexRecord.filePath
- + ", queueName=" + queueProvider.getName()
- + ", consumer=" + consumerProvider.getName());
+ logger.info("Done reading file. file={}, queueName={}, consumer={}", currentConsumerIndexRecord.getFilePath(), queueProvider.getName(), consumerProvider.getName());
// The entire file is read
- currentConsumerIndexRecord.status = SPOOL_FILE_STATUS.done;
- currentConsumerIndexRecord.doneCompleteTime = new Date();
- currentConsumerIndexRecord.lastAttempt = true;
+ currentConsumerIndexRecord.setStatus(SPOOL_FILE_STATUS.done);
+ currentConsumerIndexRecord.setDoneCompleteTime(new Date());
+ currentConsumerIndexRecord.setLastAttempt(true);
isRemoveIndex = true;
} catch (Exception ex) {
isDestDown = true;
- logError("Destination down. queueName="
- + queueProvider.getName() + ", consumer="
- + consumerProvider.getName());
+ logError("Destination down. queueName={}, consumer={}", queueProvider.getName(), consumerProvider.getName());
lastAttemptTime = System.currentTimeMillis();
// Update the index file
- currentConsumerIndexRecord.lastFailedTime = new Date();
- currentConsumerIndexRecord.failedAttemptCount++;
- currentConsumerIndexRecord.lastAttempt = false;
+ currentConsumerIndexRecord.setLastFailedTime(new Date());
+ currentConsumerIndexRecord.setFailedAttemptCount(currentConsumerIndexRecord.getFailedAttemptCount()+1);
+ currentConsumerIndexRecord.setLastAttempt(false);
saveIndexFile();
- } finally {
- br.close();
}
}
if (isRemoveIndex) {
@@ -864,12 +754,12 @@ public void runLogAudit() {
}
} catch (InterruptedException e) {
logger.info("Caught exception in consumer thread. Shutdown might be in progress");
- } catch (Throwable t) {
+ break;
+ } catch (Exception t) {
logger.error("Exception in destination writing thread.", t);
}
}
- logger.info("Exiting file spooler. provider=" + queueProvider.getName()
- + ", consumer=" + consumerProvider.getName());
+ logger.info("Exiting file spooler. provider={}, consumer={}", queueProvider.getName(), consumerProvider.getName());
}
private boolean sendEvent(List<String> lines, AuditIndexRecord indexRecord,
@@ -879,28 +769,22 @@ private boolean sendEvent(List<String> lines, AuditIndexRecord indexRecord,
ret = consumerProvider.logJSON(lines);
if (!ret) {
// Need to log error after fixed interval
- logError("Error sending logs to consumer. provider="
- + queueProvider.getName() + ", consumer="
- + consumerProvider.getName());
+ logError("Error sending logs to consumer. provider={}, consumer={}", queueProvider.getName(), consumerProvider.getName());
} else {
// Update index and save
- indexRecord.linePosition = currLine;
- indexRecord.status = SPOOL_FILE_STATUS.read_inprogress;
- indexRecord.lastSuccessTime = new Date();
- indexRecord.lastAttempt = true;
+ indexRecord.setLinePosition(currLine);
+ indexRecord.setStatus(SPOOL_FILE_STATUS.read_inprogress);
+ indexRecord.setLastSuccessTime(new Date());
+ indexRecord.setLastAttempt(true);
saveIndexFile();
if (isDestDown) {
isDestDown = false;
- logger.info("Destination up now. " + indexRecord.filePath
- + ", queueName=" + queueProvider.getName()
- + ", consumer=" + consumerProvider.getName());
+ logger.info("Destination up now. {}, queueName={}, consumer={}", indexRecord.getFilePath(), queueProvider.getName(), consumerProvider.getName());
}
}
- } catch (Throwable t) {
- logger.error("Error while sending logs to consumer. provider="
- + queueProvider.getName() + ", consumer="
- + consumerProvider.getName() + ", log=" + lines, t);
+ } catch (Exception t) {
+ logger.error("Error while sending logs to consumer. provider={}, consumer={}, logEventCount={}", queueProvider.getName(), consumerProvider.getName(), lines.size(), t);
}
return ret;
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditQueue.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditQueue.java
index e1667a47c0..e2d974121c 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditQueue.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditQueue.java
@@ -21,15 +21,15 @@
import java.util.Properties;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.ranger.audit.destination.AuditDestination;
import org.apache.ranger.audit.provider.AuditHandler;
import org.apache.ranger.audit.provider.BaseAuditHandler;
import org.apache.ranger.audit.provider.MiscUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public abstract class AuditQueue extends BaseAuditHandler {
- private static final Log LOG = LogFactory.getLog(AuditQueue.class);
+ private static final Logger LOG = LoggerFactory.getLogger(AuditQueue.class);
public static final int AUDIT_MAX_QUEUE_SIZE_DEFAULT = 1024 * 1024;
public static final int AUDIT_BATCH_INTERVAL_DEFAULT_MS = 3000;
@@ -114,7 +114,7 @@ public void init(Properties props, String basePropertyName) {
fileSpooler = new AuditFileSpool(this, consumer);
if (!fileSpooler.init(props, basePropertyName)) {
fileSpoolerEnabled = false;
- LOG.fatal("Couldn't initialize file spooler. Disabling it. queue="
+ LOG.error("Couldn't initialize file spooler. Disabling it. queue="
+ getName() + ", consumer=" + consumer.getName());
}
} else {
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditSummaryQueue.java b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditSummaryQueue.java
index 4c250336c1..cef23db6dc 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditSummaryQueue.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/queue/AuditSummaryQueue.java
@@ -28,19 +28,19 @@
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.log4j.MDC;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.provider.AuditHandler;
import org.apache.ranger.audit.provider.MiscUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.MDC;
/**
* This is a non-blocking queue with no limit on capacity.
*/
public class AuditSummaryQueue extends AuditQueue implements Runnable {
- private static final Log logger = LogFactory
- .getLog(AuditSummaryQueue.class);
+ private static final Logger logger = LoggerFactory
+ .getLogger(AuditSummaryQueue.class);
public static final String PROP_SUMMARY_INTERVAL = "summary.interval.ms";
@@ -151,7 +151,7 @@ public void run() {
MDC.clear();
runLogAudit();
} catch (Throwable t) {
- logger.fatal("Exited thread without abnormaly. queue=" + getName(),
+ logger.error("Exited thread without abnormaly. queue=" + getName(),
t);
}
}
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/test/TestEvents.java b/agents-audit/src/main/java/org/apache/ranger/audit/test/TestEvents.java
index 57f76d8150..5bc3b96c2d 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/test/TestEvents.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/test/TestEvents.java
@@ -18,14 +18,13 @@
*/
package org.apache.ranger.audit.test;
-import org.apache.commons.logging.Log;
-import org.apache.log4j.xml.DOMConfigurator;
import org.apache.ranger.audit.model.AuditEventBase;
import org.apache.ranger.audit.model.AuthzAuditEvent;
import org.apache.ranger.audit.model.EnumRepositoryType;
import org.apache.ranger.audit.provider.AuditHandler;
import org.apache.ranger.audit.provider.AuditProviderFactory;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileInputStream;
@@ -34,11 +33,9 @@
public class TestEvents {
- private static final Log LOG = LogFactory.getLog(TestEvents.class);
+ private static final Logger LOG = LoggerFactory.getLogger(TestEvents.class);
public static void main(String[] args) {
- DOMConfigurator.configure("log4j.xml");
-
LOG.info("==> TestEvents.main()");
try {
@@ -48,28 +45,19 @@ public static void main(String[] args) {
File propFile = new File(AUDIT_PROPERTIES_FILE);
if(propFile.exists()) {
LOG.info("Loading Audit properties file" + AUDIT_PROPERTIES_FILE);
-
- auditProperties.load(new FileInputStream(propFile));
+ try(FileInputStream fileInputStream = new FileInputStream(propFile)) {
+ auditProperties.load(fileInputStream);
+ }
} else {
LOG.info("Audit properties file missing: " + AUDIT_PROPERTIES_FILE);
- auditProperties.setProperty("xasecure.audit.jpa.javax.persistence.jdbc.url", "jdbc:mysql://localhost:3306/xa_db");
- auditProperties.setProperty("xasecure.audit.jpa.javax.persistence.jdbc.user", "xaaudit");
- auditProperties.setProperty("xasecure.audit.jpa.javax.persistence.jdbc.password", "xaaudit");
- auditProperties.setProperty("xasecure.audit.jpa.javax.persistence.jdbc.driver", "com.mysql.jdbc.Driver");
-
auditProperties.setProperty("xasecure.audit.is.enabled", "true");
auditProperties.setProperty("xasecure.audit.log4j.is.enabled", "false");
auditProperties.setProperty("xasecure.audit.log4j.is.async", "false");
auditProperties.setProperty("xasecure.audit.log4j.async.max.queue.size", "100000");
auditProperties.setProperty("xasecure.audit.log4j.async.max.flush.interval.ms", "30000");
- auditProperties.setProperty("xasecure.audit.db.is.enabled", "false");
- auditProperties.setProperty("xasecure.audit.db.is.async", "true");
- auditProperties.setProperty("xasecure.audit.db.async.max.queue.size", "100000");
- auditProperties.setProperty("xasecure.audit.db.async.max.flush.interval.ms", "30000");
- auditProperties.setProperty("xasecure.audit.db.batch.size", "100");
}
AuditProviderFactory factory = new AuditProviderFactory();
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/utils/AbstractKerberosUser.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/AbstractKerberosUser.java
new file mode 100644
index 0000000000..fd1c96e90e
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/AbstractKerberosUser.java
@@ -0,0 +1,246 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ranger.audit.utils;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.security.auth.Subject;
+import javax.security.auth.kerberos.KerberosPrincipal;
+import javax.security.auth.kerberos.KerberosTicket;
+import javax.security.auth.login.LoginContext;
+import javax.security.auth.login.LoginException;
+import java.security.PrivilegedAction;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+public abstract class AbstractKerberosUser implements KerberosUser {
+
+ private static final Logger LOG = LoggerFactory.getLogger(AbstractKerberosUser.class);
+
+ static final String DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ss'Z'";
+
+ /**
+ * Percentage of the ticket window to use before we renew the TGT.
+ */
+ static final float TICKET_RENEW_WINDOW = 0.80f;
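+ // Illustration: a TGT valid from t0 for 10 hours reaches its refresh point at
+ // t0 + 8 hours (0.80 of the start-to-end window); checkTGTAndRelogin() then forces a fresh login.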
+
+ protected final AtomicBoolean loggedIn = new AtomicBoolean(false);
+
+ protected Subject subject;
+ protected LoginContext loginContext;
+
+ public AbstractKerberosUser() {
+ }
+
+ /**
+ * Performs a login using the specified principal and keytab.
+ *
+ * @throws LoginException if the login fails
+ */
+ @Override
+ public synchronized void login() throws LoginException {
+ if (isLoggedIn()) {
+ return;
+ }
+
+ try {
+ // If it's the first time ever calling login then we need to initialize a new context
+ if (loginContext == null) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Initializing new login context...");
+ }
+ if (this.subject == null) {
+ // only create a new subject if a current one does not exist
+ // other classes may be referencing an existing subject and replacing it may break functionality of those other classes after relogin
+ this.subject = new Subject();
+ }
+ this.loginContext = createLoginContext(subject);
+ }
+
+ loginContext.login();
+ loggedIn.set(true);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Successful login for {}", new Object[]{getPrincipal()});
+ }
+ } catch (LoginException le) {
+ LoginException loginException = new LoginException("Unable to login with " + getPrincipal() + " due to: " + le.getMessage());
+ loginException.setStackTrace(le.getStackTrace());
+ throw loginException;
+ }
+ }
+
+ protected abstract LoginContext createLoginContext(final Subject subject) throws LoginException;
+
+ /**
+ * Performs a logout of the current user.
+ *
+ * @throws LoginException if the logout fails
+ */
+ @Override
+ public synchronized void logout() throws LoginException {
+ if (!isLoggedIn()) {
+ return;
+ }
+
+ try {
+ loginContext.logout();
+ loggedIn.set(false);
+ LOG.debug("Successful logout for {}", new Object[]{getPrincipal()});
+
+ loginContext = null;
+ } catch (LoginException e) {
+ throw new LoginException("Logout failed due to: " + e.getMessage());
+ }
+ }
+
+ /**
+ * Executes the PrivilegedAction as this user.
+ *
+ * @param action the action to execute
+ * @param the type of result
+ * @return the result of the action
+ * @throws IllegalStateException if this method is called while not logged in
+ */
+ @Override
+ public <T> T doAs(final PrivilegedAction<T> action) throws IllegalStateException {
+ if (!isLoggedIn()) {
+ throw new IllegalStateException("Must login before executing actions");
+ }
+
+ return Subject.doAs(subject, action);
+ }
+
+ /**
+ * Executes the PrivilegedAction as this user.
+ *
+ * @param action the action to execute
+ * @param <T> the type of result
+ * @return the result of the action
+ * @throws IllegalStateException if this method is called while not logged in
+ * @throws PrivilegedActionException if an exception is thrown from the action
+ */
+ @Override
+ public <T> T doAs(final PrivilegedExceptionAction<T> action)
+ throws IllegalStateException, PrivilegedActionException {
+ if (!isLoggedIn()) {
+ throw new IllegalStateException("Must login before executing actions");
+ }
+
+ return Subject.doAs(subject, action);
+ }
+
+ /**
+ * Re-login a user from keytab if TGT is expired or is close to expiry.
+ *
+ * @throws LoginException if an error happens performing the re-login
+ */
+ @Override
+ public synchronized boolean checkTGTAndRelogin() throws LoginException {
+ final KerberosTicket tgt = getTGT();
+ if (tgt == null) {
+ LOG.debug("TGT was not found");
+ }
+
+ if (tgt != null && System.currentTimeMillis() < getRefreshTime(tgt)) {
+ LOG.debug("TGT was found, but has not reached expiration window");
+ return false;
+ }
+
+ LOG.debug("Performing relogin for {}", new Object[]{getPrincipal()});
+ logout();
+ login();
+ return true;
+ }
+
+ /**
+ * Get the Kerberos TGT.
+ *
+ * @return the user's TGT or null if none was found
+ */
+ private synchronized KerberosTicket getTGT() {
+ final Set<KerberosTicket> tickets = subject.getPrivateCredentials(KerberosTicket.class);
+
+ for (KerberosTicket ticket : tickets) {
+ if (isTGSPrincipal(ticket.getServer())) {
+ return ticket;
+ }
+ }
+
+ return null;
+ }
+
+ /**
+ * TGS must have the server principal of the form "krbtgt/FOO@FOO".
+ *
+ * @param principal the principal to check
+ * @return true if the principal is the TGS, false otherwise
+ */
+ private boolean isTGSPrincipal(final KerberosPrincipal principal) {
+ if (principal == null) {
+ return false;
+ }
+
+ if (principal.getName().equals("krbtgt/" + principal.getRealm() + "@" + principal.getRealm())) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Found TGT principal: " + principal.getName());
+ }
+ return true;
+ }
+
+ return false;
+ }
+
+ private long getRefreshTime(final KerberosTicket tgt) {
+ long start = tgt.getStartTime().getTime();
+ long end = tgt.getEndTime().getTime();
+
+ if (LOG.isTraceEnabled()) {
+ final SimpleDateFormat dateFormat = new SimpleDateFormat(DATE_FORMAT);
+ final String startDate = dateFormat.format(new Date(start));
+ final String endDate = dateFormat.format(new Date(end));
+ LOG.trace("TGT valid starting at: " + startDate);
+ LOG.trace("TGT expires at: " + endDate);
+ }
+
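+ // Refresh point: start time plus 80% of the ticket's validity window (TICKET_RENEW_WINDOW).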
+ return start + (long) ((end - start) * TICKET_RENEW_WINDOW);
+ }
+
+ /**
+ * @return true if this user is currently logged in, false otherwise
+ */
+ @Override
+ public boolean isLoggedIn() {
+ return loggedIn.get();
+ }
+
+ @Override
+ public String toString() {
+ return "KerberosUser{" +
+ "principal='" + getPrincipal() + '\'' +
+ ", loggedIn=" + loggedIn +
+ '}';
+ }
+}
+
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/utils/AbstractRangerAuditWriter.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/AbstractRangerAuditWriter.java
new file mode 100644
index 0000000000..0e74e3bd4b
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/AbstractRangerAuditWriter.java
@@ -0,0 +1,392 @@
+package org.apache.ranger.audit.utils;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.*;
+import org.apache.ranger.audit.provider.MiscUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.net.URI;
+import java.util.Date;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * Abstract base class that holds the common properties of the Ranger audit HDFS destination writers.
+ */
+public abstract class AbstractRangerAuditWriter implements RangerAuditWriter {
+ private static final Logger logger = LoggerFactory.getLogger(AbstractRangerAuditWriter.class);
+
+ public static final String PROP_FILESYSTEM_DIR = "dir";
+ public static final String PROP_FILESYSTEM_SUBDIR = "subdir";
+ public static final String PROP_FILESYSTEM_FILE_NAME_FORMAT = "filename.format";
+ public static final String PROP_FILESYSTEM_FILE_ROLLOVER = "file.rollover.sec";
+ public static final String PROP_FILESYSTEM_ROLLOVER_PERIOD = "file.rollover.period";
+ public static final String PROP_FILESYSTEM_FILE_EXTENSION = ".log";
+ public Configuration conf = null;
+ public FileSystem fileSystem = null;
+ public Map<String, String> auditConfigs = null;
+ public Path auditPath = null;
+ public PrintWriter logWriter = null;
+ public RollingTimeUtil rollingTimeUtil = null;
+ public String auditProviderName = null;
+ public String fullPath = null;
+ public String parentFolder = null;
+ public String currentFileName = null;
+ public String logFileNameFormat = null;
+ public String logFolder = null;
+ public String fileExtension = null;
+ public String rolloverPeriod = null;
+ public String fileSystemScheme = null;
+ public Date nextRollOverTime = null;
+ public int fileRolloverSec = 24 * 60 * 60; // In seconds
+ public boolean rollOverByDuration = false;
+ public volatile FSDataOutputStream ostream = null; // output stream wrapped in logWriter
+ private boolean isHFlushCapableStream = false;
+ protected boolean reUseLastLogFile = false;
+
+ @Override
+ public void init(Properties props, String propPrefix, String auditProviderName, Map<String, String> auditConfigs) {
+ // Initialize properties for this class
+ // Initial folder and file properties
+ logger.info("==> AbstractRangerAuditWriter.init()");
+ this.auditProviderName = auditProviderName;
+ this.auditConfigs = auditConfigs;
+
+ init(props,propPrefix);
+
+ logger.info("<== AbstractRangerAuditWriter.init()");
+ }
+
+ public void createFileSystemFolders() throws Exception {
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> AbstractRangerAuditWriter.createFileSystemFolders()");
+ }
+ // Create a new file
+ Date currentTime = new Date();
+ String fileName = MiscUtil.replaceTokens(logFileNameFormat, currentTime.getTime());
+ parentFolder = MiscUtil.replaceTokens(logFolder, currentTime.getTime());
+ fullPath = parentFolder + Path.SEPARATOR + fileName;
+ String defaultPath = fullPath;
+ conf = createConfiguration();
+ URI uri = URI.create(fullPath);
+ fileSystem = FileSystem.get(uri, conf);
+ auditPath = new Path(fullPath);
+ fileSystemScheme = getFileSystemScheme();
+ logger.info("Checking whether log file exists. "+ fileSystemScheme + "Path= " + fullPath + ", UGI=" + MiscUtil.getUGILoginUser());
+ int i = 0;
+ while (fileSystem.exists(auditPath)) {
+ i++;
+ int lastDot = defaultPath.lastIndexOf('.');
+ String baseName = defaultPath.substring(0, lastDot);
+ String extension = defaultPath.substring(lastDot);
+ fullPath = baseName + "." + i + extension;
+ auditPath = new Path(fullPath);
+ logger.info("Checking whether log file exists. "+ fileSystemScheme + "Path= " + fullPath);
+ }
+ logger.info("Log file doesn't exists. Will create and use it. "+ fileSystemScheme + "Path= " + fullPath);
+
+ // Create parent folders
+ createParents(auditPath, fileSystem);
+
+ currentFileName = fullPath;
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== AbstractRangerAuditWriter.createFileSystemFolders()");
+ }
+ }
+
+ public Configuration createConfiguration() {
+ Configuration conf = new Configuration();
+ for (Map.Entry<String, String> entry : auditConfigs.entrySet()) {
+ String key = entry.getKey();
+ String value = entry.getValue();
+ // for ease of install config file may contain properties with empty value, skip those
+ if (StringUtils.isNotEmpty(value)) {
+ conf.set(key, value);
+ }
+ logger.info("Adding property to "+ fileSystemScheme + " + config: " + key + " => " + value);
+ }
+
+ logger.info("Returning " + fileSystemScheme + "Filesystem Config: " + conf.toString());
+ return conf;
+ }
+
+ public void createParents(Path pathLogfile, FileSystem fileSystem)
+ throws Exception {
+ logger.info("Creating parent folder for " + pathLogfile);
+ Path parentPath = pathLogfile != null ? pathLogfile.getParent() : null;
+
+ if (parentPath != null && fileSystem != null
+ && !fileSystem.exists(parentPath)) {
+ fileSystem.mkdirs(parentPath);
+ }
+ }
+
+ public void init(Properties props, String propPrefix) {
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> AbstractRangerAuditWriter.init()");
+ }
+
+ String logFolderProp = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILESYSTEM_DIR);
+ if (StringUtils.isEmpty(logFolderProp)) {
+ logger.error("File destination folder is not configured. Please set "
+ + propPrefix + "."
+ + PROP_FILESYSTEM_DIR + ". name="
+ + auditProviderName);
+ return;
+ }
+
+ String logSubFolder = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILESYSTEM_SUBDIR);
+ if (StringUtils.isEmpty(logSubFolder)) {
+ logSubFolder = "%app-type%/%time:yyyyMMdd%";
+ }
+
+ logFileNameFormat = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILESYSTEM_FILE_NAME_FORMAT);
+ fileRolloverSec = MiscUtil.getIntProperty(props, propPrefix + "." + PROP_FILESYSTEM_FILE_ROLLOVER, fileRolloverSec);
+
+ if (StringUtils.isEmpty(fileExtension)) {
+ setFileExtension(PROP_FILESYSTEM_FILE_EXTENSION);
+ }
+
+ if (logFileNameFormat == null || logFileNameFormat.isEmpty()) {
+ logFileNameFormat = "%app-type%_ranger_audit_%hostname%" + fileExtension;
+ }
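+ // Illustrative expansion: for app-type "hiveServer2" on host "node1", MiscUtil.replaceTokens()
+ // later turns this default into "hiveServer2_ranger_audit_node1.log".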
+
+ logFolder = logFolderProp + "/" + logSubFolder;
+
+ logger.info("logFolder=" + logFolder + ", destName=" + auditProviderName);
+ logger.info("logFileNameFormat=" + logFileNameFormat + ", destName="+ auditProviderName);
+ logger.info("config=" + auditConfigs.toString());
+
+ rolloverPeriod = MiscUtil.getStringProperty(props, propPrefix + "." + PROP_FILESYSTEM_ROLLOVER_PERIOD);
+ rollingTimeUtil = RollingTimeUtil.getInstance();
+
+ //file.rollover.period drives rollover: if the next rollover time cannot be computed from
+ //file.rollover.period, fall back to file.rollover.sec to find the next rollover time.
+ //If that also fails, the default is a 1-day rollover window.
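+ // For example, a one-day rollover period closes the file at day boundaries, whereas the
+ // default file.rollover.sec of 86400 seconds rolls 24 hours after the previous rollover time.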
+ if(StringUtils.isEmpty(rolloverPeriod) ) {
+ rolloverPeriod = rollingTimeUtil.convertRolloverSecondsToRolloverPeriod(fileRolloverSec);
+ }
+
+ try {
+ nextRollOverTime = rollingTimeUtil.computeNextRollingTime(rolloverPeriod);
+ } catch ( Exception e) {
+ logger.warn("Rollover by file.rollover.period failed...will be using the file.rollover.sec for "+ fileSystemScheme + " audit file rollover...", e);
+ rollOverByDuration = true;
+ nextRollOverTime = rollOverByDuration();
+ }
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== AbstractRangerAuditWriter.init()");
+ }
+
+ }
+
+ public void closeFileIfNeeded() {
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> AbstractRangerAuditWriter.closeFileIfNeeded()");
+ }
+
+ if (logWriter == null) {
+ if (logger.isDebugEnabled()){
+ logger.debug("Log writer is null, aborting rollover condition check!");
+ }
+ return;
+ }
+
+ if ( System.currentTimeMillis() >= nextRollOverTime.getTime() ) {
+ logger.info("Closing file. Rolling over. name = {}, fileName = {}", auditProviderName, currentFileName);
+ logWriter.flush();
+ closeWriter();
+ resetWriter();
+ currentFileName = null;
+ reUseLastLogFile = false;
+
+ if (!rollOverByDuration) {
+ try {
+ if(StringUtils.isEmpty(rolloverPeriod) ) {
+ rolloverPeriod = rollingTimeUtil.convertRolloverSecondsToRolloverPeriod(fileRolloverSec);
+ }
+ nextRollOverTime = rollingTimeUtil.computeNextRollingTime(rolloverPeriod);
+ } catch ( Exception e) {
+ logger.warn("Rollover by file.rollover.period failed", e);
+ logger.warn("Using the file.rollover.sec for {} audit file rollover...", fileSystemScheme);
+ nextRollOverTime = rollOverByDuration();
+ }
+ } else {
+ nextRollOverTime = rollOverByDuration();
+ }
+ }
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== AbstractRangerAuditWriter.closeFileIfNeeded()");
+ }
+ }
+
+ public Date rollOverByDuration() {
+ long rollOverTime = rollingTimeUtil.computeNextRollingTime(fileRolloverSec,nextRollOverTime);
+ return new Date(rollOverTime);
+ }
+
+ public PrintWriter createWriter() throws Exception {
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> AbstractRangerAuditWriter.createWriter()");
+ }
+
+ if (logWriter == null) {
+ boolean appendMode = false;
+ // if append is supported, reuse last log file
+ if (reUseLastLogFile && fileSystem.hasPathCapability(auditPath, CommonPathCapabilities.FS_APPEND)) {
+ logger.info("Appending to last log file. auditPath = {}", fullPath);
+ try {
+ ostream = fileSystem.append(auditPath);
+ appendMode = true;
+ } catch (Exception e){
+ logger.error("Failed to append to file {} due to {}", fullPath, e.getMessage());
+ logger.info("Falling back to create a new log file!");
+ appendMode = false;
+ }
+ }
+ if (!appendMode) {
+ // Create the file to write
+ logger.info("Creating new log file. auditPath = {}", fullPath);
+ createFileSystemFolders();
+ ostream = fileSystem.create(auditPath);
+ }
+ logWriter = new PrintWriter(ostream);
+ isHFlushCapableStream = ostream.hasCapability(StreamCapabilities.HFLUSH);
+ }
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== AbstractRangerAuditWriter.createWriter()");
+ }
+
+ return logWriter;
+ }
+
+ /**
+ * Closes the writer after writing audits
+ **/
+ public void closeWriter() {
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> AbstractRangerAuditWriter.closeWriter()");
+ }
+
+ if (ostream != null) {
+ try {
+ ostream.close();
+ } catch (IOException e) {
+ logger.error("Error closing the stream {}", e.getMessage());
+ }
+ }
+ if (logWriter != null)
+ logWriter.close();
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== AbstractRangerAuditWriter.closeWriter()");
+ }
+ }
+
+ public void resetWriter() {
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> AbstractRangerAuditWriter.resetWriter()");
+ }
+
+ logWriter = null;
+ ostream = null;
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== AbstractRangerAuditWriter.resetWriter()");
+ }
+ }
+
+ @Override
+ public void flush() {
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> AbstractRangerAuditWriter.flush() " + fileSystemScheme);
+ }
+ if (ostream != null) {
+ try {
+ synchronized (this) {
+ if (ostream != null)
+ // 1) PrintWriter does not do any buffering of its own, so
+ // we need to flush its underlying stream
+ // 2) HDFS flush() does not really flush all the way to disk.
+ if (isHFlushCapableStream) {
+ //Checking HFLUSH capability of the stream because of HADOOP-13327.
+ //For the S3 filesystem, hflush throws UnsupportedOperationException, so we call flush instead.
+ ostream.hflush();
+ } else {
+ ostream.flush();
+ }
+ if (logger.isDebugEnabled()) {
+ logger.debug("Flush " + fileSystemScheme + " audit logs completed.....");
+ }
+ }
+ } catch (IOException e) {
+ logger.error("Error on flushing log writer: " + e.getMessage() +
+ "\nException will be ignored. name=" + auditProviderName + ", fileName=" + currentFileName);
+ }
+ }
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== AbstractRangerAuditWriter.flush()");
+ }
+ }
+
+ public boolean logFileToHDFS(File file) throws Exception {
+ boolean ret = false;
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> AbstractRangerAuditWriter.logFileToHDFS()");
+ }
+
+ if (logWriter == null) {
+ // Create the file to write
+ createFileSystemFolders();
+ logger.info("Copying the Audit File" + file.getName() + " to HDFS Path" + fullPath);
+ Path destPath = new Path(fullPath);
+ ret = FileUtil.copy(file,fileSystem,destPath,false,conf);
+ }
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== AbstractRangerAuditWriter.logFileToHDFS()");
+ }
+ return ret;
+ }
+
+ public String getFileSystemScheme() {
+ return logFolder.substring(0, logFolder.indexOf(":")).toUpperCase();
+ }
+
+ public void setFileExtension(String fileExtension) {
+ this.fileExtension = fileExtension;
+ }
+}
\ No newline at end of file
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/utils/InMemoryJAASConfiguration.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/InMemoryJAASConfiguration.java
index cc61fb88ac..2b59ab6802 100644
--- a/agents-audit/src/main/java/org/apache/ranger/audit/utils/InMemoryJAASConfiguration.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/InMemoryJAASConfiguration.java
@@ -19,7 +19,6 @@
package org.apache.ranger.audit.utils;
import org.apache.commons.collections.MapUtils;
-import org.apache.commons.lang.ArrayUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -119,23 +118,26 @@ public final class InMemoryJAASConfiguration extends Configuration {
private static final Logger LOG = LoggerFactory.getLogger(InMemoryJAASConfiguration.class);
- private static final String JAAS_CONFIG_PREFIX_PARAM = "xasecure.audit.jaas.";
- private static final String JAAS_CONFIG_LOGIN_MODULE_NAME_PARAM = "loginModuleName";
- private static final String JAAS_CONFIG_LOGIN_MODULE_CONTROL_FLAG_PARAM = "loginModuleControlFlag";
- private static final String JAAS_CONFIG_LOGIN_OPTIONS_PREFIX = "option";
- private static final String JAAS_PRINCIPAL_PROP = "principal";
+ public static final String JAAS_CONFIG_PREFIX_PARAM = "xasecure.audit.jaas.";
+ public static final String JAAS_CONFIG_LOGIN_MODULE_NAME_PARAM = "loginModuleName";
+ public static final String JAAS_CONFIG_LOGIN_MODULE_CONTROL_FLAG_PARAM = "loginModuleControlFlag";
+ public static final String JAAS_CONFIG_LOGIN_OPTIONS_PREFIX = "option";
+ public static final String JAAS_PRINCIPAL_PROP = "principal";
- private Configuration parent = null;
- private Map<String, List<AppConfigurationEntry>> applicationConfigEntryMap = new HashMap<>();
+ private final Configuration parent;
+ private final Map<String, List<AppConfigurationEntry>> applicationConfigEntryMap = new HashMap<>();
- public static void init(String propFile) throws Exception {
+ public static InMemoryJAASConfiguration init(String propFile) throws Exception {
LOG.debug("==> InMemoryJAASConfiguration.init( {} ) ", propFile);
- InputStream in = null;
+ InMemoryJAASConfiguration ret = null;
+ InputStream in = null;
try {
Properties properties = new Properties();
+
in = ClassLoader.getSystemResourceAsStream(propFile);
+
if (in == null) {
if (!propFile.startsWith("/")) {
in = ClassLoader.getSystemResourceAsStream("/" + propFile);
@@ -144,8 +146,10 @@ public static void init(String propFile) throws Exception {
in = new FileInputStream(new File(propFile));
}
}
+
properties.load(in);
- init(properties);
+
+ ret = init(properties);
} catch (IOException e) {
throw new Exception("Failed to load JAAS application properties", e);
} finally {
@@ -157,89 +161,101 @@ public static void init(String propFile) throws Exception {
}
}
}
+
LOG.debug("<== InMemoryJAASConfiguration.init( {} ) ", propFile);
+
+ return ret;
}
- public static void init(Properties properties) throws Exception {
+ public static InMemoryJAASConfiguration init(Properties properties) throws Exception {
LOG.debug("==> InMemoryJAASConfiguration.init()");
+ InMemoryJAASConfiguration ret = null;
+
if (properties != null && MapUtils.isNotEmpty(properties)) {
- InMemoryJAASConfiguration conf = new InMemoryJAASConfiguration(properties);
- Configuration.setConfiguration(conf);
+ ret = new InMemoryJAASConfiguration(properties);
} else {
throw new Exception("Failed to load JAAS application properties: properties NULL or empty!");
}
LOG.debug("<== InMemoryJAASConfiguration.init()");
+
+ return ret;
}
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
- LOG.trace("==> InMemoryJAASConfiguration.getAppConfigurationEntry( {} )", name);
+ LOG.debug("==> InMemoryJAASConfiguration.getAppConfigurationEntry( {} )", name);
AppConfigurationEntry[] ret = null;
- if (parent != null) {
- ret = parent.getAppConfigurationEntry(name);
- }
- if (ret == null || ret.length == 0) {
- List<AppConfigurationEntry> retList = applicationConfigEntryMap.get(name);
- if (retList != null && retList.size() > 0) {
- int sz = retList.size();
- ret = new AppConfigurationEntry[sz];
- ret = retList.toArray(ret);
- }
+
+ if (parent != null) {
+ ret = parent.getAppConfigurationEntry(name);
+ }
+
+ if (ret == null || ret.length == 0) {
+ List<AppConfigurationEntry> retList = applicationConfigEntryMap.get(name);
+
+ if (retList != null && retList.size() > 0) {
+ ret = retList.toArray(new AppConfigurationEntry[retList.size()]);
+ }
+ }
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("<== InMemoryJAASConfiguration.getAppConfigurationEntry( {} ) : {}", name, toString(ret));
}
- LOG.trace("<== InMemoryJAASConfiguration.getAppConfigurationEntry( {} ) : {}", name, ArrayUtils.toString(ret));
+
return ret;
}
private InMemoryJAASConfiguration(Properties prop) {
parent = Configuration.getConfiguration();
+
initialize(prop);
}
private void initialize(Properties properties) {
LOG.debug("==> InMemoryJAASConfiguration.initialize()");
- int prefixLen = JAAS_CONFIG_PREFIX_PARAM.length();
-
+ int prefixLen = JAAS_CONFIG_PREFIX_PARAM.length();
Map<String, SortedSet<Integer>> jaasClients = new HashMap<>();
+
for(String key : properties.stringPropertyNames()) {
if (key.startsWith(JAAS_CONFIG_PREFIX_PARAM)) {
- String jaasKey = key.substring(prefixLen);
- StringTokenizer tokenizer = new StringTokenizer(jaasKey, ".");
- int tokenCount =tokenizer.countTokens();
+ String jaasKey = key.substring(prefixLen);
+ StringTokenizer tokenizer = new StringTokenizer(jaasKey, ".");
+ int tokenCount = tokenizer.countTokens();
+
if (tokenCount > 0) {
- String clientId = tokenizer.nextToken();
+ String clientId = tokenizer.nextToken();
SortedSet<Integer> indexList = jaasClients.get(clientId);
+
if (indexList == null) {
- indexList = new TreeSet();
+ indexList = new TreeSet<>();
+
jaasClients.put(clientId, indexList);
}
- String indexStr = tokenizer.nextToken();
-
- int indexId = isNumeric(indexStr) ? Integer.parseInt(indexStr) : -1;
+ String indexStr = tokenizer.nextToken();
+ int indexId = isNumeric(indexStr) ? Integer.parseInt(indexStr) : -1;
Integer clientIdIndex = Integer.valueOf(indexId);
if (!indexList.contains(clientIdIndex)) {
indexList.add(clientIdIndex);
}
-
}
}
}
- for(String jaasClient : jaasClients.keySet()) {
+ for(String jaasClient : jaasClients.keySet()) {
for(Integer index : jaasClients.get(jaasClient)) {
-
String keyPrefix = JAAS_CONFIG_PREFIX_PARAM + jaasClient + ".";
if (index > -1) {
keyPrefix = keyPrefix + String.valueOf(index) + ".";
}
- String keyParam = keyPrefix + JAAS_CONFIG_LOGIN_MODULE_NAME_PARAM;
+ String keyParam = keyPrefix + JAAS_CONFIG_LOGIN_MODULE_NAME_PARAM;
String loginModuleName = properties.getProperty(keyParam);
if (loginModuleName == null) {
@@ -252,11 +268,14 @@ private void initialize(Properties properties) {
}
keyParam = keyPrefix + JAAS_CONFIG_LOGIN_MODULE_CONTROL_FLAG_PARAM;
- String controlFlag = properties.getProperty(keyParam);
+
+ String controlFlag = properties.getProperty(keyParam);
AppConfigurationEntry.LoginModuleControlFlag loginControlFlag = null;
+
if (controlFlag != null) {
controlFlag = controlFlag.trim().toLowerCase();
+
if (controlFlag.equals("optional")) {
loginControlFlag = AppConfigurationEntry.LoginModuleControlFlag.OPTIONAL;
} else if (controlFlag.equals("requisite")) {
@@ -278,14 +297,15 @@ private void initialize(Properties properties) {
loginControlFlag = AppConfigurationEntry.LoginModuleControlFlag.REQUIRED;
}
+ Map<String, String> options = new HashMap<>();
+ String optionPrefix = keyPrefix + JAAS_CONFIG_LOGIN_OPTIONS_PREFIX + ".";
+ int optionPrefixLen = optionPrefix.length();
- Map<String, String> options = new HashMap<>();
- String optionPrefix = keyPrefix + JAAS_CONFIG_LOGIN_OPTIONS_PREFIX + ".";
- int optionPrefixLen = optionPrefix.length();
for(String key : properties.stringPropertyNames()) {
if (key.startsWith(optionPrefix)) {
String optionKey = key.substring(optionPrefixLen);
String optionVal = properties.getProperty(key);
+
if (optionVal != null) {
optionVal = optionVal.trim();
@@ -298,6 +318,7 @@ private void initialize(Properties properties) {
+ optionVal + "]");
}
}
+
options.put(optionKey, optionVal);
}
}
@@ -306,30 +327,53 @@ private void initialize(Properties properties) {
if (LOG.isDebugEnabled()) {
StringBuilder sb = new StringBuilder();
+
sb.append("Adding client: [").append(jaasClient).append("{").append(index).append("}]\n");
sb.append("\tloginModule: [").append(loginModuleName).append("]\n");
sb.append("\tcontrolFlag: [").append(loginControlFlag).append("]\n");
+
for (String key : options.keySet()) {
String val = options.get(key);
+
sb.append("\tOptions: [").append(key).append("] => [").append(val).append("]\n");
}
+
LOG.debug(sb.toString());
}
List<AppConfigurationEntry> retList = applicationConfigEntryMap.get(jaasClient);
+
if (retList == null) {
- retList = new ArrayList();
+ retList = new ArrayList<>();
+
applicationConfigEntryMap.put(jaasClient, retList);
}
- retList.add(entry);
-
+ retList.add(entry);
}
}
+
LOG.debug("<== InMemoryJAASConfiguration.initialize()");
}
private static boolean isNumeric(String str) {
return str.matches("-?\\d+(\\.\\d+)?"); //match a number with optional '-' and decimal.
}
+
+ private String toString(AppConfigurationEntry[] entries) {
+ StringBuilder sb = new StringBuilder();
+
+ sb.append('[');
+ if (entries != null) {
+ for (AppConfigurationEntry entry : entries) {
+ sb.append("{ loginModuleName=").append(entry.getLoginModuleName())
+ .append(", controlFlag=").append(entry.getControlFlag())
+ .append(", options=").append(entry.getOptions())
+ .append("}");
+ }
+ }
+ sb.append(']');
+
+ return sb.toString();
+ }
}
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosAction.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosAction.java
new file mode 100644
index 0000000000..1bbbca8d1e
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosAction.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ranger.audit.utils;
+
+import org.apache.commons.lang3.Validate;
+import org.slf4j.Logger;
+
+import javax.security.auth.login.LoginException;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
+
+/**
+ * Helper class for processors to perform an action as a KerberosUser.
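+ *
+ * Typical usage (illustrative; kerberosUser, action and logger are supplied by the caller):
+ * <pre>
+ *   KerberosAction<Boolean> kerberosAction = new KerberosAction<>(kerberosUser, action, logger);
+ *   Boolean result = kerberosAction.execute();
+ * </pre>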
+ */
+public class KerberosAction<T> {
+
+ private final KerberosUser kerberosUser;
+ private final PrivilegedExceptionAction<T> action;
+ private final Logger logger;
+
+ public KerberosAction(final KerberosUser kerberosUser,
+ final PrivilegedExceptionAction<T> action,
+ final Logger logger) {
+ this.kerberosUser = kerberosUser;
+ this.action = action;
+ this.logger = logger;
+ Validate.notNull(this.kerberosUser);
+ Validate.notNull(this.action);
+ Validate.notNull(this.logger);
+ }
+
+ public T execute() throws Exception {
+ T result;
+ // lazily login the first time the processor executes
+ if (!kerberosUser.isLoggedIn()) {
+ try {
+ kerberosUser.login();
+ logger.info("Successful login for " + kerberosUser.getPrincipal());
+ } catch (LoginException e) {
+ throw new Exception("Login failed due to: " + e.getMessage(), e);
+ }
+ }
+
+ // check if we need to re-login, will only happen if re-login window is reached (80% of TGT life)
+ try {
+ kerberosUser.checkTGTAndRelogin();
+ } catch (LoginException e) {
+ throw new Exception("Relogin check failed due to: " + e.getMessage(), e);
+ }
+
+ // attempt to execute the action, if an exception is caught attempt to logout/login and retry
+ try {
+ result = kerberosUser.doAs(action);
+ } catch (SecurityException se) {
+ logger.info("Privileged action failed, attempting relogin and retrying...");
+ logger.debug("", se);
+
+ try {
+ kerberosUser.logout();
+ kerberosUser.login();
+ result = kerberosUser.doAs(action);
+ } catch (Exception e) {
+ throw new Exception("Retrying privileged action failed due to: " + e.getMessage(), e);
+ }
+ } catch (PrivilegedActionException pae) {
+ final Exception cause = pae.getException();
+ throw new Exception("Privileged action failed due to: " + cause.getMessage(), cause);
+ }
+
+ return result;
+ }
+}
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosJAASConfigUser.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosJAASConfigUser.java
new file mode 100644
index 0000000000..2667721609
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosJAASConfigUser.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ranger.audit.utils;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.security.auth.Subject;
+import javax.security.auth.login.AppConfigurationEntry;
+import javax.security.auth.login.Configuration;
+import javax.security.auth.login.LoginContext;
+import javax.security.auth.login.LoginException;
+
+/**
+ * Used to authenticate and execute actions when Kerberos is enabled and a keytab is being used.
+ *
+ */
+public class KerberosJAASConfigUser extends AbstractKerberosUser {
+ private static final Logger LOG = LoggerFactory.getLogger(KerberosJAASConfigUser.class);
+
+ private final String configName;
+ private final Configuration config;
+
+ public KerberosJAASConfigUser(final String configName, final Configuration config) {
+ this.configName = configName;
+ this.config = config;
+ }
+
+
+ @Override
+ public String getPrincipal() {
+ String ret = null;
+ AppConfigurationEntry[] entries = config.getAppConfigurationEntry(configName);
+
+ if (entries != null) {
+ for (AppConfigurationEntry entry : entries) {
+ if (entry.getOptions().containsKey(InMemoryJAASConfiguration.JAAS_PRINCIPAL_PROP)) {
+ ret = (String) entry.getOptions().get(InMemoryJAASConfiguration.JAAS_PRINCIPAL_PROP);
+
+ break;
+ }
+ }
+ }
+
+ return ret;
+ }
+
+ @Override
+ protected LoginContext createLoginContext(Subject subject) throws LoginException {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("==> KerberosJAASConfigUser.createLoginContext()");
+ }
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("<== KerberosJAASConfigUser.createLoginContext(), Subject: " + subject);
+ }
+
+ return new LoginContext(configName, subject, null, config);
+ }
+}
+
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosUser.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosUser.java
new file mode 100644
index 0000000000..fb6003e2fd
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/KerberosUser.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.ranger.audit.utils;
+
+import javax.security.auth.login.LoginException;
+import java.security.PrivilegedAction;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
+
+/**
+ * A keytab-based user that can login/logout and perform actions as the given user.
+ */
+public interface KerberosUser {
+
+ /**
+ * Performs a login for the given user.
+ *
+ * @throws LoginException if the login fails
+ */
+ void login() throws LoginException;
+
+ /**
+ * Performs a logout for the given user.
+ *
+ * @throws LoginException if the logout fails
+ */
+ void logout() throws LoginException;
+
+ /**
+ * Executes the given action as the given user.
+ *
+ * @param action the action to execute
+ * @param <T> the type of response
+ * @return the result of the action
+ * @throws IllegalStateException if attempting to execute an action before performing a login
+ */
+ <T> T doAs(PrivilegedAction<T> action) throws IllegalStateException;
+
+ /**
+ * Executes the given action as the given user.
+ *
+ * @param action the action to execute
+ * @param <T> the type of response
+ * @return the result of the action
+ * @throws IllegalStateException if attempting to execute an action before performing a login
+ * @throws PrivilegedActionException if the action itself threw an exception
+ */
+ <T> T doAs(PrivilegedExceptionAction<T> action)
+ throws IllegalStateException, PrivilegedActionException;
+
+ /**
+ * Performs a re-login if the TGT is close to expiration.
+ *
+ * @return true if a relogin was performed, false otherwise
+ * @throws LoginException if the relogin fails
+ */
+ boolean checkTGTAndRelogin() throws LoginException;
+
+ /**
+ * @return true if this user is currently logged in, false otherwise
+ */
+ boolean isLoggedIn();
+
+ /**
+ * @return the principal for this user
+ */
+ String getPrincipal();
+
+}
+
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/utils/ORCFileUtil.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/ORCFileUtil.java
new file mode 100644
index 0000000000..c2bee8aad6
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/ORCFileUtil.java
@@ -0,0 +1,449 @@
+package org.apache.ranger.audit.utils;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.exec.vector.*;
+import org.apache.orc.CompressionKind;
+import org.apache.orc.OrcFile;
+import org.apache.orc.OrcFile.WriterOptions;
+import org.apache.orc.TypeDescription;
+import org.apache.orc.Writer;
+import org.apache.ranger.audit.model.AuthzAuditEvent;
+import org.apache.ranger.audit.model.EnumRepositoryType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Date;
+import java.util.Map;
+import java.util.HashMap;
+import java.text.Format;
+import java.text.SimpleDateFormat;
+
+public class ORCFileUtil {
+
+ private static final Logger logger = LoggerFactory.getLogger(ORCFileUtil.class);
+
+ private static volatile ORCFileUtil me = null;
+ protected CompressionKind defaultCompression = CompressionKind.SNAPPY;
+ protected CompressionKind compressionKind = CompressionKind.NONE;
+ protected TypeDescription schema = null;
+ protected VectorizedRowBatch batch = null;
+ protected String auditSchema = null;
+ protected String dateFormat = "yyyy-MM-dd HH:mm:ss";
+
+ protected ArrayList<String> schemaFields = new ArrayList<>();
+ protected Map<String, ColumnVector> vectorizedRowBatchMap = new HashMap<>();
+ protected int orcBufferSize;
+ protected long orcStripeSize;
+
+ public static ORCFileUtil getInstance() {
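+ // Lazy singleton via double-checked locking; 'me' is volatile so a partially
+ // constructed instance is never published to other threads.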
+ ORCFileUtil orcFileUtil = me;
+ if (orcFileUtil == null) {
+ synchronized (ORCFileUtil.class) {
+ orcFileUtil = me;
+ if (orcFileUtil == null) {
+ me = orcFileUtil = new ORCFileUtil();
+ }
+ }
+ }
+ return orcFileUtil;
+ }
+
+ public void init(int orcBufferSize, long orcStripeSize, String compression) throws Exception{
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> ORCFileUtil.init()");
+ }
+ this.orcBufferSize = orcBufferSize;
+ this.orcStripeSize = orcStripeSize;
+ this.compressionKind = getORCCompression(compression);
+ initORCAuditSchema();
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== ORCFileUtil.init() : orcBufferSize: " + orcBufferSize + " stripeSize: " + orcStripeSize +
+ " compression: " + compression);
+ }
+ }
+
+ public Writer createWriter(Configuration conf, FileSystem fs, String path) throws Exception {
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> ORCFileUtil.createWriter()");
+ }
+ Writer ret = null;
+ WriterOptions writeOptions = OrcFile.writerOptions(conf)
+ .fileSystem(fs)
+ .setSchema(schema)
+ .bufferSize(orcBufferSize)
+ .stripeSize(orcStripeSize)
+ .compress(compressionKind);
+
+ ret = OrcFile.createWriter(new Path(path), writeOptions);
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== ORCFileUtil.createWriter()");
+ }
+ return ret;
+ }
+
+ public void close(Writer writer) throws Exception {
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> ORCFileUtil.close()");
+ }
+
+ writer.close();
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== ORCFileUtil.close()");
+ }
+ }
+
+ public void log(Writer writer, Collection<AuthzAuditEvent> events) throws Exception {
+ int eventBatchSize = events.size();
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> ORCFileUtil.log() : EventSize: " + eventBatchSize + "ORC bufferSize:" + orcBufferSize );
+ }
+
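+ // Each audit event fills one row of the shared VectorizedRowBatch; a full batch is
+ // written out via writer.addRowBatch() and reset before further rows are added.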
+ try {
+ for(AuthzAuditEvent event : events) {
+ int row = batch.size++;
+ for (int j = 0; j < schemaFields.size(); j++) {
+ String fieldName = schemaFields.get(j);
+ SchemaInfo schemaInfo = getSchemaInfo(event, fieldName);
+ ColumnVector colVector = vectorizedRowBatchMap.get(fieldName);
+ if (colVector instanceof LongColumnVector) {
+ ((LongColumnVector) colVector).vector[row] = castLongObject(schemaInfo.getValue());
+ } else if (colVector instanceof BytesColumnVector) {
+ String strValue = castStringObject(schemaInfo.getValue());
+ ((BytesColumnVector) colVector).setVal(row, (strValue != null ? strValue : "").getBytes());
+ }
+ }
+ if (batch.size == batch.getMaxSize()) {
+ writer.addRowBatch(batch);
+ batch.reset();
+ }
+ }
+ if (batch.size != 0) {
+ writer.addRowBatch(batch);
+ batch.reset();
+ }
+ } catch (Exception e) {
+ logger.error("Error while writing into ORC File:", e);
+ throw e;
+ }
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== ORCFileUtil.log()");
+ }
+ }
+
+ protected void initORCAuditSchema() throws Exception {
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> ORCWriter.initORCAuditSchema()");
+ }
+ auditSchema = getAuditSchema();
+ Map<String, String> schemaFieldTypeMap = getSchemaFieldTypeMap();
+ schema = TypeDescription.fromString(auditSchema);
+ batch = schema.createRowBatch(orcBufferSize);
+ buildVectorRowBatch(schemaFieldTypeMap);
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== ORCWriter.initORCAuditSchema()");
+ }
+ }
+
+ protected Map<String, String> getSchemaFieldTypeMap() {
+ Map<String, String> ret = new HashMap<>();
+
+ int index1 = auditSchema.indexOf("<");
+ int index2 = auditSchema.indexOf(">");
+ String subAuditSchema = auditSchema.substring(index1+1,index2);
+ String[] fields = subAuditSchema.split(",");
+ schemaFields = new ArrayList<>();
+
+ for (String field: fields) {
+ String[] flds = field.split(":");
+ schemaFields.add(flds[0]);
+ ret.put(flds[0],flds[1]);
+ }
+ return ret;
+ }
+
+ protected void buildVectorRowBatch(Map<String, String> schemaFieldTypeMap) throws Exception {
+ int i = 0;
+ for (i = 0; i < schemaFields.size(); i++) {
+ String fieldName = schemaFields.get(i);
+ String fieldType = schemaFieldTypeMap.get(fieldName);
+ // getColumnVectorType() rejects unsupported field types with an exception
+ ColumnVector colVector = getColumnVectorType(fieldType);
+ if (colVector != null) {
+ vectorizedRowBatchMap.put(fieldName, batch.cols[i]);
+ }
+ }
+ }
+
+ protected SchemaInfo getSchemaInfo(AuthzAuditEvent event, String fieldName) {
+ SchemaInfo ret = new SchemaInfo();
+ try {
+ Field fld = event.getClass().getDeclaredField(fieldName);
+ fld.setAccessible(true);
+ Class<?> cls = fld.getType();
+ Object value = fld.get(event);
+
+ ret.setField(fieldName);
+ ret.setType(cls.getName());
+ ret.setValue(value);
+ } catch (Exception e){
+ logger.error("Error while writing into ORC File:", e);
+ }
+ return ret;
+ }
+
+ protected ColumnVector getColumnVectorType(String fieldType) throws Exception {
+ ColumnVector ret = null;
+ fieldType = fieldType.toLowerCase();
+ switch(fieldType) {
+ case "int" :
+ case "bigint":
+ case "date":
+ case "boolean":
+ ret = new LongColumnVector();
+ break;
+ case "string":
+ case "varchar":
+ case "char":
+ case "binary":
+ ret = new BytesColumnVector();
+ break;
+ case "decimal":
+ ret = new DecimalColumnVector(10,5);
+ break;
+ case "double":
+ case "float":
+ ret = new DoubleColumnVector();
+ break;
+ case "array":
+ case "map":
+ case "uniontype":
+ case "struct":
+ throw new Exception("Unsuppoted field Type");
+ }
+ return ret;
+ }
+
+ protected Long castLongObject(Object object) {
+ Long ret = 0L;
+ try {
+ if (object instanceof Long)
+ ret = ((Long) object);
+ else if (object instanceof Integer) {
+ ret = ((Integer) object).longValue();
+ } else if (object instanceof String) {
+ ret = Long.valueOf((String) object);
+ }
+ } catch (Exception e) {
+ logger.error("Error while writing into ORC File:", e);
+ }
+ return ret;
+ }
+
+ protected String castStringObject(Object object) {
+ String ret = null;
+ try {
+ if (object instanceof String)
+ ret = (String) object;
+ else if (object instanceof Date) {
+ ret = (getDateString((Date) object));
+ }
+ } catch (Exception e) {
+ logger.error("Error while writing into ORC File:", e);
+ }
+ return ret;
+ }
+
+ protected String getAuditSchema() {
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> ORCWriter.getAuditSchema()");
+ }
+ String ret = null;
+ String fieldStr = "struct<";
+ StringBuilder sb = new StringBuilder(fieldStr);
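+ // Builds a TypeDescription string such as "struct<agentId:string,repositoryType:int,...>"
+ // from the @JsonProperty-annotated fields of AuthzAuditEvent (illustrative field subset).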
+
+ Class<AuthzAuditEvent> auditEventClass = AuthzAuditEvent.class;
+ for(Field fld: auditEventClass.getDeclaredFields()) {
+ if (fld.isAnnotationPresent(JsonProperty.class)) {
+ String field = fld.getName();
+ String fieldType = getShortFieldType(fld.getType().getName());
+ if (fieldType == null) {
+ continue;
+ }
+ fieldStr = field + ":" + fieldType + ",";
+ sb.append(fieldStr);
+ }
+ }
+ fieldStr = sb.toString();
+ if (fieldStr.endsWith(",")) {
+ fieldStr = fieldStr.substring(0, fieldStr.length() - 1);
+ }
+ ret = fieldStr + ">";
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== ORCWriter.getAuditSchema() AuditSchema: " + ret);
+ }
+ return ret;
+ }
+
+ protected String getShortFieldType(String type){
+ String ret = null;
+ switch(type) {
+ case "java.lang.String":
+ ret = "string";
+ break;
+ case "int":
+ ret = "int";
+ break;
+ case "short":
+ ret = "string";
+ break;
+ case "java.util.Date":
+ ret = "string";
+ break;
+ case "long":
+ ret = "bigint";
+ break;
+ default:
+ ret = null;
+ }
+ return ret;
+ }
+
+ class SchemaInfo {
+ String field = null;
+ String type = null;
+ Object value = null;
+
+ public String getField() {
+ return field;
+ }
+
+ public void setField(String field) {
+ this.field = field;
+ }
+
+ public String getType() {
+ return type;
+ }
+
+ public void setType(String type) {
+ this.type = type;
+ }
+
+ public Object getValue() {
+ return value;
+ }
+
+ public void setValue(Object value) {
+ this.value = value;
+ }
+ }
+
+ protected CompressionKind getORCCompression(String compression) {
+ CompressionKind ret;
+ if (compression == null) {
+ compression = defaultCompression.name().toLowerCase();
+ }
+ switch(compression) {
+ case "snappy":
+ ret = CompressionKind.SNAPPY;
+ break;
+ case "lzo":
+ ret = CompressionKind.LZO;
+ break;
+ case "zlib":
+ ret = CompressionKind.ZLIB;
+ break;
+ case "none":
+ ret = CompressionKind.NONE;
+ break;
+ default:
+ ret = defaultCompression;
+ break;
+ }
+ return ret;
+ }
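+
+ // A sketch of how the resolved CompressionKind is typically applied when the ORC writer is
+ // created (org.apache.orc.OrcFile API; path and schema are whatever the caller has prepared):
+ //   Writer writer = OrcFile.createWriter(path,
+ //       OrcFile.writerOptions(conf).setSchema(schema).compress(getORCCompression("snappy")));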
+
+ public static void main(String[] args) throws Exception {
+ ORCFileUtil auditOrcFileUtil = new ORCFileUtil();
+ auditOrcFileUtil.init(10000, 100000L, "snappy");
+ try {
+ Configuration conf = new Configuration();
+ FileSystem fs = FileSystem.get(conf);
+ Writer writer = auditOrcFileUtil.createWriter(conf, fs, "/tmp/test.orc");
+ Collection<AuthzAuditEvent> events = getTestEvent();
+ auditOrcFileUtil.log(writer, events);
+ writer.close();
+ } catch (Exception e){
+ e.printStackTrace();
+ }
+ }
+
+ protected static Collection<AuthzAuditEvent> getTestEvent() {
+ Collection<AuthzAuditEvent> events = new ArrayList<>();
+ for (int idx = 0; idx < 20; idx++) {
+ AuthzAuditEvent event = new AuthzAuditEvent();
+ event.setEventId(Integer.toString(idx));
+ event.setClientIP("127.0.0.1");
+ event.setAccessResult((short) 1);
+ event.setAclEnforcer("ranger-acl");
+ event.setRepositoryName("hdfsdev");
+ event.setRepositoryType(EnumRepositoryType.HDFS);
+ event.setResourcePath("/tmp/test-audit.log" + idx + idx + 1);
+ event.setResourceType("file");
+ event.setAccessType("read");
+ event.setEventTime(new Date());
+ event.setResultReason(Integer.toString(1));
+ events.add(event);
+ }
+ return events;
+ }
+}
\ No newline at end of file
diff --git a/ugsync/src/main/java/org/apache/ranger/unixusersync/model/GroupUserInfo.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/RangerAuditWriter.java
similarity index 64%
rename from ugsync/src/main/java/org/apache/ranger/unixusersync/model/GroupUserInfo.java
rename to agents-audit/src/main/java/org/apache/ranger/audit/utils/RangerAuditWriter.java
index a2cfa7bcc4..fbe9301f47 100644
--- a/ugsync/src/main/java/org/apache/ranger/unixusersync/model/GroupUserInfo.java
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/RangerAuditWriter.java
@@ -1,3 +1,5 @@
+package org.apache.ranger.audit.utils;
+
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
@@ -17,24 +19,21 @@
* under the License.
*/
-package org.apache.ranger.unixusersync.model;
-
-import java.util.List;
-
-public class GroupUserInfo {
- XGroupInfo xgroupInfo;
- List<XUserInfo> xuserInfo;
-
- public XGroupInfo getXgroupInfo() {
- return xgroupInfo;
- }
- public void setXgroupInfo(XGroupInfo xgroupInfo) {
- this.xgroupInfo = xgroupInfo;
- }
- public List<XUserInfo> getXuserInfo() {
- return xuserInfo;
- }
- public void setXuserInfo(List<XUserInfo> xuserInfo) {
- this.xuserInfo = xuserInfo;
- }
+import java.io.File;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Properties;
+
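+/**
+ * Contract implemented by the pluggable audit writers (for example RangerJSONAuditWriter below).
+ *
+ * A minimal lifecycle sketch; the exact call order is driven by the destination that owns the writer:
+ * <pre>{@code
+ * RangerAuditWriter writer = new RangerJSONAuditWriter();
+ * writer.init(props, "xasecure.audit.destination.hdfs", "hdfs", auditConfigs);
+ * writer.start();
+ * writer.log(jsonEvents); // returns false if the batch should be retried
+ * writer.flush();
+ * writer.stop();
+ * }</pre>
+ */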
+public interface RangerAuditWriter {
+ void init(Properties prop, String propPrefix, String auditProviderName, Map<String, String> auditConfigs);
+
+ boolean log(Collection<String> events) throws Exception;
+
+ boolean logFile(File file) throws Exception;
+
+ void start();
+
+ void flush();
+
+ void stop();
}
diff --git a/agents-audit/src/main/java/org/apache/ranger/audit/utils/RangerJSONAuditWriter.java b/agents-audit/src/main/java/org/apache/ranger/audit/utils/RangerJSONAuditWriter.java
new file mode 100644
index 0000000000..f74f0cbd32
--- /dev/null
+++ b/agents-audit/src/main/java/org/apache/ranger/audit/utils/RangerJSONAuditWriter.java
@@ -0,0 +1,245 @@
+package org.apache.ranger.audit.utils;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.apache.ranger.audit.provider.MiscUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.PrintWriter;
+import java.security.PrivilegedExceptionAction;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * Writes the Ranger audit to HDFS as JSON text
+ */
+public class RangerJSONAuditWriter extends AbstractRangerAuditWriter {
+
+ private static final Logger logger = LoggerFactory.getLogger(RangerJSONAuditWriter.class);
+ public static final String PROP_HDFS_ROLLOVER_ENABLE_PERIODIC_ROLLOVER = "file.rollover.enable.periodic.rollover";
+ public static final String PROP_HDFS_ROLLOVER_PERIODIC_ROLLOVER_CHECK_TIME = "file.rollover.periodic.rollover.check.sec";
+
+ protected String JSON_FILE_EXTENSION = ".log";
+
+ /*
+ * When enableAuditFilePeriodicRollOver is enabled, the audit file in HDFS is closed after the period defined by
+ * xasecure.audit.destination.hdfs.file.rollover.sec. By default xasecure.audit.destination.hdfs.file.rollover.sec = 86400 sec,
+ * so the file is closed at midnight. A custom rollover time can be set by setting file.rollover.sec to the desired time in seconds.
+ */
+ private boolean enableAuditFilePeriodicRollOver = false;
+
+ /*
+ * Frequency of the periodic rollover check. By default, the check runs every 60 seconds.
+ */
+ private long periodicRollOverCheckTimeinSec;
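+
+ // Illustrative configuration, assuming the HDFS destination property prefix referenced above:
+ //   xasecure.audit.destination.hdfs.file.rollover.enable.periodic.rollover = true
+ //   xasecure.audit.destination.hdfs.file.rollover.periodic.rollover.check.sec = 60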
+
+ public void init(Properties props, String propPrefix, String auditProviderName, Map<String, String> auditConfigs) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("==> RangerJSONAuditWriter.init()");
+ }
+ init();
+ super.init(props, propPrefix, auditProviderName, auditConfigs);
+
+ // start AuditFilePeriodicRollOverTask if enabled.
+ enableAuditFilePeriodicRollOver = MiscUtil.getBooleanProperty(props, propPrefix + "." + PROP_HDFS_ROLLOVER_ENABLE_PERIODIC_ROLLOVER, false);
+ if (enableAuditFilePeriodicRollOver) {
+ periodicRollOverCheckTimeinSec = MiscUtil.getLongProperty(props, propPrefix + "." + PROP_HDFS_ROLLOVER_PERIODIC_ROLLOVER_CHECK_TIME, 60L);
+ try {
+ if (logger.isDebugEnabled()) {
+ logger.debug("rolloverPeriod: " + rolloverPeriod + " nextRollOverTime: " + nextRollOverTime + " periodicRollOverTimeinSec: " + periodicRollOverCheckTimeinSec);
+ }
+ startAuditFilePeriodicRollOverTask();
+ } catch (Exception e) {
+ logger.warn("Error enabling audit file perodic rollover..! Default behavior will be");
+ }
+ }
+
+ if (logger.isDebugEnabled()) {
+ logger.debug("<== RangerJSONAuditWriter.init()");
+ }
+ }
+
+ public void init() {
+ setFileExtension(JSON_FILE_EXTENSION);
+ }
+
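+ /**
+ * Writes the JSON-serialized audit events to the current HDFS log file as the login UGI.
+ * On a stream error the writer is closed and reset, reUseLastLogFile is set so the previous
+ * file can be picked up again, and false is returned so the caller may retry the batch.
+ */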
+ public synchronized boolean logJSON(final Collection<String> events) throws Exception {
+ PrintWriter out = null;
+ try {
+ if (logger.isDebugEnabled()) {
+ logger.debug("UGI = {}, will write to HDFS file = {}", MiscUtil.getUGILoginUser(), currentFileName);
+ }
+ out = MiscUtil.executePrivilegedAction(new PrivilegedExceptionAction<PrintWriter>() {
+ @Override
+ public PrintWriter run() throws Exception {
+ PrintWriter out = getLogFileStream();
+ for (String event : events) {
+ out.println(event);
+ }
+ return out;
+ }
+ });
+ // flush and check the stream for errors
+ if (out.checkError()) {
+ // In theory, this count may NOT be accurate as part of the messages may have been successfully written.
+ // However, in practice, since client does buffering, either all or none would succeed.
+ logger.error("Stream encountered errors while writing audits to HDFS!");
+ closeWriter();
+ resetWriter();
+ reUseLastLogFile = true;
+ return false;
+ }
+ } catch (Exception e) {
+ logger.error("Exception encountered while writing audits to HDFS!", e);
+ closeWriter();
+ resetWriter();
+ reUseLastLogFile = true;
+ return false;
+ } finally {
+ if (logger.isDebugEnabled()) {
+ logger.debug("Flushing HDFS audit. Event Size:" + events.size());
+ }
+ if (out != null) {
+ out.flush();
+ }
+ //closeWriter();
+ }
+
+ return true;
+ }
+
+ @Override
+ public boolean log(Collection<String> events) throws Exception {