Posted to commits@drill.apache.org by dz...@apache.org on 2021/10/26 12:42:01 UTC

[drill] branch master updated: DRILL-8016: Default to lazy outbound connections for storage-jdbc and storage-splunk (#2342)

This is an automated email from the ASF dual-hosted git repository.

dzamo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/drill.git


The following commit(s) were added to refs/heads/master by this push:
     new 946e016  DRILL-8016: Default to lazy outbound connections for storage-jdbc and storage-splunk (#2342)
946e016 is described below

commit 946e0166486219e61ded0edb86897fb506996080
Author: James Turton <91...@users.noreply.github.com>
AuthorDate: Tue Oct 26 14:41:54 2021 +0200

    DRILL-8016: Default to lazy outbound connections for storage-jdbc and storage-splunk (#2342)
    
    * New lazy defaults for HikariCP setting min pool size = 0, max = 10.
    
    * Make storage-splunk plugin connect to Splunk lazily.
    
    * Remove commented-out code; enable code based on DRILL-8005.
---
 .../drill/exec/store/jdbc/JdbcStoragePlugin.java   | 28 ++++++++++++++++++++++
 .../main/resources/bootstrap-storage-plugins.json  |  4 ++++
 .../exec/store/splunk/SplunkSchemaFactory.java     | 21 ++++++----------
 .../drill/exec/store/AbstractStoragePlugin.java    |  2 +-
 4 files changed, 40 insertions(+), 15 deletions(-)
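
To make the motivation concrete, here is a worked example of the eager-connection
arithmetic described in the code comment below (the numbers are hypothetical, chosen
only for illustration).  A data source present in N storage configs, replicated over
P drillbits, with a HikariCP minimumIdle value of Q, receives N×P×Q eager connections:

    N = 5 storage configs referencing the same database
    P = 8 drillbits in the cluster
    Q = 5 (minimumIdle in each config)

    eager connections = N × P × Q = 5 × 8 × 5 = 200

With the new default of minimumIdle = 0, none of these connections is opened until a
query actually needs one.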

diff --git a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcStoragePlugin.java b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcStoragePlugin.java
index daacd0d..8ae93a3 100644
--- a/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcStoragePlugin.java
+++ b/contrib/storage-jdbc/src/main/java/org/apache/drill/exec/store/jdbc/JdbcStoragePlugin.java
@@ -120,6 +120,32 @@ public class JdbcStoragePlugin extends AbstractStoragePlugin {
   static HikariDataSource initDataSource(JdbcStorageConfig config) {
     try {
       Properties properties = new Properties();
+
+      /*
+        Set default HikariCP values that prefer lazy connections, to avoid overwhelming source
+      systems with connections that mostly remain idle.  A data source that is present in N
+      storage configs replicated over P drillbits with a HikariCP minimumIdle value of Q will
+      have N×P×Q connections made to it eagerly.
+        The trade-off of lazier connections is increased latency should there be a spike in user
+      queries involving a JDBC data source.  When comparing the defaults that follow with e.g. the
+      HikariCP defaults, bear in mind that the context here is OLAP, not OLTP.  It is normal
+      for queries to run for a long time and to be separated by long intermissions. Users who
+      prefer eager to lazy connections remain free to overwrite the following defaults in their
+      storage config.
+      */
+
+      // maximum amount of time that a connection is allowed to sit idle in the pool, 0 = forever
+      properties.setProperty("dataSource.idleTimeout", String.format("%d000", 1*60*60)); // 1 hour
+      // how frequently HikariCP will attempt to keep a connection alive, 0 = disabled
+      properties.setProperty("dataSource.keepaliveTime", String.format("%d000", 0));
+      // maximum lifetime of a connection in the pool, 0 = forever
+      properties.setProperty("dataSource.maxLifetime", String.format("%d000", 6*60*60)); // 6 hours
+      // minimum number of idle connections that HikariCP tries to maintain in the pool, 0 = none
+      properties.setProperty("dataSource.minimumIdle", "0");
+      // maximum size that the pool is allowed to reach, including both idle and in-use connections
+      properties.setProperty("dataSource.maximumPoolSize", "10");
+
+      // apply any HikariCP parameters the user may have set, overwriting defaults
       properties.putAll(config.getSourceParameters());
 
       HikariConfig hikariConfig = new HikariConfig(properties);
@@ -129,6 +155,8 @@ public class JdbcStoragePlugin extends AbstractStoragePlugin {
       UsernamePasswordCredentials credentials = config.getUsernamePasswordCredentials();
       hikariConfig.setUsername(credentials.getUsername());
       hikariConfig.setPassword(credentials.getPassword());
+      // this serves as a hint to the driver, which *might* enable database optimizations
+      hikariConfig.setReadOnly(!config.isWritable());
 
       return new HikariDataSource(hikariConfig);
     } catch (RuntimeException e) {
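
As a standalone illustration of the new defaults, the sketch below configures a
HikariCP pool with the same lazy settings using the library's typed setters instead
of the dataSource.-prefixed properties above.  This is not Drill code: the JDBC URL
and credentials are hypothetical, and setKeepaliveTime assumes HikariCP 4.x.

    import com.zaxxer.hikari.HikariConfig;
    import com.zaxxer.hikari.HikariDataSource;

    public class LazyPoolSketch {
      public static void main(String[] args) {
        HikariConfig config = new HikariConfig();
        config.setJdbcUrl("jdbc:postgresql://localhost:5432/demo"); // hypothetical
        config.setUsername("drill");
        config.setPassword("xxx");

        // The lazy defaults from this commit, expressed as typed setters.
        config.setIdleTimeout(60 * 60 * 1000L);     // 1 hour, in milliseconds
        config.setKeepaliveTime(0);                 // 0 = keepalives disabled
        config.setMaxLifetime(6 * 60 * 60 * 1000L); // 6 hours
        config.setMinimumIdle(0);                   // no eagerly opened connections
        config.setMaximumPoolSize(10);
        // Skip the initial connection attempt HikariCP makes at pool startup,
        // so that the pool is fully lazy (a choice made for this sketch).
        config.setInitializationFailTimeout(-1);

        try (HikariDataSource dataSource = new HikariDataSource(config)) {
          // With the settings above, the first connection to the database is
          // opened only when dataSource.getConnection() is first called.
        }
      }
    }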
diff --git a/contrib/storage-jdbc/src/main/resources/bootstrap-storage-plugins.json b/contrib/storage-jdbc/src/main/resources/bootstrap-storage-plugins.json
index 8d04485..dd2fef1 100644
--- a/contrib/storage-jdbc/src/main/resources/bootstrap-storage-plugins.json
+++ b/contrib/storage-jdbc/src/main/resources/bootstrap-storage-plugins.json
@@ -8,6 +8,10 @@
       "password": "xxx",
       "caseInsensitiveTableNames": false,
       "sourceParameters" : {
+        "idleTimeout": 3600000,
+        "keepaliveTime": 0,
+        "maxLifetime": 21600000,
+        "minimumIdle": 0,
         "maximumPoolSize": 10
       },
       "enabled": false
diff --git a/contrib/storage-splunk/src/main/java/org/apache/drill/exec/store/splunk/SplunkSchemaFactory.java b/contrib/storage-splunk/src/main/java/org/apache/drill/exec/store/splunk/SplunkSchemaFactory.java
index fa46ea0..bdd71f9 100644
--- a/contrib/storage-splunk/src/main/java/org/apache/drill/exec/store/splunk/SplunkSchemaFactory.java
+++ b/contrib/storage-splunk/src/main/java/org/apache/drill/exec/store/splunk/SplunkSchemaFactory.java
@@ -18,8 +18,6 @@
 
 package org.apache.drill.exec.store.splunk;
 
-import com.splunk.EntityCollection;
-import com.splunk.Index;
 import org.apache.calcite.schema.SchemaPlus;
 import org.apache.calcite.schema.Table;
 import org.apache.drill.exec.planner.logical.DynamicDrillTable;
@@ -40,19 +38,10 @@ public class SplunkSchemaFactory extends AbstractSchemaFactory {
   private static final Logger logger = LoggerFactory.getLogger(SplunkSchemaFactory.class);
   private static final String SPL_TABLE_NAME = "spl";
   private final SplunkStoragePlugin plugin;
-  private final EntityCollection<Index> indexes;
 
   public SplunkSchemaFactory(SplunkStoragePlugin plugin) {
     super(plugin.getName());
     this.plugin = plugin;
-    SplunkPluginConfig config = plugin.getConfig();
-    SplunkConnection connection = new SplunkConnection(config);
-
-
-
-    // Get Splunk Indexes
-    connection.connect();
-    indexes = connection.getIndexes();
   }
 
   @Override
@@ -110,11 +99,15 @@ public class SplunkSchemaFactory extends AbstractSchemaFactory {
       registerTable(SPL_TABLE_NAME, new DynamicDrillTable(plugin, plugin.getName(),
         new SplunkScanSpec(plugin.getName(), SPL_TABLE_NAME, plugin.getConfig())));
 
-      // Add all other indexes
-      for (String indexName : indexes.keySet()) {
+      // Retrieve and add all other Splunk indexes
+      SplunkPluginConfig config = plugin.getConfig();
+      SplunkConnection connection = new SplunkConnection(config);
+      connection.connect();
+
+      for (String indexName : connection.getIndexes().keySet()) {
         logger.debug("Registering {}", indexName);
         registerTable(indexName, new DynamicDrillTable(plugin, plugin.getName(),
-          new SplunkScanSpec(plugin.getName(), indexName, plugin.getConfig())));
+          new SplunkScanSpec(plugin.getName(), indexName, config)));
       }
     }
   }
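
The Splunk change applies the same principle: connection work moves out of the
constructor and into the schema-registration method shown in the hunk above, so no
round trip to Splunk happens until schemas are actually registered.  The deferral
pattern in general form, as a minimal sketch (names are hypothetical, not Drill's API):

    import java.util.function.Supplier;

    // Memoizing supplier: the expensive call runs on first get(), not at
    // construction time.  Assumes the delegate never returns null.
    public final class Lazy<T> implements Supplier<T> {
      private final Supplier<T> delegate;
      private volatile T value;

      public Lazy(Supplier<T> delegate) {
        this.delegate = delegate;
      }

      @Override
      public T get() {
        T result = value;
        if (result == null) {
          synchronized (this) {
            result = value;
            if (result == null) {
              value = result = delegate.get(); // e.g. connect and fetch indexes
            }
          }
        }
        return result;
      }
    }

The committed code is simpler still, since it just creates the SplunkConnection inside
the method that needs it, but the idea is the same: pay the connection cost at first
use rather than at plugin construction.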
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractStoragePlugin.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractStoragePlugin.java
index 1dcd355..485e25b 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractStoragePlugin.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/AbstractStoragePlugin.java
@@ -90,7 +90,7 @@ public abstract class AbstractStoragePlugin implements StoragePlugin {
 
   /**
    *
-   * Note: Move this method to {@link StoragePlugin} interface in next major version release.
+   * TODO: Move this method to {@link StoragePlugin} interface in next major version release.
    */
   public Set<? extends RelOptRule> getOptimizerRules(OptimizerRulesContext optimizerContext, PlannerPhase phase) {
     switch (phase) {