Posted to oak-commits@jackrabbit.apache.org by re...@apache.org on 2014/07/10 12:16:23 UTC

svn commit: r1609432 - in /jackrabbit/oak/trunk/oak-core/src: main/java/org/apache/jackrabbit/oak/plugins/document/rdb/ test/java/org/apache/jackrabbit/oak/plugins/document/

Author: reschke
Date: Thu Jul 10 10:16:23 2014
New Revision: 1609432

URL: http://svn.apache.org/r1609432
Log:
OAK-1944 - make table prefixes for RDBDocumentStore configurable
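
For illustration, a minimal usage sketch of the new option (not part of the patch itself; the JDBC URL, credentials, and the "myprefix" value are placeholders, and the DataSource is obtained via RDBDataSourceFactory as in the test fixture below):

    import javax.sql.DataSource;

    import org.apache.jackrabbit.oak.plugins.document.DocumentMK;
    import org.apache.jackrabbit.oak.plugins.document.DocumentStore;
    import org.apache.jackrabbit.oak.plugins.document.rdb.RDBDataSourceFactory;
    import org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentStore;
    import org.apache.jackrabbit.oak.plugins.document.rdb.RDBOptions;

    // placeholder connection settings
    DataSource ds = RDBDataSourceFactory.forJdbcUrl("jdbc:h2:mem:oaktest", "sa", "");

    // prefix the tables (NODES -> myprefix_NODES etc.) and drop auto-created tables on dispose()
    RDBOptions options = new RDBOptions().tablePrefix("myprefix").dropTablesOnClose(true);

    DocumentStore store = new RDBDocumentStore(ds, new DocumentMK.Builder(), options);
    // ... use the store ...
    store.dispose(); // drops the myprefix_* tables if this instance auto-created them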

Added:
    jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBOptions.java   (with props)
Modified:
    jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java
    jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreFixture.java

Modified: jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java?rev=1609432&r1=1609431&r2=1609432&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java (original)
+++ jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBDocumentStore.java Thu Jul 10 10:16:23 2014
@@ -168,11 +170,23 @@ public class RDBDocumentStore implements
 
     /**
      * Creates a {@linkplain RDBDocumentStore} instance using the provided
-     * {@link DataSource}.
+     * {@link DataSource}, {@link DocumentMK.Builder}, and {@link RDBOptions}.
+     */
+    public RDBDocumentStore(DataSource ds, DocumentMK.Builder builder, RDBOptions options) {
+        try {
+            initialize(ds, builder, options);
+        } catch (Exception ex) {
+            throw new DocumentStoreException("initializing RDB document store", ex);
+        }
+    }
+
+    /**
+     * Creates a {@linkplain RDBDocumentStore} instance using the provided
+     * {@link DataSource}, {@link DocumentMK.Builder}, and default {@link RDBOptions}.
      */
     public RDBDocumentStore(DataSource ds, DocumentMK.Builder builder) {
         try {
-            initialize(ds, builder);
+            initialize(ds, builder, null);
         } catch (Exception ex) {
             throw new DocumentStoreException("initializing RDB document store", ex);
         }
@@ -254,6 +268,33 @@ public class RDBDocumentStore implements
 
     @Override
     public void dispose() {
+        if (!this.tablesToBeDropped.isEmpty()) {
+            LOG.debug("attempting to drop: " + this.tablesToBeDropped);
+            for (String tname : this.tablesToBeDropped) {
+                Connection con = null;
+                try {
+                    con = getConnection();
+                    try {
+                        Statement stmt = con.createStatement();
+                        stmt.execute("drop table " + tname);
+                        stmt.close();
+                        con.commit();
+                    } catch (SQLException ex) {
+                        LOG.debug("failed to drop table " + tname, ex);
+                    }
+                } catch (SQLException ex) {
+                    LOG.debug("failed to get connection for dropping table " + tname, ex);
+                } finally {
+                    try {
+                        if (con != null) {
+                            con.close();
+                        }
+                    } catch (SQLException ex) {
+                        LOG.debug("on close", ex);
+                    }
+                }
+            }
+        }
         this.ds = null;
     }
 
@@ -285,6 +326,11 @@ public class RDBDocumentStore implements
 
     private DataSource ds;
 
+    // from options
+    private String tablePrefix = "";
+    private boolean dropTablesOnClose = false;
+    private Set<String> tablesToBeDropped = new HashSet<String>();
+
     // capacity of DATA column
     // we assume six octets per Java character as worst case for now
     private int datalimit = 16384 / 6;
@@ -296,7 +342,15 @@ public class RDBDocumentStore implements
     private static Set<String> INDEXEDPROPERTIES = new HashSet<String>(Arrays.asList(new String[] { MODIFIED,
             NodeDocument.HAS_BINARY_FLAG }));
 
-    private void initialize(DataSource ds, DocumentMK.Builder builder) throws Exception {
+    private void initialize(DataSource ds, DocumentMK.Builder builder, RDBOptions options) throws Exception {
+
+        if (options != null) {
+            this.tablePrefix = options.getTablePrefix();
+            if (tablePrefix.length() > 0 && !tablePrefix.endsWith("_")) {
+                tablePrefix += "_";
+            }
+            this.dropTablesOnClose = options.isDropTablesOnClose(); // tables are only registered for dropping when they get auto-created (see createTableFor)
+        }
 
         this.ds = ds;
         this.callStack = LOG.isDebugEnabled() ? new Exception("call stack of RDBDocumentStore creation") : null;
@@ -319,57 +373,68 @@ public class RDBDocumentStore implements
         try {
             con.setAutoCommit(false);
 
-            for (String tableName : new String[] { "CLUSTERNODES", "NODES", "SETTINGS" }) {
-                try {
-                    PreparedStatement stmt = con.prepareStatement("select DATA from " + tableName + " where ID = ?");
-                    stmt.setString(1, "0:/");
-                    ResultSet rs = stmt.executeQuery();
-
-                    if ("NODES".equals(tableName)) {
-                        // try to discover size of DATA column
-                        ResultSetMetaData met = rs.getMetaData();
-                        datalimit = met.getPrecision(1) / 6;
-                    }
-                } catch (SQLException ex) {
-                    // table does not appear to exist
-                    con.rollback();
+            createTableFor(con, dbtype, Collection.CLUSTER_NODES);
+            createTableFor(con, dbtype, Collection.NODES);
+            createTableFor(con, dbtype, Collection.SETTINGS);
+        } finally {
+            con.close();
+        }
+    }
 
-                    LOG.info("Attempting to create table " + tableName + " in " + dbtype);
+    private void createTableFor(Connection con, String dbtype, Collection<? extends Document> col) throws SQLException {
+        String tableName = getTable(col);
+        try {
+            PreparedStatement stmt = con.prepareStatement("select DATA from " + tableName + " where ID = ?");
+            stmt.setString(1, "0:/");
+            ResultSet rs = stmt.executeQuery();
 
-                    Statement stmt = con.createStatement();
+            if (col.equals(Collection.NODES)) {
+                // try to discover size of DATA column
+                ResultSetMetaData met = rs.getMetaData();
+                this.datalimit = met.getPrecision(1) / 6;
+            }
+        } catch (SQLException ex) {
+            // table does not appear to exist
+            con.rollback();
 
-                    // the code below likely will need to be extended for new
-                    // database types
-                    if ("PostgreSQL".equals(dbtype)) {
-                        stmt.execute("create table "
-                                + tableName
-                                + " (ID varchar(1000) not null primary key, MODIFIED bigint, HASBINARY smallint, MODCOUNT bigint, DSIZE bigint, DATA varchar(16384), BDATA bytea)");
-                    } else if ("DB2".equals(dbtype) || (dbtype != null && dbtype.startsWith("DB2/"))) {
-                        stmt.execute("create table "
-                                + tableName
-                                + " (ID varchar(1000) not null primary key, MODIFIED bigint, HASBINARY smallint, MODCOUNT bigint, DSIZE bigint, DATA varchar(16384), BDATA blob)");
-                    } else if ("MySQL".equals(dbtype)) {
-                        // see http://dev.mysql.com/doc/refman/5.5/en/innodb-parameters.html#sysvar_innodb_large_prefix
-                        stmt.execute("create table "
-                                + tableName
-                                + " (ID varchar(767) not null primary key, MODIFIED bigint, HASBINARY smallint, MODCOUNT bigint, DSIZE bigint, DATA varchar(16384), BDATA mediumblob)");
-                    } else if ("Oracle".equals(dbtype)) {
-                        // see https://issues.apache.org/jira/browse/OAK-1914
-                        stmt.execute("create table "
-                                + tableName
-                                + " (ID varchar(1000) not null primary key, MODIFIED number, HASBINARY number, MODCOUNT number, DSIZE number, DATA varchar(4000), BDATA blob)");
-                    } else {
-                        stmt.execute("create table "
-                                + tableName
-                                + " (ID varchar(1000) not null primary key, MODIFIED bigint, HASBINARY smallint, MODCOUNT bigint, DSIZE bigint, DATA varchar(16384), BDATA blob)");
-                    }
-                    stmt.close();
+            LOG.info("Attempting to create table " + tableName + " in " + dbtype);
 
-                    con.commit();
-                }
+            Statement stmt = con.createStatement();
+
+            // the code below likely will need to be extended for new
+            // database types
+            if ("PostgreSQL".equals(dbtype)) {
+                stmt.execute("create table "
+                        + tableName
+                        + " (ID varchar(1000) not null primary key, MODIFIED bigint, HASBINARY smallint, MODCOUNT bigint, DSIZE bigint, DATA varchar(16384), BDATA bytea)");
+            } else if ("DB2".equals(dbtype) || (dbtype != null && dbtype.startsWith("DB2/"))) {
+                stmt.execute("create table "
+                        + tableName
+                        + " (ID varchar(1000) not null primary key, MODIFIED bigint, HASBINARY smallint, MODCOUNT bigint, DSIZE bigint, DATA varchar(16384), BDATA blob)");
+            } else if ("MySQL".equals(dbtype)) {
+                // see
+                // http://dev.mysql.com/doc/refman/5.5/en/innodb-parameters.html#sysvar_innodb_large_prefix
+                stmt.execute("create table "
+                        + tableName
+                        + " (ID varchar(767) not null primary key, MODIFIED bigint, HASBINARY smallint, MODCOUNT bigint, DSIZE bigint, DATA varchar(16384), BDATA mediumblob)");
+            } else if ("Oracle".equals(dbtype)) {
+                // see https://issues.apache.org/jira/browse/OAK-1914
+                this.datalimit = 4000 / 6;
+                stmt.execute("create table "
+                        + tableName
+                        + " (ID varchar(1000) not null primary key, MODIFIED number, HASBINARY number, MODCOUNT number, DSIZE number, DATA varchar(4000), BDATA blob)");
+            } else {
+                stmt.execute("create table "
+                        + tableName
+                        + " (ID varchar(1000) not null primary key, MODIFIED bigint, HASBINARY smallint, MODCOUNT bigint, DSIZE bigint, DATA varchar(16384), BDATA blob)");
+            }
+            stmt.close();
+
+            con.commit();
+
+            if (this.dropTablesOnClose) {
+                tablesToBeDropped.add(tableName);
             }
-        } finally {
-            con.close();
         }
     }
 
@@ -607,13 +672,13 @@ public class RDBDocumentStore implements
         return result;
     }
 
-    private static <T extends Document> String getTable(Collection<T> collection) {
+    private <T extends Document> String getTable(Collection<T> collection) {
         if (collection == Collection.CLUSTER_NODES) {
-            return "CLUSTERNODES";
+            return this.tablePrefix + "CLUSTERNODES";
         } else if (collection == Collection.NODES) {
-            return "NODES";
+            return this.tablePrefix + "NODES";
         } else if (collection == Collection.SETTINGS) {
-            return "SETTINGS";
+            return this.tablePrefix + "SETTINGS";
         } else {
             throw new IllegalArgumentException("Unknown collection: " + collection.toString());
         }

Added: jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBOptions.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBOptions.java?rev=1609432&view=auto
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBOptions.java (added)
+++ jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBOptions.java Thu Jul 10 10:16:23 2014
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.plugins.document.rdb;
+
+/**
+ * Options applicable to RDB persistence
+ */
+public class RDBOptions {
+
+    private boolean dropTablesOnClose = false;
+    private String tablePrefix = "";
+
+    public RDBOptions() {
+    }
+
+    /**
+     * Whether to drop the tables on close (in case they have been auto-created)
+     */
+    public RDBOptions dropTablesOnClose(boolean dropTablesOnClose) {
+        this.dropTablesOnClose = dropTablesOnClose;
+        return this;
+    }
+
+    /**
+     * Prefix for table names.
+     */
+    public RDBOptions tablePrefix(String tablePrefix) {
+        this.tablePrefix = tablePrefix;
+        return this;
+    }
+
+    public String getTablePrefix() {
+        return this.tablePrefix;
+    }
+
+    public boolean isDropTablesOnClose() {
+        return this.dropTablesOnClose;
+    }
+}

Propchange: jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/document/rdb/RDBOptions.java
------------------------------------------------------------------------------
    svn:eol-style = native

Modified: jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreFixture.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreFixture.java?rev=1609432&r1=1609431&r2=1609432&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreFixture.java (original)
+++ jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/document/DocumentStoreFixture.java Thu Jul 10 10:16:23 2014
@@ -24,6 +24,7 @@ import org.apache.jackrabbit.oak.plugins
 import org.apache.jackrabbit.oak.plugins.document.mongo.MongoDocumentStore;
 import org.apache.jackrabbit.oak.plugins.document.rdb.RDBDataSourceFactory;
 import org.apache.jackrabbit.oak.plugins.document.rdb.RDBDocumentStore;
+import org.apache.jackrabbit.oak.plugins.document.rdb.RDBOptions;
 import org.apache.jackrabbit.oak.plugins.document.util.MongoConnection;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -85,6 +86,7 @@ public abstract class DocumentStoreFixtu
         DataSource dataSource;
         DocumentStore store1, store2;
         String name;
+        RDBOptions options = new RDBOptions().tablePrefix("dstest").dropTablesOnClose(true);
 
         public RDBFixture(String name, String url, String username, String passwd) {
             this.name = name;
@@ -103,10 +105,10 @@ public abstract class DocumentStoreFixtu
         @Override
         public DocumentStore createDocumentStore(int clusterId) {
             if (clusterId == 1) {
-                store1 = new RDBDocumentStore(dataSource, new DocumentMK.Builder().setClusterId(1));
+                store1 = new RDBDocumentStore(dataSource, new DocumentMK.Builder().setClusterId(1), options);
                 return store1;
             } else if (clusterId == 2) {
-                store2 = new RDBDocumentStore(dataSource, new DocumentMK.Builder().setClusterId(2));
+                store2 = new RDBDocumentStore(dataSource, new DocumentMK.Builder().setClusterId(2), options);
                 return store2;
             } else {
                 throw new RuntimeException("expect clusterId == 1 or == 2");