Posted to commits@directory.apache.org by el...@apache.org on 2008/05/01 02:06:46 UTC

svn commit: r652410 [6/14] - in /directory: apacheds/branches/bigbang/ apacheds/branches/bigbang/apacheds-jdbm/ apacheds/branches/bigbang/apacheds-jdbm/src/ apacheds/branches/bigbang/apacheds-jdbm/src/etc/ apacheds/branches/bigbang/apacheds-jdbm/src/ex...

Added: directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/RecordCache.java
URL: http://svn.apache.org/viewvc/directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/RecordCache.java?rev=652410&view=auto
==============================================================================
--- directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/RecordCache.java (added)
+++ directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/RecordCache.java Wed Apr 30 17:06:41 2008
@@ -0,0 +1,80 @@
+/**
+ * JDBM LICENSE v1.00
+ *
+ * Redistribution and use of this software and associated documentation
+ * ("Software"), with or without modification, are permitted provided
+ * that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain copyright
+ *    statements and notices.  Redistributions must also contain a
+ *    copy of this document.
+ *
+ * 2. Redistributions in binary form must reproduce the
+ *    above copyright notice, this list of conditions and the
+ *    following disclaimer in the documentation and/or other
+ *    materials provided with the distribution.
+ *
+ * 3. The name "JDBM" must not be used to endorse or promote
+ *    products derived from this Software without prior written
+ *    permission of Cees de Groot.  For written permission,
+ *    please contact cg@cdegroot.com.
+ *
+ * 4. Products derived from this Software may not be called "JDBM"
+ *    nor may "JDBM" appear in their names without prior written
+ *    permission of Cees de Groot. 
+ *
+ * 5. Due credit should be given to the JDBM Project
+ *    (http://jdbm.sourceforge.net/).
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE JDBM PROJECT AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT
+ * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL
+ * CEES DE GROOT OR ANY CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Copyright 2000 (C) Cees de Groot. All Rights Reserved.
+ * Contributions are Copyright (C) 2000 by their associated contributors.
+ *
+ * $Id: RecordCache.java,v 1.2 2005/06/25 23:12:32 doomdark Exp $
+ */
+
+package jdbm.recman;
+
+import java.io.IOException;
+
+/**
+ *  This interface is used for synchronization.
+ *  <p>
+ *  RecordManager ensures that the cache has up-to-date information
+ *  by way of an invalidation protocol.
+ */
+public interface RecordCache {
+
+    /**
+     * Notification to flush content related to a given record.
+     */
+    public void flush(long recid) throws IOException;
+
+    /**
+     * Notification to flush the data of all records.
+     */
+    public void flushAll() throws IOException;
+
+    /**
+     * Notification to invalidate content related to a given record.
+     */
+    public void invalidate(long recid) throws IOException;
+
+    /**
+     * Notification to invalidate content of all records.
+     */
+    public void invalidateAll() throws IOException;
+
+}
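
A minimal sketch of what an implementation of this contract can look like. The class name NullRecordCache and its no-op behaviour are illustrative assumptions, not part of this commit; a real cache would write back or drop its cached copies in these callbacks.

    package jdbm.recman;

    import java.io.IOException;

    // Hypothetical no-op implementation: it caches nothing, so every
    // notification from the RecordManager is trivially satisfied.
    class NullRecordCache implements RecordCache {

        public void flush(long recid) throws IOException {
            // nothing cached for recid, so nothing to write back
        }

        public void flushAll() throws IOException {
            // no cached content to flush
        }

        public void invalidate(long recid) throws IOException {
            // no cached content to drop
        }

        public void invalidateAll() throws IOException {
            // no cached content to drop
        }
    }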

Added: directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/RecordFile.java
URL: http://svn.apache.org/viewvc/directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/RecordFile.java?rev=652410&view=auto
==============================================================================
--- directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/RecordFile.java (added)
+++ directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/RecordFile.java Wed Apr 30 17:06:41 2008
@@ -0,0 +1,412 @@
+/**
+ * JDBM LICENSE v1.00
+ *
+ * Redistribution and use of this software and associated documentation
+ * ("Software"), with or without modification, are permitted provided
+ * that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain copyright
+ *    statements and notices.  Redistributions must also contain a
+ *    copy of this document.
+ *
+ * 2. Redistributions in binary form must reproduce the
+ *    above copyright notice, this list of conditions and the
+ *    following disclaimer in the documentation and/or other
+ *    materials provided with the distribution.
+ *
+ * 3. The name "JDBM" must not be used to endorse or promote
+ *    products derived from this Software without prior written
+ *    permission of Cees de Groot.  For written permission,
+ *    please contact cg@cdegroot.com.
+ *
+ * 4. Products derived from this Software may not be called "JDBM"
+ *    nor may "JDBM" appear in their names without prior written
+ *    permission of Cees de Groot.
+ *
+ * 5. Due credit should be given to the JDBM Project
+ *    (http://jdbm.sourceforge.net/).
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE JDBM PROJECT AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT
+ * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL
+ * CEES DE GROOT OR ANY CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Copyright 2000 (C) Cees de Groot. All Rights Reserved.
+ * Contributions are Copyright (C) 2000 by their associated contributors.
+ *
+ * $Id: RecordFile.java,v 1.6 2005/06/25 23:12:32 doomdark Exp $
+ */
+
+package jdbm.recman;
+
+import java.io.*;
+import java.util.*;
+
+/**
+ *  This class represents a random access file as a set of fixed size
+ *  records. Each record has a physical record number, and records are
+ *  cached in order to improve access.
+ *<p>
+ *  The set of dirty records on the in-use list constitutes a transaction.
+ *  Later on, these records are sent to the transaction manager for recovery.
+ */
+public final class RecordFile {
+    final TransactionManager txnMgr;
+
+    // Todo: reorganize in hashes and fifos as necessary.
+    // free -> inUse -> dirty -> inTxn -> free
+    // free is a cache, thus a FIFO. The rest are hashes.
+    private final LinkedList free = new LinkedList();
+    private final HashMap inUse = new HashMap();
+    private final HashMap dirty = new HashMap();
+    private final HashMap inTxn = new HashMap();
+
+    // transactions disabled?
+    private boolean transactionsDisabled = false;
+
+    /** The length of a single block. */
+    public final static int BLOCK_SIZE = 8192;//4096;
+
+    /** The extension of a record file */
+    final static String extension = ".db";
+
+    /** A block of clean data to wipe clean pages. */
+    final static byte[] cleanData = new byte[BLOCK_SIZE];
+
+    private RandomAccessFile file;
+    private final String fileName;
+
+    /**
+     *  Creates a new object on the indicated filename. The file is
+     *  opened in read/write mode.
+     *
+     *  @param fileName the name of the file to open or create, without
+     *         an extension.
+     *  @throws IOException whenever the creation of the underlying
+     *          RandomAccessFile throws it.
+     */
+    RecordFile(String fileName) throws IOException {
+        this.fileName = fileName;
+        file = new RandomAccessFile(fileName + extension, "rw");
+        txnMgr = new TransactionManager(this);
+    }
+
+    /**
+     *  Returns the file name.
+     */
+    String getFileName() {
+        return fileName;
+    }
+
+    /**
+     *  Disables transactions: doesn't sync and doesn't use the
+     *  transaction manager.
+     */
+    void disableTransactions() {
+        transactionsDisabled = true;
+    }
+
+    /**
+     *  Gets a block from the file. The returned byte array is
+     *  the in-memory copy of the record, and thus can be written
+     *  (and subsequently released with a dirty flag in order to
+     *  write the block back).
+     *
+     *  @param blockid The record number to retrieve.
+     */
+     BlockIo get(long blockid) throws IOException {
+         Long key = new Long(blockid);
+
+         // try in transaction list, dirty list, free list
+         BlockIo node = (BlockIo) inTxn.get(key);
+         if (node != null) {
+             inTxn.remove(key);
+             inUse.put(key, node);
+             return node;
+         }
+         node = (BlockIo) dirty.get(key);
+         if (node != null) {
+             dirty.remove(key);
+             inUse.put(key, node);
+             return node;
+         }
+         for (Iterator i = free.iterator(); i.hasNext(); ) {
+             BlockIo cur = (BlockIo) i.next();
+             if (cur.getBlockId() == blockid) {
+                 node = cur;
+                 i.remove();
+                 inUse.put(key, node);
+                 return node;
+             }
+         }
+
+         // sanity check: can't be on in use list
+         if (inUse.get(key) != null) {
+             throw new Error("double get for block " + blockid);
+         }
+
+         // get a new node and read it from the file
+         node = getNewNode(blockid);
+         long offset = blockid * BLOCK_SIZE;
+         if (file.length() > 0 && offset <= file.length()) {
+             read(file, offset, node.getData(), BLOCK_SIZE);
+         } else {
+             System.arraycopy(cleanData, 0, node.getData(), 0, BLOCK_SIZE);
+         }
+         inUse.put(key, node);
+         node.setClean();
+         return node;
+     }
+
+
+    /**
+     *  Releases a block.
+     *
+     *  @param blockid The record number to release.
+     *  @param isDirty If true, the block was modified since the get().
+     */
+    void release(long blockid, boolean isDirty)
+    throws IOException {
+        BlockIo node = (BlockIo) inUse.get(new Long(blockid));
+        if (node == null)
+            throw new IOException("bad blockid " + blockid + " on release");
+        if (!node.isDirty() && isDirty)
+            node.setDirty();
+        release(node);
+    }
+
+    /**
+     *  Releases a block.
+     *
+     *  @param block The block to release.
+     */
+    void release(BlockIo block) {
+        Long key = new Long(block.getBlockId());
+        inUse.remove(key);
+        if (block.isDirty()) {
+            // System.out.println( "Dirty: " + key + block );
+            dirty.put(key, block);
+        } else {
+            if (!transactionsDisabled && block.isInTransaction()) {
+                inTxn.put(key, block);
+            } else {
+                free.add(block);
+            }
+        }
+    }
+
+    /**
+     *  Discards a block (will not write the block even if it's dirty)
+     *
+     *  @param block The block to discard.
+     */
+    void discard(BlockIo block) {
+        Long key = new Long(block.getBlockId());
+        inUse.remove(key);
+
+        // note: block not added to free list on purpose, because
+        //       it's considered invalid
+    }
+
+    /**
+     *  Commits the current transaction by flushing all dirty buffers
+     *  to disk.
+     */
+    void commit() throws IOException {
+        // debugging...
+        if (!inUse.isEmpty() && inUse.size() > 1) {
+            showList(inUse.values().iterator());
+            throw new Error("in use list not empty at commit time ("
+                            + inUse.size() + ")");
+        }
+
+        //  System.out.println("committing...");
+
+        if ( dirty.size() == 0 ) {
+            // if no dirty blocks, skip commit process
+            return;
+        }
+
+        if (!transactionsDisabled) {
+            txnMgr.start();
+        }
+
+        for (Iterator i = dirty.values().iterator(); i.hasNext(); ) {
+            BlockIo node = (BlockIo) i.next();
+            i.remove();
+            // System.out.println("node " + node + " map size now " + dirty.size());
+            if (transactionsDisabled) {
+                long offset = node.getBlockId() * BLOCK_SIZE;
+                file.seek(offset);
+                file.write(node.getData());
+                node.setClean();
+                free.add(node);
+            }
+            else {
+                txnMgr.add(node);
+                inTxn.put(new Long(node.getBlockId()), node);
+            }
+        }
+        if (!transactionsDisabled) {
+            txnMgr.commit();
+        }
+    }
+
+    /**
+     *  Rollback the current transaction by discarding all dirty buffers
+     */
+    void rollback() throws IOException {
+        // debugging...
+        if (!inUse.isEmpty()) {
+            showList(inUse.values().iterator());
+            throw new Error("in use list not empty at rollback time ("
+                            + inUse.size() + ")");
+        }
+        //  System.out.println("rollback...");
+        dirty.clear();
+
+        txnMgr.synchronizeLogFromDisk();
+
+        if (!inTxn.isEmpty()) {
+            showList(inTxn.values().iterator());
+            throw new Error("in txn list not empty at rollback time ("
+                            + inTxn.size() + ")");
+        };
+    }
+
+    /**
+     *  Commits and closes file.
+     */
+    void close() throws IOException {
+        if (!dirty.isEmpty()) {
+            commit();
+        }
+        txnMgr.shutdown();
+
+        if (!inTxn.isEmpty()) {
+            showList(inTxn.values().iterator());
+            throw new Error("In transaction not empty");
+        }
+
+        // these checks actually aren't that bad in a production release
+        if (!dirty.isEmpty()) {
+            System.out.println("ERROR: dirty blocks at close time");
+            showList(dirty.values().iterator());
+            throw new Error("Dirty blocks at close time");
+        }
+        if (!inUse.isEmpty()) {
+            System.out.println("ERROR: inUse blocks at close time");
+            showList(inUse.values().iterator());
+            throw new Error("inUse blocks at close time");
+        }
+
+        // debugging stuff to keep an eye on the free list
+        // System.out.println("Free list size:" + free.size());
+        file.close();
+        file = null;
+    }
+
+
+    /**
+     * Force closing the file and underlying transaction manager.
+     * Used for testing purposes only.
+     */
+    void forceClose() throws IOException {
+      txnMgr.forceClose();
+      file.close();
+    }
+
+    /**
+     *  Prints contents of a list
+     */
+    private void showList(Iterator i) {
+        int cnt = 0;
+        while (i.hasNext()) {
+            System.out.println("elem " + cnt + ": " + i.next());
+            cnt++;
+        }
+    }
+
+
+    /**
+     *  Returns a new node. The node is retrieved from (and removed from)
+     *  the free list, or created anew if the free list is empty.
+     */
+    private BlockIo getNewNode(long blockid)
+    throws IOException {
+
+        BlockIo retval = null;
+        if (!free.isEmpty()) {
+            retval = (BlockIo) free.removeFirst();
+        }
+        if (retval == null)
+            retval = new BlockIo(0, new byte[BLOCK_SIZE]);
+
+        retval.setBlockId(blockid);
+        retval.setView(null);
+        return retval;
+    }
+
+    /**
+     *  Synchs a node to disk. This is called by the transaction manager's
+     *  synchronization code.
+     */
+    void synch(BlockIo node) throws IOException {
+        byte[] data = node.getData();
+        if (data != null) {
+            long offset = node.getBlockId() * BLOCK_SIZE;
+            file.seek(offset);
+            file.write(data);
+        }
+    }
+
+    /**
+     *  Releases a node from the transaction list, if it was sitting
+     *  there.
+     *
+     *  @param recycle true if block data can be reused
+     */
+    void releaseFromTransaction(BlockIo node, boolean recycle)
+    throws IOException {
+        Long key = new Long(node.getBlockId());
+        if ((inTxn.remove(key) != null) && recycle) {
+            free.add(node);
+        }
+    }
+
+    /**
+     *  Synchronizes the file.
+     */
+    void sync() throws IOException {
+        file.getFD().sync();
+    }
+
+
+    /**
+     * Utility method: Read a block from a RandomAccessFile
+     */
+    private static void read(RandomAccessFile file, long offset,
+                             byte[] buffer, int nBytes) throws IOException {
+        file.seek(offset);
+        int remaining = nBytes;
+        int pos = 0;
+        while (remaining > 0) {
+            int read = file.read(buffer, pos, remaining);
+            if (read == -1) {
+                System.arraycopy(cleanData, 0, buffer, pos, remaining);
+                break;
+            }
+            remaining -= read;
+            pos += read;
+        }
+    }
+
+}
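
The free -> inUse -> dirty -> inTxn cycle above is easiest to see from the caller's side. The sketch below is illustrative only: the file name "demo" is an assumption, the real caller is the record manager layer, and since the class is package-private the sketch has to live in jdbm.recman.

    package jdbm.recman;

    import java.io.IOException;

    // Illustrative only: walks one block through the get/release/commit cycle of RecordFile.
    class RecordFileUsageSketch {

        static void touchFirstBlock() throws IOException {
            RecordFile file = new RecordFile("demo");   // opens demo.db and its transaction log
            BlockIo block = file.get(0);                // block 0 moves onto the in-use list
            block.getData()[0] = (byte) 42;             // mutate the in-memory copy
            file.release(0, true);                      // dirty flag: commit() must write it back
            file.commit();                              // dirty block is handed to the TransactionManager
            file.close();                               // flushes remaining state and closes demo.db
        }
    }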

Added: directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/RecordHeader.java
URL: http://svn.apache.org/viewvc/directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/RecordHeader.java?rev=652410&view=auto
==============================================================================
--- directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/RecordHeader.java (added)
+++ directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/RecordHeader.java Wed Apr 30 17:06:41 2008
@@ -0,0 +1,107 @@
+/**
+ * JDBM LICENSE v1.00
+ *
+ * Redistribution and use of this software and associated documentation
+ * ("Software"), with or without modification, are permitted provided
+ * that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain copyright
+ *    statements and notices.  Redistributions must also contain a
+ *    copy of this document.
+ *
+ * 2. Redistributions in binary form must reproduce the
+ *    above copyright notice, this list of conditions and the
+ *    following disclaimer in the documentation and/or other
+ *    materials provided with the distribution.
+ *
+ * 3. The name "JDBM" must not be used to endorse or promote
+ *    products derived from this Software without prior written
+ *    permission of Cees de Groot.  For written permission,
+ *    please contact cg@cdegroot.com.
+ *
+ * 4. Products derived from this Software may not be called "JDBM"
+ *    nor may "JDBM" appear in their names without prior written
+ *    permission of Cees de Groot. 
+ *
+ * 5. Due credit should be given to the JDBM Project
+ *    (http://jdbm.sourceforge.net/).
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE JDBM PROJECT AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT
+ * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL
+ * CEES DE GROOT OR ANY CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Copyright 2000 (C) Cees de Groot. All Rights Reserved.
+ * Contributions are Copyright (C) 2000 by their associated contributors.
+ *
+ * $Id: RecordHeader.java,v 1.1 2000/05/06 00:00:31 boisvert Exp $
+ */
+
+package jdbm.recman;
+
+/**
+ *  The data that comes at the start of a record. It stores
+ *  both the current size and the available size for the record - the latter
+ *  can be bigger than the former, which allows the record to grow without
+ *  needing to be moved and which allows the system to put small records
+ *  in larger free spots.
+ */
+class RecordHeader {
+    // offsets
+    private static final short O_CURRENTSIZE = 0; // int currentSize
+    private static final short O_AVAILABLESIZE = Magic.SZ_INT; // int availableSize
+    static final int SIZE = O_AVAILABLESIZE + Magic.SZ_INT;
+    
+    // my block and the position within the block
+    private BlockIo block;
+    private short pos;
+
+    /**
+     *  Constructs a record header from the indicated data starting at
+     *  the indicated position.
+     */
+    RecordHeader(BlockIo block, short pos) {
+        this.block = block;
+        this.pos = pos;
+        if (pos > (RecordFile.BLOCK_SIZE - SIZE))
+            throw new Error("Offset too large for record header (" 
+                            + block.getBlockId() + ":" 
+                            + pos + ")");
+    }
+
+    /** Returns the current size */
+    int getCurrentSize() {
+        return block.readInt(pos + O_CURRENTSIZE);
+    }
+    
+    /** Sets the current size */
+    void setCurrentSize(int value) {
+        block.writeInt(pos + O_CURRENTSIZE, value);
+    }
+    
+    /** Returns the available size */
+    int getAvailableSize() {
+        return block.readInt(pos + O_AVAILABLESIZE);
+    }
+    
+    /** Sets the available size */
+    void setAvailableSize(int value) {
+        block.writeInt(pos + O_AVAILABLESIZE, value);
+    }
+
+    // overrides java.lang.Object
+    public String toString() {
+        return "RH(" + block.getBlockId() + ":" + pos 
+            + ", avl=" + getAvailableSize()
+            + ", cur=" + getCurrentSize() 
+            + ")";
+    }
+}
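
The current/available split described above is what lets a record grow in place. A small sketch of the bookkeeping, using only the API shown in this commit; the concrete sizes are made-up values.

    package jdbm.recman;

    // Illustrative only: a header whose slot is larger than its record.
    class RecordHeaderSketch {

        static void example() {
            BlockIo block = new BlockIo(0, new byte[RecordFile.BLOCK_SIZE]);
            RecordHeader header = new RecordHeader(block, (short) 0);

            header.setAvailableSize(100);  // the slot can hold up to 100 bytes
            header.setCurrentSize(60);     // the record currently uses 60 of them

            // The record may grow by another 40 bytes without being relocated.
            System.out.println(header);    // prints something like RH(0:0, avl=100, cur=60)
        }
    }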

Added: directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/TransactionManager.java
URL: http://svn.apache.org/viewvc/directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/TransactionManager.java?rev=652410&view=auto
==============================================================================
--- directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/TransactionManager.java (added)
+++ directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/TransactionManager.java Wed Apr 30 17:06:41 2008
@@ -0,0 +1,409 @@
+/**
+ * JDBM LICENSE v1.00
+ *
+ * Redistribution and use of this software and associated documentation
+ * ("Software"), with or without modification, are permitted provided
+ * that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain copyright
+ *    statements and notices.  Redistributions must also contain a
+ *    copy of this document.
+ *
+ * 2. Redistributions in binary form must reproduce the
+ *    above copyright notice, this list of conditions and the
+ *    following disclaimer in the documentation and/or other
+ *    materials provided with the distribution.
+ *
+ * 3. The name "JDBM" must not be used to endorse or promote
+ *    products derived from this Software without prior written
+ *    permission of Cees de Groot.  For written permission,
+ *    please contact cg@cdegroot.com.
+ *
+ * 4. Products derived from this Software may not be called "JDBM"
+ *    nor may "JDBM" appear in their names without prior written
+ *    permission of Cees de Groot.
+ *
+ * 5. Due credit should be given to the JDBM Project
+ *    (http://jdbm.sourceforge.net/).
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE JDBM PROJECT AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT
+ * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL
+ * CEES DE GROOT OR ANY CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Copyright 2000 (C) Cees de Groot. All Rights Reserved.
+ * Contributions are Copyright (C) 2000 by their associated contributors.
+ *
+ * $Id: TransactionManager.java,v 1.7 2005/06/25 23:12:32 doomdark Exp $
+ */
+
+package jdbm.recman;
+
+import java.io.*;
+import java.util.*;
+
+/**
+ *  This class manages the transaction log that belongs to every
+ *  {@link RecordFile}. The transaction log is either clean, or
+ *  in progress. In the latter case, the transaction manager
+ *  takes care of a roll forward.
+ *<p>
+ *  Implementation note: this is a proof-of-concept implementation
+ *  which hasn't been optimized for speed. For instance, all sorts
+ *  of streams are created for every transaction.
+ */
+// TODO: Handle the case where we are recovering lg9 and lg0, where we
+// should start with lg9 instead of lg0!
+
+public final class TransactionManager {
+    private RecordFile owner;
+
+    // streams for transaction log.
+    private FileOutputStream fos;
+    private ObjectOutputStream oos;
+
+    /** 
+     * By default, we keep 10 transactions in the log file before
+     * synchronizing it with the main database file.
+     */
+    static final int DEFAULT_TXNS_IN_LOG = 10;
+
+    /** 
+     * Maximum number of transactions before the log file is
+     * synchronized with the main database file.
+     */
+    private int _maxTxns = DEFAULT_TXNS_IN_LOG;
+
+    /**
+     * In-core copy of transactions. We could read everything back from
+     * the log file, but the RecordFile needs to keep the dirty blocks in
+     * core anyway, so we might as well point to them and spare us a lot
+     * of hassle.
+     */
+    private ArrayList[] txns = new ArrayList[DEFAULT_TXNS_IN_LOG];
+    private int curTxn = -1;
+
+    /** Extension of a log file. */
+    static final String extension = ".lg";
+
+    /**
+     *  Instantiates a transaction manager. If recovery is needed,
+     *  it is performed here.
+     *
+     *  @param owner the RecordFile instance that owns this transaction mgr.
+     */
+    TransactionManager(RecordFile owner) throws IOException {
+        this.owner = owner;
+        recover();
+        open();
+    }
+
+    
+    /**
+     * Synchronize log file data with the main database file.
+     * <p>
+     * After this call, the main database file is guaranteed to be 
+     * consistent and guaranteed to be the only file needed for 
+     * backup purposes.
+     */
+    public void synchronizeLog()
+        throws IOException
+    {
+        synchronizeLogFromMemory();
+    }
+
+    
+    /**
+     * Set the maximum number of transactions to record in
+     * the log (and keep in memory) before the log is
+     * synchronized with the main database file.
+     * <p>
+     * This method must be called while there are no
+     * pending transactions in the log.
+     */
+    public void setMaximumTransactionsInLog( int maxTxns )
+        throws IOException
+    {
+        if ( maxTxns <= 0 ) {
+            throw new IllegalArgumentException( 
+                "Argument 'maxTxns' must be greater than 0." );
+        }
+        if ( curTxn != -1 ) {
+            throw new IllegalStateException( 
+                "Cannot change setting while transactions are pending in the log" );
+        }
+        _maxTxns = maxTxns;
+        txns = new ArrayList[ maxTxns ];
+    }
+
+    
+    /** Builds logfile name  */
+    private String makeLogName() {
+        return owner.getFileName() + extension;
+    }
+
+
+    /** Synchs in-core transactions to data file and opens a fresh log */
+    private void synchronizeLogFromMemory() throws IOException {
+        close();
+
+        TreeSet blockList = new TreeSet( new BlockIoComparator() );
+
+        int numBlocks = 0;
+        int writtenBlocks = 0;
+        for (int i = 0; i < _maxTxns; i++) {
+            if (txns[i] == null)
+                continue;
+            // Add each block to the blockList, replacing the old copy of this
+            // block if necessary, thus avoiding writing the same block twice
+            for (Iterator k = txns[i].iterator(); k.hasNext(); ) {
+                BlockIo block = (BlockIo)k.next();
+                if ( blockList.contains( block ) ) {
+                    block.decrementTransactionCount();
+                }
+                else {
+                    writtenBlocks++;
+                    boolean result = blockList.add( block );
+                }
+                numBlocks++;
+            }
+
+            txns[i] = null;
+        }
+        // Write the blocks from the blockList to disk
+        synchronizeBlocks(blockList.iterator(), true);
+
+        owner.sync();
+        open();
+    }
+
+
+    /** Opens the log file */
+    private void open() throws IOException {
+        fos = new FileOutputStream(makeLogName());
+        oos = new ObjectOutputStream(fos);
+        oos.writeShort(Magic.LOGFILE_HEADER);
+        oos.flush();
+        curTxn = -1;
+    }
+
+    /** Startup recovery on all files */
+    private void recover() throws IOException {
+        String logName = makeLogName();
+        File logFile = new File(logName);
+        if (!logFile.exists())
+            return;
+        if (logFile.length() == 0) {
+            logFile.delete();
+            return;
+        }
+
+        FileInputStream fis = new FileInputStream(logFile);
+        ObjectInputStream ois = new ObjectInputStream(fis);
+
+        try {
+            if (ois.readShort() != Magic.LOGFILE_HEADER)
+                throw new Error("Bad magic on log file");
+        } catch (IOException e) {
+            // corrupted/empty logfile
+            logFile.delete();
+            return;
+        }
+
+        while (true) {
+            ArrayList blocks = null;
+            try {
+                blocks = (ArrayList) ois.readObject();
+            } catch (ClassNotFoundException e) {
+                throw new Error("Unexpected exception: " + e);
+            } catch (IOException e) {
+                // corrupted logfile, ignore rest of transactions
+                break;
+            }
+            synchronizeBlocks(blocks.iterator(), false);
+
+            // ObjectInputStream must match exactly each
+            // ObjectOutputStream created during writes
+            try {
+                ois = new ObjectInputStream(fis);
+            } catch (IOException e) {
+                // corrupted logfile, ignore rest of transactions
+                break;
+            }
+        }
+        owner.sync();
+        logFile.delete();
+    }
+
+    /** Synchronizes the indicated blocks with the owner. */
+    private void synchronizeBlocks(Iterator blockIterator, boolean fromCore)
+    throws IOException {
+        // write block vector elements to the data file.
+        while ( blockIterator.hasNext() ) {
+            BlockIo cur = (BlockIo)blockIterator.next();
+            owner.synch(cur);
+            if (fromCore) {
+                cur.decrementTransactionCount();
+                if (!cur.isInTransaction()) {
+                    owner.releaseFromTransaction(cur, true);
+                }
+            }
+        }
+    }
+
+
+    /** Set clean flag on the blocks. */
+    private void setClean(ArrayList blocks)
+    throws IOException {
+        for (Iterator k = blocks.iterator(); k.hasNext(); ) {
+            BlockIo cur = (BlockIo) k.next();
+            cur.setClean();
+        }
+    }
+
+    /** Discards the indicated blocks and notifies the owner. */
+    private void discardBlocks(ArrayList blocks)
+    throws IOException {
+        for (Iterator k = blocks.iterator(); k.hasNext(); ) {
+            BlockIo cur = (BlockIo) k.next();
+            cur.decrementTransactionCount();
+            if (!cur.isInTransaction()) {
+                owner.releaseFromTransaction(cur, false);
+            }
+        }
+    }
+
+    /**
+     *  Starts a transaction. This can block if all slots have been filled
+     *  with full transactions, waiting for the synchronization thread to
+     *  clean out slots.
+     */
+    void start() throws IOException {
+        curTxn++;
+        if (curTxn == _maxTxns) {
+            synchronizeLogFromMemory();
+            curTxn = 0;
+        }
+        txns[curTxn] = new ArrayList();
+    }
+
+    /**
+     *  Indicates the block is part of the transaction.
+     */
+    void add(BlockIo block) throws IOException {
+        block.incrementTransactionCount();
+        txns[curTxn].add(block);
+    }
+
+    /**
+     *  Commits the transaction to the log file.
+     */
+    void commit() throws IOException {
+        oos.writeObject(txns[curTxn]);
+        sync();
+
+        // set clean flag to indicate blocks have been written to log
+        setClean(txns[curTxn]);
+
+        // open a new ObjectOutputStream in order to store
+        // newer states of BlockIo
+        oos = new ObjectOutputStream(fos);
+    }
+
+    /** Flushes and syncs */
+    private void sync() throws IOException {
+        oos.flush();
+        fos.flush();
+        fos.getFD().sync();
+    }
+
+    /**
+     *  Shuts down the transaction manager, synchronizing any outstanding
+     *  logs first.
+     */
+    void shutdown() throws IOException {
+        synchronizeLogFromMemory();
+        close();
+    }
+
+    /**
+     *  Closes open files.
+     */
+    private void close() throws IOException {
+        sync();
+        oos.close();
+        fos.close();
+        oos = null;
+        fos = null;
+    }
+
+    /**
+     * Force closing the file without synchronizing pending transaction data.
+     * Used for testing purposes only.
+     */
+    void forceClose() throws IOException {
+        oos.close();
+        fos.close();
+        oos = null;
+        fos = null;
+    }
+
+    /**
+     * Use the disk-based transaction log to synchronize the data file.
+     * Outstanding memory logs are discarded because they are believed
+     * to be inconsistent.
+     */
+    void synchronizeLogFromDisk() throws IOException {
+        close();
+
+        for ( int i=0; i < _maxTxns; i++ ) {
+            if (txns[i] == null)
+                continue;
+            discardBlocks(txns[i]);
+            txns[i] = null;
+        }
+
+        recover();
+        open();
+    }
+
+
+    /** INNER CLASS.
+     *  Comparator class for use by the tree set used to store the blocks
+     *  to write for this transaction.  The BlockIo objects are ordered by
+     *  their blockIds.
+     */
+    public static class BlockIoComparator
+        implements Comparator
+    {
+
+        public int compare( Object o1, Object o2 ) {
+            BlockIo block1 = (BlockIo)o1;
+            BlockIo block2 = (BlockIo)o2;
+            int result = 0;
+            if ( block1.getBlockId() == block2.getBlockId() ) {
+                result = 0;
+            }
+            else if ( block1.getBlockId() < block2.getBlockId() ) {
+                result = -1;
+            }
+            else {
+                result = 1;
+            }
+            return result;
+        }
+
+        public boolean equals(Object obj) {
+            return super.equals(obj);
+        }
+    } // class BlockIoComparator
+
+}
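
RecordFile.commit() drives the manager through start/add/commit; spelled out directly, the per-transaction path looks roughly like the sketch below. The value 5 passed to setMaximumTransactionsInLog is an arbitrary example, and calling the manager outside of RecordFile is for illustration only.

    package jdbm.recman;

    import java.io.IOException;

    // Illustrative only: the per-transaction path normally driven by RecordFile.commit().
    class TransactionManagerUsageSketch {

        static void logOneTransaction(RecordFile file, BlockIo dirtyBlock) throws IOException {
            TransactionManager txnMgr = file.txnMgr;   // one manager per RecordFile
            txnMgr.setMaximumTransactionsInLog(5);     // sync the .db file every 5 transactions
            txnMgr.start();                            // claim a slot in the in-core txns array
            txnMgr.add(dirtyBlock);                    // the block becomes part of this transaction
            txnMgr.commit();                           // serialized to the .lg log file and fsynced
        }
    }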

Added: directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/TranslationPage.java
URL: http://svn.apache.org/viewvc/directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/TranslationPage.java?rev=652410&view=auto
==============================================================================
--- directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/TranslationPage.java (added)
+++ directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/TranslationPage.java Wed Apr 30 17:06:41 2008
@@ -0,0 +1,91 @@
+/**
+ * JDBM LICENSE v1.00
+ *
+ * Redistribution and use of this software and associated documentation
+ * ("Software"), with or without modification, are permitted provided
+ * that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain copyright
+ *    statements and notices.  Redistributions must also contain a
+ *    copy of this document.
+ *
+ * 2. Redistributions in binary form must reproduce the
+ *    above copyright notice, this list of conditions and the
+ *    following disclaimer in the documentation and/or other
+ *    materials provided with the distribution.
+ *
+ * 3. The name "JDBM" must not be used to endorse or promote
+ *    products derived from this Software without prior written
+ *    permission of Cees de Groot.  For written permission,
+ *    please contact cg@cdegroot.com.
+ *
+ * 4. Products derived from this Software may not be called "JDBM"
+ *    nor may "JDBM" appear in their names without prior written
+ *    permission of Cees de Groot. 
+ *
+ * 5. Due credit should be given to the JDBM Project
+ *    (http://jdbm.sourceforge.net/).
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE JDBM PROJECT AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT
+ * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL
+ * CEES DE GROOT OR ANY CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Copyright 2000 (C) Cees de Groot. All Rights Reserved.
+ * Contributions are Copyright (C) 2000 by their associated contributors.
+ *
+ * $Id: TranslationPage.java,v 1.1 2000/05/06 00:00:31 boisvert Exp $
+ */
+
+package jdbm.recman;
+
+/**
+ *  Class describing a page that holds translations from logical rowids
+ *  to physical rowids. In fact, the page just holds physical rowids - the
+ *  page's block is the block of the logical rowid, and the offset serves
+ *  as the offset for the rowid.
+ */
+final class TranslationPage extends PageHeader {
+    // offsets
+    static final short O_TRANS = PageHeader.SIZE; // short count
+    static final short ELEMS_PER_PAGE = 
+        (RecordFile.BLOCK_SIZE - O_TRANS) / PhysicalRowId.SIZE;
+    
+    // slots we returned.
+    final PhysicalRowId[] slots = new PhysicalRowId[ELEMS_PER_PAGE];
+
+    /**
+     *  Constructs a data page view from the indicated block.
+     */
+    TranslationPage(BlockIo block) {
+        super(block);
+    }
+
+    /**
+     *  Factory method to create or return a data page for the
+     *  indicated block.
+     */
+    static TranslationPage getTranslationPageView(BlockIo block) {
+        BlockView view = block.getView();
+        if (view != null && view instanceof TranslationPage)
+            return (TranslationPage) view;
+        else
+            return new TranslationPage(block);
+    }
+
+    /** Returns the value of the indicated rowid on the page */
+    PhysicalRowId get(short offset) {
+        int slot = (offset - O_TRANS) / PhysicalRowId.SIZE;
+        if (slots[slot] == null) 
+            slots[slot] = new PhysicalRowId(block, offset);
+        return slots[slot];
+    }
+}
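
A short sketch of how a translation page is consulted, using only what this commit defines; the method name firstSlot is an assumption for illustration.

    package jdbm.recman;

    // Illustrative only: obtain the page view for a block and read its first slot.
    class TranslationPageSketch {

        static PhysicalRowId firstSlot(BlockIo block) {
            TranslationPage page = TranslationPage.getTranslationPageView(block);
            // O_TRANS is the offset of slot 0; later slots follow at PhysicalRowId.SIZE intervals.
            return page.get(TranslationPage.O_TRANS);
        }
    }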

Added: directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/package.html
URL: http://svn.apache.org/viewvc/directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/package.html?rev=652410&view=auto
==============================================================================
--- directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/package.html (added)
+++ directory/apacheds/branches/bigbang/apacheds-jdbm/src/main/java/jdbm/recman/package.html Wed Apr 30 17:06:41 2008
@@ -0,0 +1,12 @@
+<!-- $Id: package.html,v 1.1 2001/05/19 16:01:33 boisvert Exp $ -->
+<html>
+  <body>
+    <p>Core classes for managing persistent objects and processing transactions.</p>
+
+    <dl>
+      <dt><b>Version: </b></dt><dd>$Revision: 1.1 $ $Date: 2001/05/19 16:01:33 $</dd>
+      <dt><b>Author: </b></dt><dd><a href="mailto:boisvert@intalio.com">Alex Boisvert</a></dd>
+    </dl>
+
+  </body>
+</html>

Modified: directory/apacheds/branches/bigbang/bootstrap-plugin/src/main/java/org/apache/directory/server/core/bootstrap/plugin/BootstrapPlugin.java
URL: http://svn.apache.org/viewvc/directory/apacheds/branches/bigbang/bootstrap-plugin/src/main/java/org/apache/directory/server/core/bootstrap/plugin/BootstrapPlugin.java?rev=652410&r1=652409&r2=652410&view=diff
==============================================================================
--- directory/apacheds/branches/bigbang/bootstrap-plugin/src/main/java/org/apache/directory/server/core/bootstrap/plugin/BootstrapPlugin.java (original)
+++ directory/apacheds/branches/bigbang/bootstrap-plugin/src/main/java/org/apache/directory/server/core/bootstrap/plugin/BootstrapPlugin.java Wed Apr 30 17:06:41 2008
@@ -53,7 +53,6 @@
 import org.apache.directory.shared.ldap.constants.SchemaConstants;
 import org.apache.directory.shared.ldap.entry.Modification;
 import org.apache.directory.shared.ldap.entry.ModificationOperation;
-import org.apache.directory.shared.ldap.message.AttributesImpl;
 import org.apache.directory.shared.ldap.name.LdapDN;
 import org.apache.directory.shared.ldap.schema.AttributeType;
 import org.apache.directory.shared.ldap.schema.MatchingRule;
@@ -68,7 +67,6 @@
 import org.codehaus.plexus.util.FileUtils;
 
 import javax.naming.NamingException;
-import javax.naming.directory.Attributes;
 
 import java.io.File;
 import java.io.FileWriter;
@@ -155,7 +153,7 @@
     private String[] indexedAttributes;
 
     /**
-     * Facotry used to create attributes objects from schema entities.
+     * Factory used to create attributes objects from schema entities.
      */
     private AttributesFactory attributesFactory = new AttributesFactory();
 
@@ -183,7 +181,7 @@
     public void execute() throws MojoExecutionException, MojoFailureException
     {
         File packageDirectory = new File( outputDirectory, outputPackage.replace( '.', File.separatorChar ) );
-        
+
         if ( !packageDirectory.exists() )
         {
             packageDirectory.mkdirs();
@@ -191,7 +189,7 @@
 
         // delete output directory if it exists
         File schemaDirectory = new File( packageDirectory, "schema" );
-        
+
         if ( schemaDirectory.exists() )
         {
             try
@@ -201,20 +199,19 @@
             catch ( IOException e )
             {
                 throw new MojoFailureException( "Failed to delete old schema partition folder "
-                        + schemaDirectory.getAbsolutePath() + ": " + e.getMessage() );
+                    + schemaDirectory.getAbsolutePath() + ": " + e.getMessage() );
             }
         }
 
         initializeSchemas();
-        
+
         try
         {
             initializePartition( schemaDirectory );
         }
         catch ( NamingException ne )
         {
-            throw new MojoFailureException( "Failed to initialize the root partition :" + 
-                ne.getMessage() );
+            throw new MojoFailureException( "Failed to initialize the root partition :" + ne.getMessage() );
         }
 
         try
@@ -224,7 +221,7 @@
 
             if ( !hasEntry( dn ) )
             {
-                Attributes entry = new AttributesImpl();
+                ServerEntry entry = new DefaultServerEntry( registries, dn );
                 entry.put( SchemaConstants.OBJECT_CLASS_AT, SchemaConstants.TOP_OC );
                 entry.get( SchemaConstants.OBJECT_CLASS_AT ).add( SchemaConstants.ORGANIZATIONAL_UNIT_OC );
                 entry.put( SchemaConstants.OU_AT, "schema" );
@@ -257,12 +254,11 @@
                 getLog().info( "" );
                 getLog().info( "------------------------------------------------------------------------" );
             }
-            
+
             createSchemaModificationAttributesEntry();
         }
         catch ( NamingException e )
         {
-            e.printStackTrace();
             throw new MojoFailureException( "Failed to add syntaxCheckers to partition: " + e.getMessage() );
         }
 
@@ -279,7 +275,6 @@
         // Create db file listing and place it into the right package on disk
         // ------------------------------------------------------------------
 
-
         File listingFile = new File( packageDirectory, listingFileName );
         PrintWriter out = null;
         try
@@ -306,18 +301,19 @@
         }
     }
 
+    private static final String[] OTHER_SCHEMA_DEPENDENCIES = new String[]
+        { "system", "core", "apache", "apachemeta" };
+
 
-    private static final String[] OTHER_SCHEMA_DEPENDENCIES = new String[] { "system", "core", "apache", "apachemeta" };
-    
     private void createSchemasAndContainers() throws NamingException
     {
         Map<String, Schema> schemaMap = this.registries.getLoadedSchemas();
-        
-        for ( Schema schema:schemaMap.values() )
+
+        for ( Schema schema : schemaMap.values() )
         {
             createSchemaAndContainers( schema );
         }
-        
+
         Schema other = new Schema()
         {
             public String[] getDependencies()
@@ -325,96 +321,102 @@
                 return OTHER_SCHEMA_DEPENDENCIES;
             }
 
+
             public String getOwner()
             {
                 return "uid=admin,ou=system";
             }
 
+
             public String getSchemaName()
             {
                 return "other";
             }
 
+
             public boolean isDisabled()
             {
                 return false;
             }
         };
-        
+
         createSchemaAndContainers( other );
     }
 
 
     private void createSchemaAndContainers( Schema schema ) throws NamingException
     {
-        LdapDN dn = new LdapDN( SchemaConstants.CN_AT + "=" 
-            + schema.getSchemaName() + "," + SchemaConstants.OU_AT + "=schema" );
+        LdapDN dn = new LdapDN( SchemaConstants.CN_AT + "=" + schema.getSchemaName() + "," + SchemaConstants.OU_AT
+            + "=schema" );
         dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
 
         if ( hasEntry( dn ) )
         {
             return;
         }
-        
-        Attributes entry = attributesFactory.getAttributes( schema );
+
+        ServerEntry entry = attributesFactory.getAttributes( schema, registries );
+        entry.setDn( dn );
         store.add( dn, entry );
-        
+
+        dn = ( LdapDN ) dn.clone();
+
         dn.add( SchemaConstants.OU_AT + "=comparators" );
         dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
         checkCreateContainer( dn );
-        
+
         dn.remove( dn.size() - 1 );
         dn.add( SchemaConstants.OU_AT + "=normalizers" );
         dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
         checkCreateContainer( dn );
-        
+
         dn.remove( dn.size() - 1 );
         dn.add( SchemaConstants.OU_AT + "=syntaxCheckers" );
         dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
         checkCreateContainer( dn );
-        
+
         dn.remove( dn.size() - 1 );
         dn.add( SchemaConstants.OU_AT + "=syntaxes" );
         dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
         checkCreateContainer( dn );
-        
+
         dn.remove( dn.size() - 1 );
         dn.add( SchemaConstants.OU_AT + "=matchingRules" );
         dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
         checkCreateContainer( dn );
-        
+
         dn.remove( dn.size() - 1 );
         dn.add( SchemaConstants.OU_AT + "=attributeTypes" );
         dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
         checkCreateContainer( dn );
-        
+
         dn.remove( dn.size() - 1 );
         dn.add( SchemaConstants.OU_AT + "=objectClasses" );
         dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
         checkCreateContainer( dn );
-        
+
         dn.remove( dn.size() - 1 );
         dn.add( SchemaConstants.OU_AT + "=nameForms" );
         dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
         checkCreateContainer( dn );
-        
+
         dn.remove( dn.size() - 1 );
         dn.add( SchemaConstants.OU_AT + "=ditStructureRules" );
         dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
         checkCreateContainer( dn );
-        
+
         dn.remove( dn.size() - 1 );
         dn.add( SchemaConstants.OU_AT + "=ditContentRules" );
         dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
         checkCreateContainer( dn );
-        
+
         dn.remove( dn.size() - 1 );
         dn.add( SchemaConstants.OU_AT + "=matchingRuleUse" );
         dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
         checkCreateContainer( dn );
     }
 
-    
+
     private void addAttributeTypes() throws NamingException
     {
         getLog().info( "------------------------------------------------------------------------" );
@@ -423,9 +425,9 @@
         getLog().info( "" );
 
         AttributeTypeRegistry attributeTypeRegistry = registries.getAttributeTypeRegistry();
-        
+
         Iterator<AttributeType> ii = attributeTypeRegistry.iterator();
-        
+
         while ( ii.hasNext() )
         {
             AttributeType at = ii.next();
@@ -436,12 +438,13 @@
             dn.add( SchemaConstants.OU_AT + "=attributeTypes" );
             dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
             checkCreateContainer( dn );
-            Attributes entry = attributesFactory.getAttributes( at, schema );
+            ServerEntry entry = attributesFactory.getAttributes( at, schema, registries );
             dn.add( MetaSchemaConstants.M_OID_AT + "=" + at.getOid() );
             dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
+            entry.setDn( dn );
             store.add( dn, entry );
         }
-        
+
         getLog().info( "" );
     }
 
@@ -455,7 +458,7 @@
 
         ObjectClassRegistry objectClassRegistry = registries.getObjectClassRegistry();
         Iterator<ObjectClass> ii = objectClassRegistry.iterator();
-        
+
         while ( ii.hasNext() )
         {
             ObjectClass oc = ii.next();
@@ -466,12 +469,13 @@
             dn.add( SchemaConstants.OU_AT + "=objectClasses" );
             dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
             checkCreateContainer( dn );
-            Attributes entry = attributesFactory.getAttributes( oc, schema );
+            ServerEntry entry = attributesFactory.getAttributes( oc, schema, registries );
             dn.add( MetaSchemaConstants.M_OID_AT + "=" + oc.getOid() );
             dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
+            entry.setDn( dn );
             store.add( dn, entry );
         }
-        
+
         getLog().info( "" );
     }
 
@@ -485,7 +489,7 @@
 
         MatchingRuleRegistry matchingRuleRegistry = registries.getMatchingRuleRegistry();
         Iterator<MatchingRule> ii = matchingRuleRegistry.iterator();
-        
+
         while ( ii.hasNext() )
         {
             MatchingRule mr = ii.next();
@@ -496,12 +500,13 @@
             dn.add( SchemaConstants.OU_AT + "=matchingRules" );
             dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
             checkCreateContainer( dn );
-            Attributes entry = attributesFactory.getAttributes( mr, schema );
+            ServerEntry entry = attributesFactory.getAttributes( mr, schema, registries );
             dn.add( MetaSchemaConstants.M_OID_AT + "=" + mr.getOid() );
             dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
+            entry.setDn( dn );
             store.add( dn, entry );
         }
-        
+
         getLog().info( "" );
     }
 
@@ -515,7 +520,7 @@
 
         ComparatorRegistry comparatorRegistry = registries.getComparatorRegistry();
         Iterator<String> ii = comparatorRegistry.oidIterator();
-        
+
         while ( ii.hasNext() )
         {
             String oid = ii.next();
@@ -526,9 +531,11 @@
             dn.add( SchemaConstants.OU_AT + "=comparators" );
             dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
             checkCreateContainer( dn );
-            Attributes entry = attributesFactory.getAttributes( oid, comparatorRegistry.lookup( oid ), schema );
+            ServerEntry entry = attributesFactory.getAttributes( oid, comparatorRegistry.lookup( oid ), schema,
+                registries );
             dn.add( MetaSchemaConstants.M_OID_AT + "=" + oid );
             dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
+            entry.setDn( dn );
             store.add( dn, entry );
         }
         getLog().info( "" );
@@ -544,7 +551,7 @@
 
         NormalizerRegistry normalizerRegistry = registries.getNormalizerRegistry();
         Iterator<String> ii = normalizerRegistry.oidIterator();
-        
+
         while ( ii.hasNext() )
         {
             String oid = ii.next();
@@ -555,12 +562,14 @@
             dn.add( SchemaConstants.OU_AT + "=normalizers" );
             dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
             checkCreateContainer( dn );
-            Attributes entry = attributesFactory.getAttributes( oid, normalizerRegistry.lookup( oid ), schema );
+            ServerEntry entry = attributesFactory.getAttributes( oid, normalizerRegistry.lookup( oid ), schema,
+                registries );
             dn.add( MetaSchemaConstants.M_OID_AT + "=" + oid );
             dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
+            entry.setDn( dn );
             store.add( dn, entry );
         }
-        
+
         getLog().info( "" );
     }
 
@@ -574,7 +583,7 @@
 
         SyntaxRegistry syntaxRegistry = registries.getSyntaxRegistry();
         Iterator<Syntax> ii = syntaxRegistry.iterator();
-        
+
         while ( ii.hasNext() )
         {
             Syntax syntax = ii.next();
@@ -584,9 +593,10 @@
             dn.add( SchemaConstants.OU_AT + "=syntaxes" );
             dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
             checkCreateContainer( dn );
-            Attributes entry = attributesFactory.getAttributes( syntax, schema );
+            ServerEntry entry = attributesFactory.getAttributes( syntax, schema, registries );
             dn.add( MetaSchemaConstants.M_OID_AT + "=" + syntax.getOid() );
             dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
+            entry.setDn( dn );
             store.add( dn, entry );
         }
         getLog().info( "" );
@@ -602,7 +612,7 @@
 
         SyntaxCheckerRegistry syntaxCheckerRegistry = registries.getSyntaxCheckerRegistry();
         Iterator<SyntaxChecker> ii = syntaxCheckerRegistry.iterator();
-        
+
         while ( ii.hasNext() )
         {
             SyntaxChecker syntaxChecker = ii.next();
@@ -613,12 +623,13 @@
             dn.add( SchemaConstants.OU_AT + "=syntaxCheckers" );
             dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
             checkCreateContainer( dn );
-            Attributes entry = attributesFactory.getAttributes( syntaxChecker, schema );
+            ServerEntry entry = attributesFactory.getAttributes( syntaxChecker, schema, registries );
             dn.add( MetaSchemaConstants.M_OID_AT + "=" + syntaxChecker.getSyntaxOid() );
             dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
+            entry.setDn( dn );
             store.add( dn, entry );
         }
-        
+
         getLog().info( "" );
     }
 
@@ -640,28 +651,27 @@
 
         // add the indices
         Set<JdbmIndex> userIndices = new HashSet<JdbmIndex>();
-        
-        for ( String indexedAttribute:indexedAttributes )
+
+        for ( String indexedAttribute : indexedAttributes )
         {
             JdbmIndex index = new JdbmIndex();
             index.setAttributeId( indexedAttribute );
             userIndices.add( index );
         }
-        
+
         store.setUserIndices( userIndices );
 
         ServerEntry rootEntry = new DefaultServerEntry( registries, new LdapDN( ServerDNConstants.OU_SCHEMA_DN ) );
-        rootEntry.put( SchemaConstants.OBJECT_CLASS_AT, SchemaConstants.ORGANIZATIONAL_UNIT_OC );
+        rootEntry.put( SchemaConstants.OBJECT_CLASS_AT, SchemaConstants.TOP_OC, SchemaConstants.ORGANIZATIONAL_UNIT_OC );
         rootEntry.put( SchemaConstants.OU_AT, "schema" );
         store.setContextEntry( rootEntry );
 
         try
         {
-            store.init( this.registries.getOidRegistry(), this.registries.getAttributeTypeRegistry() );
+            store.init( this.registries );
         }
         catch ( NamingException e )
         {
-            e.printStackTrace();
             throw new MojoFailureException( "Failed to initialize parition: " + e.getMessage() );
         }
     }
@@ -677,29 +687,28 @@
      */
     private void createSchemaModificationAttributesEntry() throws NamingException
     {
-        Attributes entry = new AttributesImpl( 
-            SchemaConstants.OBJECT_CLASS_AT, 
-            ApacheSchemaConstants.SCHEMA_MODIFICATION_ATTRIBUTES_OC,
-            true );
-        entry.get( SchemaConstants.OBJECT_CLASS_AT ).add( SchemaConstants.TOP_OC );
-        
+        ServerEntry entry = new DefaultServerEntry( registries );
+        entry.put( SchemaConstants.OBJECT_CLASS_AT, ApacheSchemaConstants.SCHEMA_MODIFICATION_ATTRIBUTES_OC,
+            SchemaConstants.TOP_OC );
+
         entry.put( ApacheSchemaConstants.SCHEMA_MODIFIERS_NAME_AT, ADMIN_NORM_NAME );
         entry.put( SchemaConstants.MODIFIERS_NAME_AT, ADMIN_NORM_NAME );
         entry.put( SchemaConstants.CREATORS_NAME_AT, ADMIN_NORM_NAME );
-        
+
         entry.put( ApacheSchemaConstants.SCHEMA_MODIFY_TIMESTAMP_AT, DateUtils.getGeneralizedTime() );
         entry.put( SchemaConstants.MODIFY_TIMESTAMP_AT, DateUtils.getGeneralizedTime() );
         entry.put( SchemaConstants.CREATE_TIMESTAMP_AT, DateUtils.getGeneralizedTime() );
-        
+
         entry.put( SchemaConstants.CN_AT, "schemaModifications" );
         entry.put( ApacheSchemaConstants.SUBSCHEMA_SUBENTRY_NAME_AT, "cn=schema" );
-        
+
         LdapDN normName = new LdapDN( "cn=schemaModifications,ou=schema" );
         normName.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
+        entry.setDn( normName );
         store.add( normName, entry );
     }
-    
-    
+
+
     /**
      * Loads all the bootstrap schemas into the registries in preparation for
      * loading them into the schema partition.
@@ -734,21 +743,22 @@
         ClassLoader parent = getClass().getClassLoader();
         URL[] urls = new URL[classpathElements.size()];
         int i = 0;
-        
-        for ( String classpathElement:classpathElements )
+
+        for ( String classpathElement : classpathElements )
         {
             try
             {
                 urls[i++] = new File( classpathElement ).toURI().toURL();
-            } 
+            }
             catch ( MalformedURLException e )
             {
-                throw ( MojoFailureException ) new MojoFailureException( "Could not construct classloader: " ).initCause( e );
+                throw ( MojoFailureException ) new MojoFailureException( "Could not construct classloader: " )
+                    .initCause( e );
             }
         }
-        
+
         ClassLoader cl = new URLClassLoader( urls, parent );
-        
+
         for ( int ii = 0; ii < bootstrapSchemaClasses.length; ii++ )
         {
             try
@@ -760,32 +770,34 @@
             catch ( ClassNotFoundException e )
             {
                 getLog().info( "ClassLoader " + getClass().getClassLoader() );
-                getLog().info( "ClassLoader URLs: " + Arrays.asList( ( ( URLClassLoader ) getClass().getClassLoader() ).getURLs() ) );
+                getLog()
+                    .info(
+                        "ClassLoader URLs: "
+                            + Arrays.asList( ( ( URLClassLoader ) getClass().getClassLoader() ).getURLs() ) );
                 e.printStackTrace();
-                throw new MojoFailureException( "Could not find BootstrapSchema class: "
-                        + bootstrapSchemaClasses[ii] );
+                throw new MojoFailureException( "Could not find BootstrapSchema class: " + bootstrapSchemaClasses[ii] );
             }
             catch ( InstantiationException e )
             {
                 e.printStackTrace();
                 throw new MojoFailureException( "Could not instantiate BootstrapSchema class: "
-                        + bootstrapSchemaClasses[ii] );
+                    + bootstrapSchemaClasses[ii] );
             }
             catch ( IllegalAccessException e )
             {
                 e.printStackTrace();
                 throw new MojoFailureException( "Could not instantiate BootstrapSchema class due to security: "
-                        + bootstrapSchemaClasses[ii] );
+                    + bootstrapSchemaClasses[ii] );
             }
 
             getLog().info( "\t" + bootstrapSchemaClasses[ii] );
         }
-        
+
         getLog().info( "" );
 
         BootstrapSchemaLoader loader = new BootstrapSchemaLoader( cl );
         registries = new DefaultRegistries( "bootstrap", loader, new DefaultOidRegistry() );
-        
+
         try
         {
             loader.loadWithDependencies( schemas.values(), registries );
@@ -802,24 +814,24 @@
 
     private void checkCreateContainer( LdapDN dn ) throws NamingException
     {
-        if ( hasEntry( dn ) )
+        LdapDN clonedDn = ( LdapDN ) dn.clone();
+
+        if ( hasEntry( clonedDn ) )
         {
             return;
         }
 
-        Attributes entry = new AttributesImpl();
-        entry.put( SchemaConstants.OBJECT_CLASS_AT, SchemaConstants.TOP_OC );
-        entry.get( SchemaConstants.OBJECT_CLASS_AT ).add( SchemaConstants.ORGANIZATIONAL_UNIT_OC );
-        entry.put( SchemaConstants.OU_AT, dn.getRdn().getValue() );
-        store.add( dn, entry );
+        ServerEntry entry = new DefaultServerEntry( registries, clonedDn );
+        entry.put( SchemaConstants.OBJECT_CLASS_AT, SchemaConstants.TOP_OC, SchemaConstants.ORGANIZATIONAL_UNIT_OC );
+        entry.put( SchemaConstants.OU_AT, ( String ) clonedDn.getRdn().getValue() );
+        store.add( clonedDn, entry );
     }
 
 
     private LdapDN checkCreateSchema( String schemaName ) throws NamingException
     {
         Schema schema = schemas.get( schemaName );
-        LdapDN dn = new LdapDN( SchemaConstants.CN_AT + "="
-                + schemaName + "," + SchemaConstants.OU_AT + "=schema" );
+        LdapDN dn = new LdapDN( SchemaConstants.CN_AT + "=" + schemaName + "," + SchemaConstants.OU_AT + "=schema" );
         dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
 
         if ( hasEntry( dn ) )
@@ -827,7 +839,8 @@
             return dn;
         }
 
-        Attributes entry = attributesFactory.getAttributes( schema );
+        ServerEntry entry = attributesFactory.getAttributes( schema, registries );
+        entry.setDn( dn );
         store.add( dn, entry );
         return dn;
     }
@@ -835,16 +848,13 @@
 
     private void disableSchema( String schemaName ) throws NamingException
     {
-        LdapDN dn = new LdapDN( SchemaConstants.CN_AT + "=" + schemaName
-                + "," + SchemaConstants.OU_AT + "=schema" );
+        LdapDN dn = new LdapDN( SchemaConstants.CN_AT + "=" + schemaName + "," + SchemaConstants.OU_AT + "=schema" );
         dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );
-        
-        Modification mod = new ServerModification( ModificationOperation.ADD_ATTRIBUTE,
-                new DefaultServerAttribute( 
-                    MetaSchemaConstants.M_DISABLED_AT, 
-                    registries.getAttributeTypeRegistry().lookup( MetaSchemaConstants.M_DISABLED_AT ),
-                    "TRUE" ) );
-        
+
+        Modification mod = new ServerModification( ModificationOperation.ADD_ATTRIBUTE, new DefaultServerAttribute(
+            MetaSchemaConstants.M_DISABLED_AT, registries.getAttributeTypeRegistry().lookup(
+                MetaSchemaConstants.M_DISABLED_AT ), "TRUE" ) );
+
         List<Modification> mods = new ArrayList<Modification>();
         mods.add( mod );
         store.modify( dn, mods );
@@ -866,7 +876,7 @@
     private final boolean hasEntry( LdapDN dn ) throws NamingException
     {
         Long id = store.getEntryId( dn.toNormName() );
-        
+
         return ( id != null );
     }
 
@@ -877,18 +887,18 @@
         buf.append( "schema/master.db\n" );
 
         Iterator<String> systemIndices = store.systemIndices();
-        
+
         while ( systemIndices.hasNext() )
         {
-            Index index = store.getSystemIndex(systemIndices.next() );
+            Index index = store.getSystemIndex( systemIndices.next() );
             buf.append( "schema/" );
             buf.append( index.getAttribute().getName() );
             buf.append( ".db\n" );
         }
 
         buf.append( "[USER INDICES]\n" );
-        
-        for ( String indexedAttribute:indexedAttributes )
+
+        for ( String indexedAttribute : indexedAttributes )
         {
             buf.append( "schema/" );
             buf.append( indexedAttribute );
@@ -897,4 +907,57 @@
 
         return buf;
     }
+
+
+    /** A main method allowing the plugin to be run standalone for debugging */
+    public static void main( String[] args ) throws Exception
+    {
+        BootstrapPlugin bp = new BootstrapPlugin();
+
+        bp.disabledSchemas = new String[]
+            { "nis", "krb5kdc", "samba", "autofs", "apachedns", "corba", "dhcp", "mozilla" };
+
+        bp.indexedAttributes = new String[]
+            { "objectClass", "ou", "cn", "m-oid", "m-disabled" };
+
+        bp.bootstrapSchemaClasses = new String[]
+            { "org.apache.directory.server.schema.bootstrap.ApachednsSchema",
+                "org.apache.directory.server.schema.bootstrap.AutofsSchema",
+                "org.apache.directory.server.schema.bootstrap.CollectiveSchema",
+                "org.apache.directory.server.schema.bootstrap.CorbaSchema",
+                "org.apache.directory.server.schema.bootstrap.CosineSchema",
+                "org.apache.directory.server.schema.bootstrap.DhcpSchema",
+                "org.apache.directory.server.schema.bootstrap.InetorgpersonSchema",
+                "org.apache.directory.server.schema.bootstrap.JavaSchema",
+                "org.apache.directory.server.schema.bootstrap.Krb5kdcSchema",
+                "org.apache.directory.server.schema.bootstrap.MozillaSchema",
+                "org.apache.directory.server.schema.bootstrap.NisSchema",
+                "org.apache.directory.server.schema.bootstrap.SambaSchema" };
+
+        bp.outputDirectory = new File( "/tmp" );
+        bp.outputPackage = "org.apache.directory.server.schema.bootstrap.partition";
+        bp.listingFileName = "DBFILES";
+        bp.classpathElements = Arrays
+            .asList( new String[]
+                {
+                    "/home/elecharny/apacheds/bb/apacheds/bootstrap-partition/target/classes",
+                    "/home/elecharny/.m2/repository/org/apache/directory/server/apacheds-jdbm-store/1.5.3-SNAPSHOT/apacheds-jdbm-store-1.5.3-SNAPSHOT.jar",
+                    "/home/elecharny/.m2/repository/org/apache/directory/server/apacheds-jdbm/1.5.3-SNAPSHOT/apacheds-jdbm-1.5.3-SNAPSHOT.jar",
+                    "/home/elecharny/.m2/repository/org/apache/directory/shared/shared-ldap/0.9.11-SNAPSHOT/shared-ldap-0.9.11-SNAPSHOT.jar",
+                    "/home/elecharny/.m2/repository/commons-lang/commons-lang/2.3/commons-lang-2.3.jar",
+                    "/home/elecharny/.m2/repository/commons-collections/commons-collections/3.2/commons-collections-3.2.jar",
+                    "/home/elecharny/.m2/repository/org/apache/directory/shared/shared-asn1/0.9.11-SNAPSHOT/shared-asn1-0.9.11-SNAPSHOT.jar",
+                    "/home/elecharny/.m2/repository/org/slf4j/slf4j-api/1.4.3/slf4j-api-1.4.3.jar",
+                    "/home/elecharny/.m2/repository/org/apache/directory/shared/shared-ldap-constants/0.9.11-SNAPSHOT/shared-ldap-constants-0.9.11-SNAPSHOT.jar",
+                    "/home/elecharny/.m2/repository/antlr/antlr/2.7.7/antlr-2.7.7.jar",
+                    "/home/elecharny/.m2/repository/org/apache/directory/server/apacheds-core-constants/1.5.3-SNAPSHOT/apacheds-core-constants-1.5.3-SNAPSHOT.jar",
+                    "/home/elecharny/.m2/repository/org/apache/directory/server/apacheds-core-entry/1.5.3-SNAPSHOT/apacheds-core-entry-1.5.3-SNAPSHOT.jar",
+                    "/home/elecharny/.m2/repository/org/apache/directory/server/apacheds-schema-registries/1.5.3-SNAPSHOT/apacheds-schema-registries-1.5.3-SNAPSHOT.jar",
+                    "/home/elecharny/.m2/repository/org/apache/directory/server/apacheds-core-shared/1.5.3-SNAPSHOT/apacheds-core-shared-1.5.3-SNAPSHOT.jar",
+                    "/home/elecharny/.m2/repository/org/apache/directory/server/apacheds-btree-base/1.5.3-SNAPSHOT/apacheds-btree-base-1.5.3-SNAPSHOT.jar",
+                    "/home/elecharny/.m2/repository/org/apache/directory/server/apacheds-bootstrap-extract/1.5.3-SNAPSHOT/apacheds-bootstrap-extract-1.5.3-SNAPSHOT.jar",
+                    "/home/elecharny/.m2/repository/org/apache/directory/server/apacheds-schema-extras/1.5.3-SNAPSHOT/apacheds-schema-extras-1.5.3-SNAPSHOT.jar",
+                    "/home/elecharny/.m2/repository/org/apache/directory/server/apacheds-schema-bootstrap/1.5.3-SNAPSHOT/apacheds-schema-bootstrap-1.5.3-SNAPSHOT.jar" } );
+        bp.execute();
+    }
 }
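
Across the hunks above the entry handling converges on one pattern: build a ServerEntry against the bootstrap registries, normalize the DN, set it on the entry, then hand both to the partition store. A minimal sketch of that pattern, using only calls that appear in this diff; the method name and the DN value are illustrative, not part of the commit:

    // Illustrative sketch only -- not part of this commit. Assumes the mojo's
    // existing 'registries' and 'store' fields; the method name and DN are made up.
    private void addContainerEntry() throws NamingException
    {
        LdapDN dn = new LdapDN( "ou=comparators,cn=system,ou=schema" );
        dn.normalize( registries.getAttributeTypeRegistry().getNormalizerMapping() );

        // Entries are now ServerEntry instances built against the registries...
        ServerEntry entry = new DefaultServerEntry( registries, dn );
        entry.put( SchemaConstants.OBJECT_CLASS_AT, SchemaConstants.TOP_OC, SchemaConstants.ORGANIZATIONAL_UNIT_OC );
        entry.put( SchemaConstants.OU_AT, "comparators" );

        // ...and must carry their own DN before being handed to the store.
        entry.setDn( dn );
        store.add( dn, entry );
    }

The explicit setDn() call is what distinguishes the new ServerEntry path from the old Attributes-based one, which relied solely on the DN passed to store.add().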

Modified: directory/apacheds/branches/bigbang/btree-base/pom.xml
URL: http://svn.apache.org/viewvc/directory/apacheds/branches/bigbang/btree-base/pom.xml?rev=652410&r1=652409&r2=652410&view=diff
==============================================================================
--- directory/apacheds/branches/bigbang/btree-base/pom.xml (original)
+++ directory/apacheds/branches/bigbang/btree-base/pom.xml Wed Apr 30 17:06:41 2008
@@ -41,6 +41,12 @@
       <artifactId>apacheds-schema-registries</artifactId>
       <version>${pom.version}</version>
     </dependency>
+
+    <dependency>
+      <groupId>${pom.groupId}</groupId>
+      <artifactId>apacheds-core-entry</artifactId>
+      <version>${pom.version}</version>
+    </dependency>
   </dependencies>
 </project>
 

Modified: directory/apacheds/branches/bigbang/btree-base/src/main/java/org/apache/directory/server/core/partition/impl/btree/IndexRecord.java
URL: http://svn.apache.org/viewvc/directory/apacheds/branches/bigbang/btree-base/src/main/java/org/apache/directory/server/core/partition/impl/btree/IndexRecord.java?rev=652410&r1=652409&r2=652410&view=diff
==============================================================================
--- directory/apacheds/branches/bigbang/btree-base/src/main/java/org/apache/directory/server/core/partition/impl/btree/IndexRecord.java (original)
+++ directory/apacheds/branches/bigbang/btree-base/src/main/java/org/apache/directory/server/core/partition/impl/btree/IndexRecord.java Wed Apr 30 17:06:41 2008
@@ -19,8 +19,7 @@
  */
 package org.apache.directory.server.core.partition.impl.btree;
 
-
-import javax.naming.directory.Attributes;
+import org.apache.directory.server.core.entry.ServerEntry;
 
 
 /**
@@ -34,8 +33,9 @@
 {
     /** The underlying BTree Tuple */
     private final Tuple tuple = new Tuple();
-    /** The referenced entry if resusitated */
-    private Attributes entry = null;
+    
+    /** The referenced entry if resuscitated */
+    private ServerEntry entry = null;
 
 
     /**
@@ -45,7 +45,7 @@
      * @param tuple the tuple for the IndexRecord
      * @param entry the resuscitated entry if any
      */
-    public void setTuple( Tuple tuple, Attributes entry )
+    public void setTuple( Tuple tuple, ServerEntry entry )
     {
         this.tuple.setKey( tuple.getKey() );
         this.tuple.setValue( tuple.getValue() );
@@ -60,7 +60,7 @@
      * @param tuple the tuple for the IndexRecord
      * @param entry the resuscitated entry if any
      */
-    public void setSwapped( Tuple tuple, Attributes entry )
+    public void setSwapped( Tuple tuple, ServerEntry entry )
     {
         this.tuple.setKey( tuple.getValue() );
         this.tuple.setValue( tuple.getKey() );
@@ -118,14 +118,14 @@
      * 
      * @return the entry's attributes
      */
-    public Attributes getAttributes()
+    public ServerEntry getEntry()
     {
         if ( entry == null )
         {
             return null;
         }
 
-        return ( Attributes ) entry.clone();
+        return ( ServerEntry ) entry.clone();
     }
 
 
@@ -134,7 +134,7 @@
      * 
      * @param entry the entry's attributes
      */
-    public void setAttributes( Attributes entry )
+    public void setEntry( ServerEntry entry )
     {
         this.entry = entry;
     }
@@ -159,7 +159,7 @@
      */
     public void copy( IndexRecord record )
     {
-        entry = record.getAttributes();
+        entry = record.getEntry();
         tuple.setKey( record.getIndexKey() );
         tuple.setValue( record.getEntryId() );
     }
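
The rename above (getAttributes/setAttributes to getEntry/setEntry) changes call sites in name and type only; a hedged usage fragment, where record and entry stand in for whatever the caller already holds:

    // Usage fragment only; 'record' is an existing IndexRecord, 'entry' a ServerEntry.
    record.setEntry( entry );               // formerly setAttributes( Attributes )
    ServerEntry copy = record.getEntry();   // formerly getAttributes(); still returns a clone,
                                            // or null when no entry has been set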

Modified: directory/apacheds/branches/bigbang/btree-base/src/main/java/org/apache/directory/server/core/partition/impl/btree/MasterTable.java
URL: http://svn.apache.org/viewvc/directory/apacheds/branches/bigbang/btree-base/src/main/java/org/apache/directory/server/core/partition/impl/btree/MasterTable.java?rev=652410&r1=652409&r2=652410&view=diff
==============================================================================
--- directory/apacheds/branches/bigbang/btree-base/src/main/java/org/apache/directory/server/core/partition/impl/btree/MasterTable.java (original)
+++ directory/apacheds/branches/bigbang/btree-base/src/main/java/org/apache/directory/server/core/partition/impl/btree/MasterTable.java Wed Apr 30 17:06:41 2008
@@ -21,11 +21,12 @@
 
 
 import javax.naming.NamingException;
-import javax.naming.directory.Attributes;
+
+import org.apache.directory.server.core.entry.ServerEntry;
 
 
 /**
- * The master table used to store the Attributes of entries.
+ * The master table used to store entries as ServerEntry objects.
  *
  * @author <a href="mailto:dev@directory.apache.org">Apache Directory Project</a>
  * @version $Rev$
@@ -40,36 +41,36 @@
 
 
     /**
-     * Gets the Attributes of an entry from this MasterTable.
+     * Gets the ServerEntry from this MasterTable.
      *
-     * @param id the BigInteger id of the entry to retrieve.
-     * @return the Attributes of the entry with operational attributes and all.
+     * @param id the Long id of the entry to retrieve.
+     * @return the ServerEntry with operational attributes and all.
      * @throws NamingException if there is a read error on the underlying Db.
      */
-    Attributes get( Object id ) throws NamingException;
+    ServerEntry get( Object id ) throws NamingException;
 
 
     /**
-     * Puts the Attributes of an entry into this master table at an index 
+     * Puts the ServerEntry into this master table at an index 
      * specified by id.  Used both to create new entries and update existing 
      * ones.
      *
-     * @param entry the Attributes of entry w/ operational attributes
-     * @param id the BigInteger id of the entry to put
-     * @return the newly created entry's Attributes
+     * @param entry the ServerEntry w/ operational attributes
+     * @param id the Long id of the entry to put
+     * @return the newly created ServerEntry
      * @throws NamingException if there is a write error on the underlying Db.
      */
-    Attributes put( Attributes entry, Object id ) throws NamingException;
+    ServerEntry put( ServerEntry entry, Object id ) throws NamingException;
 
 
     /**
-     * Deletes a entry from the master table at an index specified by id.
+     * Deletes a ServerEntry from the master table at an index specified by id.
      *
-     * @param id the BigInteger id of the entry to delete
-     * @return the Attributes of the deleted entry
+     * @param id the Long id of the entry to delete
+     * @return the deleted ServerEntry
      * @throws NamingException if there is a write error on the underlying Db
      */
-    Attributes delete( Object id ) throws NamingException;
+    ServerEntry delete( Object id ) throws NamingException;
 
 
     /**
@@ -85,7 +86,7 @@
 
     /**
      * Gets the next value from this SequenceBDb.  This has the side-effect of
-     * changing the current sequence values perminantly in memory and on disk.
+     * changing the current sequence values permanently in memory and on disk.
      *
      * @return the current value incremented by one.
      * @throws NamingException if the admin table storing sequences cannot be
@@ -95,7 +96,7 @@
 
 
     /**
-     * Gets a persistant property stored in the admin table of this MasterTable.
+     * Gets a persistent property stored in the admin table of this MasterTable.
      *
      * @param property the key of the property to get the value of
      * @return the value of the property
@@ -105,7 +106,7 @@
 
 
     /**
-     * Sets a persistant property stored in the admin table of this MasterTable.
+     * Sets a persistent property stored in the admin table of this MasterTable.
      *
      * @param property the key of the property to set the value of
      * @param value the value of the property
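
Since MasterTable is an interface, the hunks above are purely a contract change: implementations now exchange ServerEntry objects instead of Attributes. A sketch of the three reworked operations, assuming some implementation 'master', a populated ServerEntry 'entry', and an id obtained elsewhere:

    // Sketch against the interface as declared above; 'master', 'entry' and 'id' are placeholders.
    master.put( entry, id );                      // create a new entry or update an existing one under id
    ServerEntry stored  = master.get( id );       // read it back, operational attributes included
    ServerEntry removed = master.delete( id );    // remove it; the deleted entry is returned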

Modified: directory/apacheds/branches/bigbang/core-entry/pom.xml
URL: http://svn.apache.org/viewvc/directory/apacheds/branches/bigbang/core-entry/pom.xml?rev=652410&r1=652409&r2=652410&view=diff
==============================================================================
--- directory/apacheds/branches/bigbang/core-entry/pom.xml (original)
+++ directory/apacheds/branches/bigbang/core-entry/pom.xml Wed Apr 30 17:06:41 2008
@@ -41,18 +41,27 @@
       <version>${pom.version}</version>
       <artifactId>apacheds-schema-registries</artifactId>
     </dependency>
+    
     <dependency>
       <groupId>${pom.groupId}</groupId>
       <version>${pom.version}</version>
       <artifactId>apacheds-schema-bootstrap</artifactId>
       <scope>test</scope>
     </dependency>
+    
     <dependency>
       <groupId>${pom.groupId}</groupId>
       <version>${pom.version}</version>
       <artifactId>apacheds-schema-extras</artifactId>
       <scope>test</scope>
     </dependency>
+
+    <dependency>
+      <groupId>${pom.groupId}</groupId>
+      <version>${pom.version}</version>
+      <artifactId>apacheds-jdbm</artifactId>
+    </dependency>
+    
   </dependencies>
   
 </project>

Modified: directory/apacheds/branches/bigbang/core-entry/src/main/java/org/apache/directory/server/core/entry/ServerBinaryValue.java
URL: http://svn.apache.org/viewvc/directory/apacheds/branches/bigbang/core-entry/src/main/java/org/apache/directory/server/core/entry/ServerBinaryValue.java?rev=652410&r1=652409&r2=652410&view=diff
==============================================================================
--- directory/apacheds/branches/bigbang/core-entry/src/main/java/org/apache/directory/server/core/entry/ServerBinaryValue.java (original)
+++ directory/apacheds/branches/bigbang/core-entry/src/main/java/org/apache/directory/server/core/entry/ServerBinaryValue.java Wed Apr 30 17:06:41 2008
@@ -115,10 +115,19 @@
     public ServerBinaryValue( AttributeType attributeType )
     {
         super();
-        assert checkAttributeType( attributeType) == null : logAssert( checkAttributeType( attributeType ) );
+        
+        if ( attributeType == null )
+        {
+            throw new IllegalArgumentException( "The AttributeType parameter should not be null" );
+        }
 
         try
         {
+            if ( attributeType.getSyntax() == null )
+            {
+                throw new IllegalArgumentException( "There is no Syntax associated with this attributeType" );
+            }
+
             if ( attributeType.getSyntax().isHumanReadable() )
             {
                 LOG.warn( "Treating a value of a human readible attribute {} as binary: ", attributeType.getName() );