Posted to commits@directory.apache.org by se...@apache.org on 2010/01/24 20:04:39 UTC

svn commit: r902620 [6/6] - in /directory/sandbox/seelmann/hbase-partition: ./ src/ src/main/ src/main/java/ src/main/java/org/ src/main/java/org/apache/ src/main/java/org/apache/directory/ src/main/java/org/apache/directory/server/ src/main/java/org/a...

Added: directory/sandbox/seelmann/hbase-partition/src/test/java/org/apache/directory/server/core/partition/hbase/table/HBaseMasterTableTest.java
URL: http://svn.apache.org/viewvc/directory/sandbox/seelmann/hbase-partition/src/test/java/org/apache/directory/server/core/partition/hbase/table/HBaseMasterTableTest.java?rev=902620&view=auto
==============================================================================
--- directory/sandbox/seelmann/hbase-partition/src/test/java/org/apache/directory/server/core/partition/hbase/table/HBaseMasterTableTest.java (added)
+++ directory/sandbox/seelmann/hbase-partition/src/test/java/org/apache/directory/server/core/partition/hbase/table/HBaseMasterTableTest.java Sun Jan 24 19:04:37 2010
@@ -0,0 +1,607 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.directory.server.core.partition.hbase.table;
+
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.util.NavigableMap;
+import java.util.UUID;
+
+import javax.naming.InvalidNameException;
+import javax.naming.NamingException;
+
+import org.apache.directory.server.core.entry.DefaultServerEntry;
+import org.apache.directory.server.core.partition.hbase.HBaseClusterTestCaseAdapter;
+import org.apache.directory.server.core.partition.hbase.it.AbstractHBasePartitionIT;
+import org.apache.directory.shared.ldap.csn.CsnFactory;
+import org.apache.directory.shared.ldap.name.LdapDN;
+import org.apache.directory.shared.ldap.schema.SchemaManager;
+import org.apache.directory.shared.ldap.schema.ldif.extractor.SchemaLdifExtractor;
+import org.apache.directory.shared.ldap.schema.ldif.extractor.impl.DefaultSchemaLdifExtractor;
+import org.apache.directory.shared.ldap.schema.loader.ldif.LdifSchemaLoader;
+import org.apache.directory.shared.ldap.schema.manager.impl.DefaultSchemaManager;
+import org.apache.directory.shared.ldap.util.ExceptionUtils;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+
+/**
+ * Tests for {@link HBaseMasterTable}.
+ *
+ * @author <a href="mailto:dev@directory.apache.org">Apache Directory Project</a>
+ * @version $Rev$, $Date$
+ */
+public class HBaseMasterTableTest
+{
+    protected static HBaseClusterTestCaseAdapter adapter;
+
+
+    /**
+     * Start up the mini HBase cluster.
+     */
+    @BeforeClass
+    public static void before() throws Exception
+    {
+        adapter = new HBaseClusterTestCaseAdapter( AbstractHBasePartitionIT.class );
+        adapter.setUp();
+    }
+
+
+    /**
+     * Shut down the mini HBase cluster.
+     */
+    @AfterClass
+    public static void after() throws Exception
+    {
+        adapter.tearDown();
+    }
+
+    private static final String TABLE_PREFIX = "apacheds_test_";
+    private static final String MASTER_TABLE_NAME = TABLE_PREFIX + "master";
+    private static final String TREE_TABLE_NAME = TABLE_PREFIX + "tree";
+    private static final CsnFactory CSN_FACTORY = new CsnFactory( 0 );
+    private static SchemaManager schemaManager;
+    private LdapDN suffixDn;
+    private HBaseMasterTable masterTable;
+
+
+    /**
+     * Initialize the schema manager.
+     */
+    @BeforeClass
+    public static void init() throws Exception
+    {
+        String workingDirectory = System.getProperty( "workingDirectory" );
+
+        if ( workingDirectory == null )
+        {
+            String path = HBaseMasterTableTest.class.getResource( "" ).getPath();
+            int targetPos = path.indexOf( "target" );
+            workingDirectory = path.substring( 0, targetPos + 6 );
+        }
+
+        File schemaRepository = new File( workingDirectory, "schema" );
+        SchemaLdifExtractor extractor = new DefaultSchemaLdifExtractor( new File( workingDirectory ) );
+        extractor.extractOrCopy( true );
+        LdifSchemaLoader loader = new LdifSchemaLoader( schemaRepository );
+        schemaManager = new DefaultSchemaManager( loader );
+
+        boolean loaded = schemaManager.loadAllEnabled();
+
+        if ( !loaded )
+        {
+            fail( "Schema load failed : " + ExceptionUtils.printErrors( schemaManager.getErrors() ) );
+        }
+    }
+
+
+    /**
+     * Clean up the HBase tables before each test.
+     */
+    @Before
+    public void setup() throws Exception
+    {
+        HBaseAdmin admin = new HBaseAdmin( adapter.getHBaseConfigurtion() );
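+        // if the tables already exist, remove all rows so each test starts from an empty state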
+        if ( admin.tableExists( MASTER_TABLE_NAME ) )
+        {
+            HTable masterHTable = new HTable( MASTER_TABLE_NAME );
+            ResultScanner masterScanner = masterHTable.getScanner( new Scan() );
+            Result masterResult;
+            while ( ( masterResult = masterScanner.next() ) != null )
+            {
+                masterHTable.delete( new Delete( masterResult.getRow() ) );
+            }
+
+            HTable treeHTable = new HTable( TREE_TABLE_NAME );
+            ResultScanner treeScanner = treeHTable.getScanner( new Scan() );
+            Result treeResult;
+            while ( ( treeResult = treeScanner.next() ) != null )
+            {
+                treeHTable.delete( new Delete( treeResult.getRow() ) );
+            }
+        }
+
+        suffixDn = new LdapDN( "o=Good Times Co." );
+        suffixDn.normalize( schemaManager.getNormalizerMapping() );
+        masterTable = new HBaseMasterTable( schemaManager, suffixDn, TABLE_PREFIX );
+    }
+
+
+    @Test
+    public void first() throws Exception
+    {
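+        // smoke test: adding the context entry must succeed without an exception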
+        DefaultServerEntry entry = buildContextEntry();
+        masterTable.add( entry );
+    }
+
+
+    @Test
+    public void testEmpty() throws Exception
+    {
+        assertNull( masterTable.fetchId( new LdapDN( "a=b" ) ) );
+    }
+
+
+    @Test
+    public void testAdd() throws Exception
+    {
+        // add context entry
+        DefaultServerEntry entry = buildContextEntry();
+        masterTable.add( entry );
+
+        HTable masterHTable = new HTable( MASTER_TABLE_NAME );
+        HTable treeHTable = new HTable( TREE_TABLE_NAME );
+
+        // check state in HBase table 'master' 
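+        // the master table row key is the entry's ID as an 8-byte long; the first added entry gets ID 1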
+        Get masterGet = new Get( Bytes.toBytes( 1L ) );
+        assertTrue( masterHTable.exists( masterGet ) );
+        Result masterResult = masterHTable.get( masterGet );
+
+        assertEquals( 1L, Bytes.toLong( masterResult.getRow() ) );
+
+        NavigableMap<byte[], byte[]> treeInfoMap = masterResult.getFamilyMap( Bytes.toBytes( "treeInfo" ) );
+        assertNotNull( treeInfoMap );
+        assertEquals( 3, treeInfoMap.size() );
+        assertEquals( "o=Good Times Co.", Bytes.toString( treeInfoMap.get( Bytes.toBytes( "upRdn" ) ) ) );
+        assertEquals( "2.5.4.10=good times co.", Bytes.toString( treeInfoMap.get( Bytes.toBytes( "normRdn" ) ) ) );
+        assertEquals( 0L, Bytes.toLong( treeInfoMap.get( Bytes.toBytes( "parentId" ) ) ) );
+
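+        // the 'upAttributes' family holds the user-provided values: qualifier = attribute name + 4-byte value index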
+        NavigableMap<byte[], byte[]> upAttributesMap = masterResult.getFamilyMap( Bytes.toBytes( "upAttributes" ) );
+        assertNotNull( upAttributesMap );
+        assertEquals( 6, upAttributesMap.size() );
+        assertEquals( "Good Times Co.", Bytes.toString( upAttributesMap.get( Bytes.add( Bytes.toBytes( "o" ), Bytes
+            .toBytes( 0 ) ) ) ) );
+        assertEquals( "organization", Bytes.toString( upAttributesMap.get( Bytes.add( Bytes.toBytes( "objectClass" ),
+            Bytes.toBytes( 0 ) ) ) ) );
+        assertEquals( "1", Bytes.toString( upAttributesMap.get( Bytes.add( Bytes.toBytes( "postalCode" ), Bytes
+            .toBytes( 0 ) ) ) ) );
+        assertEquals( "1", Bytes.toString( upAttributesMap.get( Bytes.add( Bytes.toBytes( "postOfficeBox" ), Bytes
+            .toBytes( 0 ) ) ) ) );
+        assertEquals( entry.get( "entryUUID" ).get().getString(), Bytes.toString( upAttributesMap.get( Bytes.add( Bytes
+            .toBytes( "entryUUID" ), Bytes.toBytes( 0 ) ) ) ) );
+        assertEquals( entry.get( "entryCsn" ).get().getString(), Bytes.toString( upAttributesMap.get( Bytes.add( Bytes
+            .toBytes( "entryCsn" ), Bytes.toBytes( 0 ) ) ) ) );
+
+        // check state in HBase table 'tree' 
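+        // the tree table row key is the parent ID (8-byte long) + ',' + the normalized RDN; the context entry's parent is 0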
+        Get treeGet = new Get( Bytes.add( Bytes.toBytes( 0L ), Bytes.toBytes( "," ), Bytes
+            .toBytes( "2.5.4.10=good times co." ) ) );
+        Result treeResult = treeHTable.get( treeGet );
+
+        //assertEquals( "", Bytes.toLong( treeResult.getRow() ));
+
+        NavigableMap<byte[], byte[]> infoMap = treeResult.getFamilyMap( Bytes.toBytes( "treeInfo" ) );
+        assertNotNull( infoMap );
+        assertEquals( 1, infoMap.size() );
+        assertEquals( 1L, Bytes.toLong( infoMap.get( Bytes.toBytes( "id" ) ) ) );
+        // no oneLevelCount and subLevelCount column yet
+
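+        // the 'normAttributes' family uses the normalized attribute=value pair (OID form) as qualifier, with the value index (0 here) as cell value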
+        NavigableMap<byte[], byte[]> normAttributesMap = treeResult.getFamilyMap( Bytes.toBytes( "normAttributes" ) );
+        assertNotNull( normAttributesMap );
+        assertEquals( 6, normAttributesMap.size() );
+
+        assertEquals( 0, Bytes.toInt( normAttributesMap.get( Bytes.toBytes( "2.5.4.0=organization" ) ) ) );
+        assertEquals( 0, Bytes.toInt( normAttributesMap.get( Bytes.toBytes( "2.5.4.10=good times co." ) ) ) );
+        assertEquals( 0, Bytes.toInt( normAttributesMap.get( Bytes.toBytes( "2.5.4.17=1" ) ) ) );
+        assertEquals( 0, Bytes.toInt( normAttributesMap.get( Bytes.toBytes( "2.5.4.18=1" ) ) ) );
+        // ...
+
+        // add second entry
+        entry = buildOuSalesEntry();
+        masterTable.add( entry );
+
+        // check in HBase tables
+        masterGet = new Get( Bytes.toBytes( 2L ) );
+        assertTrue( masterHTable.exists( masterGet ) );
+        treeGet = new Get( Bytes.add( Bytes.toBytes( 1L ), Bytes.toBytes( "," ), Bytes.toBytes( "2.5.4.11=sales" ) ) );
+        assertTrue( treeHTable.exists( treeGet ) );
+    }
+
+
+    @Test
+    public void testDelete() throws Exception
+    {
+        // add context entry
+        DefaultServerEntry entry = buildContextEntry();
+        masterTable.add( entry );
+
+        // add second entry
+        entry = buildOuSalesEntry();
+        masterTable.add( entry );
+
+        HTable masterHTable = new HTable( MASTER_TABLE_NAME );
+        HTable treeHTable = new HTable( TREE_TABLE_NAME );
+
+        // delete second entry
+        masterTable.delete( 2L, entry );
+        Get masterGet = new Get( Bytes.toBytes( 2L ) );
+        assertFalse( masterHTable.exists( masterGet ) );
+        Get treeGet = new Get( Bytes.add( Bytes.toBytes( 1L ), Bytes.toBytes( "," ), Bytes.toBytes( "2.5.4.11=sales" ) ) );
+        assertFalse( treeHTable.exists( treeGet ) );
+        assertTrue( masterHTable.exists( new Get( Bytes.toBytes( 1L ) ) ) );
+
+        // delete context entry
+        masterTable.delete( 1L, entry );
+        masterGet = new Get( Bytes.toBytes( 1L ) );
+        assertFalse( masterHTable.exists( masterGet ) );
+        treeGet = new Get( Bytes.add( Bytes.toBytes( 0L ), Bytes.toBytes( "," ), Bytes
+            .toBytes( "2.5.4.10=good times co." ) ) );
+        assertFalse( treeHTable.exists( treeGet ) );
+    }
+
+
+    @Test
+    public void testModify() throws Exception
+    {
+        // add context entry
+        DefaultServerEntry entry = buildContextEntry();
+        masterTable.add( entry );
+
+        // modify some attributes
+        entry.get( "objectClass" ).add( "top" );
+        entry.removeAttributes( "postalCode" );
+        entry.get( "postOfficeBox" ).put( "2" );
+        masterTable.modify( 1L, entry );
+
+        // check state in HBase table 'master' 
+        HTable masterHTable = new HTable( MASTER_TABLE_NAME );
+        Get masterGet = new Get( Bytes.toBytes( 1L ) );
+        assertTrue( masterHTable.exists( masterGet ) );
+        Result masterResult = masterHTable.get( masterGet );
+
+        assertEquals( 1L, Bytes.toLong( masterResult.getRow() ) );
+
+        NavigableMap<byte[], byte[]> treeInfoMap = masterResult.getFamilyMap( Bytes.toBytes( "treeInfo" ) );
+        assertNotNull( treeInfoMap );
+        assertEquals( 3, treeInfoMap.size() );
+        assertEquals( "o=Good Times Co.", Bytes.toString( treeInfoMap.get( Bytes.toBytes( "upRdn" ) ) ) );
+        assertEquals( "2.5.4.10=good times co.", Bytes.toString( treeInfoMap.get( Bytes.toBytes( "normRdn" ) ) ) );
+        assertEquals( 0L, Bytes.toLong( treeInfoMap.get( Bytes.toBytes( "parentId" ) ) ) );
+
+        NavigableMap<byte[], byte[]> upAttributesMap = masterResult.getFamilyMap( Bytes.toBytes( "upAttributes" ) );
+        assertNotNull( upAttributesMap );
+        assertEquals( 6, upAttributesMap.size() );
+        assertEquals( "Good Times Co.", Bytes.toString( upAttributesMap.get( Bytes.add( Bytes.toBytes( "o" ), Bytes
+            .toBytes( 0 ) ) ) ) );
+        assertEquals( "organization", Bytes.toString( upAttributesMap.get( Bytes.add( Bytes.toBytes( "objectClass" ),
+            Bytes.toBytes( 0 ) ) ) ) );
+        assertEquals( "top", Bytes.toString( upAttributesMap.get( Bytes.add( Bytes.toBytes( "objectClass" ), Bytes
+            .toBytes( 1 ) ) ) ) );
+        assertEquals( "2", Bytes.toString( upAttributesMap.get( Bytes.add( Bytes.toBytes( "postOfficeBox" ), Bytes
+            .toBytes( 0 ) ) ) ) );
+        assertEquals( entry.get( "entryUUID" ).get().getString(), Bytes.toString( upAttributesMap.get( Bytes.add( Bytes
+            .toBytes( "entryUUID" ), Bytes.toBytes( 0 ) ) ) ) );
+        assertEquals( entry.get( "entryCsn" ).get().getString(), Bytes.toString( upAttributesMap.get( Bytes.add( Bytes
+            .toBytes( "entryCsn" ), Bytes.toBytes( 0 ) ) ) ) );
+    }
+
+
+    @Test
+    public void testFetch() throws Exception
+    {
+        // add first entry
+        DefaultServerEntry contextEntry = buildContextEntry();
+        masterTable.add( contextEntry );
+
+        // fetch norm attributes
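+        // HBaseTableHelper.RPC_COUNT counts HBase round trips; the deltas below verify how many RPCs each operation costs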
+        long count1 = HBaseTableHelper.RPC_COUNT;
+        NavigableMap<byte[], byte[]> normAttributesMap = masterTable.fetchNormAttributes( 1L );
+        assertNotNull( normAttributesMap );
+        assertEquals( 6, normAttributesMap.size() );
+        assertEquals( 0, Bytes.toInt( normAttributesMap.get( Bytes.toBytes( "2.5.4.0=organization" ) ) ) );
+        assertEquals( 0, Bytes.toInt( normAttributesMap.get( Bytes.toBytes( "2.5.4.10=good times co." ) ) ) );
+        assertEquals( 0, Bytes.toInt( normAttributesMap.get( Bytes.toBytes( "2.5.4.17=1" ) ) ) );
+        assertEquals( 0, Bytes.toInt( normAttributesMap.get( Bytes.toBytes( "2.5.4.18=1" ) ) ) );
+        // ...
+        long count2 = HBaseTableHelper.RPC_COUNT;
+        assertEquals( 2, count2 - count1 ); // treeInfo from master table + normAttributes from tree table 
+
+        // fetch again, this time we hit the cache
+        long count3 = HBaseTableHelper.RPC_COUNT;
+        normAttributesMap = masterTable.fetchNormAttributes( 1L );
+        long count4 = HBaseTableHelper.RPC_COUNT;
+        assertEquals( 1, count4 - count3 ); // normAttributes from tree table
+    }
+
+
+    @Test
+    public void testFetchId() throws Exception
+    {
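+        // fetchId resolves an entry DN to its numeric ID and caches the result after the first lookup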
+        // add first entry
+        DefaultServerEntry contextEntry = buildContextEntry();
+        masterTable.add( contextEntry );
+
+        // fetch ID
+        long count1 = HBaseTableHelper.RPC_COUNT;
+        assertEquals( 1L, masterTable.fetchId( contextEntry.getDn() ).longValue() );
+        long count2 = HBaseTableHelper.RPC_COUNT;
+        assertEquals( 1, count2 - count1 );
+
+        // add second entry
+        DefaultServerEntry ouSalesEntry = buildOuSalesEntry();
+        masterTable.add( ouSalesEntry );
+
+        // fetch entry ID
+        long count3 = HBaseTableHelper.RPC_COUNT;
+        assertEquals( 2L, masterTable.fetchId( ouSalesEntry.getDn() ).longValue() );
+        long count4 = HBaseTableHelper.RPC_COUNT;
+        assertEquals( 1, count4 - count3 );
+
+        // fetch IDs again, this time we hit the cache
+        long count5 = HBaseTableHelper.RPC_COUNT;
+        assertEquals( 1L, masterTable.fetchId( contextEntry.getDn() ).longValue() );
+        assertEquals( 2L, masterTable.fetchId( ouSalesEntry.getDn() ).longValue() );
+        long count6 = HBaseTableHelper.RPC_COUNT;
+        assertEquals( 0, count6 - count5 );
+
+        // check result after deleting the entry
+        masterTable.delete( 2L, ouSalesEntry );
+        assertNull( masterTable.fetchId( ouSalesEntry.getDn() ) );
+
+        // fetch entry IDs of non-existing entries
+        assertNull( masterTable.fetchId( null ) );
+        assertNull( masterTable.fetchId( new LdapDN( "a=b" ) ) );
+    }
+
+
+    @Test
+    public void testFetchParentId() throws Exception
+    {
+        // add first entry
+        DefaultServerEntry contextEntry = buildContextEntry();
+        masterTable.add( contextEntry );
+
+        // fetch parent ID
+        long count1 = HBaseTableHelper.RPC_COUNT;
+        assertEquals( 0L, masterTable.fetchParentId( 1L ).longValue() );
+        long count2 = HBaseTableHelper.RPC_COUNT;
+        assertEquals( 1, count2 - count1 );
+
+        // add second entry
+        DefaultServerEntry ouSalesEntry = buildOuSalesEntry();
+        masterTable.add( ouSalesEntry );
+
+        // fetch parent ID
+        long count3 = HBaseTableHelper.RPC_COUNT;
+        assertEquals( 1L, masterTable.fetchParentId( 2L ).longValue() );
+        long count4 = HBaseTableHelper.RPC_COUNT;
+        assertEquals( 1, count4 - count3 );
+
+        // fetch parents again, this time we hit the cache
+        long count5 = HBaseTableHelper.RPC_COUNT;
+        assertEquals( 0L, masterTable.fetchParentId( 1L ).longValue() );
+        assertEquals( 1L, masterTable.fetchParentId( 2L ).longValue() );
+        long count6 = HBaseTableHelper.RPC_COUNT;
+        assertEquals( 0, count6 - count5 );
+
+        // check result after deleting the entry
+        masterTable.delete( 2L, ouSalesEntry );
+        assertNull( masterTable.fetchParentId( 2L ) );
+
+        // fetch parent IDs of non-existing entries
+        assertNull( masterTable.fetchParentId( null ) );
+        assertNull( masterTable.fetchParentId( 0L ) );
+        assertNull( masterTable.fetchParentId( 10L ) );
+        assertNull( masterTable.fetchParentId( -1L ) );
+    }
+
+
+    @Test
+    public void testFetchEntry() throws Exception
+    {
+        // add first entry
+        DefaultServerEntry contextEntry = buildContextEntry();
+        masterTable.add( contextEntry );
+
+        // fetch first entry
+        long count1 = HBaseTableHelper.RPC_COUNT;
+        assertNotNull( masterTable.fetchEntry( 1L ) );
+        assertEquals( "o=Good Times Co.", masterTable.fetchEntry( 1L ).getDn().getName() );
+        assertEquals( contextEntry, masterTable.fetchEntry( 1L ) );
+        long count2 = HBaseTableHelper.RPC_COUNT;
+        assertEquals( 1, count2 - count1 );
+
+        // add second entry
+        DefaultServerEntry ouSalesEntry = buildOuSalesEntry();
+        masterTable.add( ouSalesEntry );
+
+        // fetch second entry
+        long count3 = HBaseTableHelper.RPC_COUNT;
+        assertNotNull( masterTable.fetchEntry( 2L ) );
+        assertEquals( "ou=Sales,o=Good Times Co.", masterTable.fetchEntry( 2L ).getDn().getName() );
+        assertEquals( ouSalesEntry, masterTable.fetchEntry( 2L ) );
+        long count4 = HBaseTableHelper.RPC_COUNT;
+        assertEquals( 1, count4 - count3 );
+
+        // fetch entries again, this time we hit the cache
+        long count5 = HBaseTableHelper.RPC_COUNT;
+        assertNotNull( masterTable.fetchEntry( 1L ) );
+        assertNotNull( masterTable.fetchEntry( 2L ) );
+        long count6 = HBaseTableHelper.RPC_COUNT;
+        assertEquals( 0, count6 - count5 );
+
+        // check result after deleting the entry
+        masterTable.delete( 2L, ouSalesEntry );
+        assertNull( masterTable.fetchEntry( 2L ) );
+
+        // fetch non-existing entries
+        assertNull( masterTable.fetchEntry( null ) );
+        assertNull( masterTable.fetchEntry( 0L ) );
+        assertNull( masterTable.fetchEntry( 10L ) );
+        assertNull( masterTable.fetchEntry( -1L ) );
+    }
+
+
+    @Test
+    public void testOneAndSubLevelCount() throws Exception
+    {
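+        // oneLevelCount tracks direct children, subLevelCount all descendants; both are updated on add and delete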
+        // add context entry
+        DefaultServerEntry contextEntry = buildContextEntry();
+        masterTable.add( contextEntry );
+
+        // check counters
+        assertEquals( 0, masterTable.getOneLevelCount( 1L ) );
+        assertEquals( 0, masterTable.getSubLevelCount( 1L ) );
+
+        // add ou=Sales entry
+        DefaultServerEntry ouSalesEntry = buildOuSalesEntry();
+        masterTable.add( ouSalesEntry );
+
+        // check counters
+        assertEquals( 0, masterTable.getOneLevelCount( 2L ) );
+        assertEquals( 0, masterTable.getSubLevelCount( 2L ) );
+
+        // check updated counters of context entry
+        assertEquals( 1, masterTable.getOneLevelCount( 1L ) );
+        assertEquals( 1, masterTable.getSubLevelCount( 1L ) );
+
+        // add cn=Johnny Walker entry
+        DefaultServerEntry cnJohnnyWalkerEntry = buildCnJohnnyWalkerEntry();
+        masterTable.add( cnJohnnyWalkerEntry );
+
+        // check counters
+        assertEquals( 0, masterTable.getOneLevelCount( 3L ) );
+        assertEquals( 0, masterTable.getSubLevelCount( 3L ) );
+
+        // check updated counters of ou=Sales entry
+        assertEquals( 1, masterTable.getOneLevelCount( 2L ) );
+        assertEquals( 1, masterTable.getSubLevelCount( 2L ) );
+
+        // check updated counters of context entry
+        assertEquals( 1, masterTable.getOneLevelCount( 1L ) );
+        assertEquals( 2, masterTable.getSubLevelCount( 1L ) );
+
+        // delete cn=Johnny Walker entry
+        masterTable.delete( 3L, cnJohnnyWalkerEntry );
+
+        // check updated counters of ou=Sales entry
+        assertEquals( 0, masterTable.getOneLevelCount( 2L ) );
+        assertEquals( 0, masterTable.getSubLevelCount( 2L ) );
+
+        // check updated counters of context entry
+        assertEquals( 1, masterTable.getOneLevelCount( 1L ) );
+        assertEquals( 1, masterTable.getSubLevelCount( 1L ) );
+
+        // delete ou=Sales entry
+        masterTable.delete( 2L, ouSalesEntry );
+
+        // check updated counters of context entry
+        assertEquals( 0, masterTable.getOneLevelCount( 1L ) );
+        assertEquals( 0, masterTable.getSubLevelCount( 1L ) );
+
+        // test counts for non-existing entries
+        assertEquals( 0, masterTable.getOneLevelCount( null ) );
+        assertEquals( 0, masterTable.getSubLevelCount( null ) );
+        assertEquals( 0, masterTable.getOneLevelCount( 0L ) );
+        assertEquals( 0, masterTable.getSubLevelCount( 0L ) );
+        assertEquals( 0, masterTable.getOneLevelCount( 10L ) );
+        assertEquals( 0, masterTable.getSubLevelCount( 10L ) );
+        assertEquals( 0, masterTable.getOneLevelCount( -1L ) );
+        assertEquals( 0, masterTable.getSubLevelCount( -1L ) );
+    }
+
+
+    private DefaultServerEntry buildContextEntry() throws NamingException
+    {
+        DefaultServerEntry entry = new DefaultServerEntry( schemaManager, suffixDn );
+        entry.add( "objectClass", "organization" );
+        entry.add( "o", "Good Times Co." );
+        entry.add( "postalCode", "1" );
+        entry.add( "postOfficeBox", "1" );
+        entry.add( "entryCsn", CSN_FACTORY.newInstance().toString() );
+        entry.add( "entryUUID", UUID.randomUUID().toString() );
+        return entry;
+    }
+
+
+    private DefaultServerEntry buildOuSalesEntry() throws InvalidNameException, NamingException
+    {
+        LdapDN dn = new LdapDN( "ou=Sales,o=Good Times Co." );
+        dn.normalize( schemaManager.getNormalizerMapping() );
+        DefaultServerEntry entry = new DefaultServerEntry( schemaManager, dn );
+        entry.add( "objectClass", "top", "organizationalUnit" );
+        entry.add( "ou", "Sales" );
+        entry.add( "postalCode", "1" );
+        entry.add( "postOfficeBox", "1" );
+        entry.add( "entryCsn", CSN_FACTORY.newInstance().toString() );
+        entry.add( "entryUUID", UUID.randomUUID().toString() );
+        return entry;
+    }
+
+
+    private DefaultServerEntry buildCnJohnnyWalkerEntry() throws InvalidNameException, NamingException
+    {
+        LdapDN dn = new LdapDN( "cn=JOhnny WAlkeR,ou=Sales,o=Good Times Co." );
+        dn.normalize( schemaManager.getNormalizerMapping() );
+        DefaultServerEntry entry = new DefaultServerEntry( schemaManager, dn );
+        entry.add( "objectClass", "top", "person", "organizationalPerson" );
+        entry.add( "ou", "Sales" );
+        entry.add( "cn", "JOhnny WAlkeR" );
+        entry.add( "sn", "WAlkeR" );
+        entry.add( "postalCode", "3" );
+        entry.add( "postOfficeBox", "3" );
+        entry.add( "entryCsn", CSN_FACTORY.newInstance().toString() );
+        entry.add( "entryUUID", UUID.randomUUID().toString() );
+        return entry;
+    }
+
+
+    @Test
+    public void last() throws Exception
+    {
+    }
+
+}

Added: directory/sandbox/seelmann/hbase-partition/src/test/resources/hbase-site.xml
URL: http://svn.apache.org/viewvc/directory/sandbox/seelmann/hbase-partition/src/test/resources/hbase-site.xml?rev=902620&view=auto
==============================================================================
--- directory/sandbox/seelmann/hbase-partition/src/test/resources/hbase-site.xml (added)
+++ directory/sandbox/seelmann/hbase-partition/src/test/resources/hbase-site.xml Sun Jan 24 19:04:37 2010
@@ -0,0 +1,15 @@
+<?xml version="1.0"?>
+<configuration>
+<!-- 
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>hadoop2,hadoop3,hadoop4</value>
+  </property>
+  
+  <property>
+    <name>mapred.job.tracker</name>
+    <value>hadoop1:9001</value>
+  </property>
+ -->
+  
+</configuration>

Added: directory/sandbox/seelmann/hbase-partition/src/test/resources/log4j.properties
URL: http://svn.apache.org/viewvc/directory/sandbox/seelmann/hbase-partition/src/test/resources/log4j.properties?rev=902620&view=auto
==============================================================================
--- directory/sandbox/seelmann/hbase-partition/src/test/resources/log4j.properties (added)
+++ directory/sandbox/seelmann/hbase-partition/src/test/resources/log4j.properties Sun Jan 24 19:04:37 2010
@@ -0,0 +1,50 @@
+#############################################################################
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+#############################################################################
+log4j.rootCategory=ERROR, stdout
+
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+
+#log4j.appender.R=org.apache.log4j.RollingFileAppender
+#log4j.appender.R.File=apacheds-rolling.log
+#
+#log4j.appender.R.MaxFileSize=1024KB
+## Keep some backup files
+#log4j.appender.R.MaxBackupIndex=5
+#
+#log4j.appender.R.layout=org.apache.log4j.PatternLayout
+#log4j.appender.R.layout.ConversionPattern=[%d{HH:mm:ss:SSS}] %p [%c{1}] - %m%n
+
+log4j.appender.stdout.layout.ConversionPattern=[%d{HH:mm:ss:SSS}] %p [%c{1}] - %m%n
+
+# with these we won't get inundated when switching to DEBUG
+#log4j.logger.org.apache.directory.server.kerberos=DEBUG
+#log4j.logger.org.apache.directory.server.changepw=DEBUG
+#log4j.logger.org.apache.directory.server.ldap.handlers=DEBUG
+log4j.logger.org.apache.directory.shared.ldap.name=WARN
+log4j.logger.org.springframework=WARN
+log4j.logger.org.apache.directory.shared.codec=WARN
+log4j.logger.org.apache.directory.shared.asn1=WARN
+
+log4j.logger.org.apache.directory.server.schema.registries=WARN
+
+#log4j.logger.org.apache.directory.server.core.partition.hbase.table.HBaseTableHelper=DEBUG
+#log4j.logger.org.apache.directory.server.core.partition.hbase.table=TRACE
+#log4j.logger.org.apache.directory.server.core.partition.hbase.table.HBaseMasterTable$MasterTableKey=DEBUG
+#log4j.logger.org.apache.hadoop.hbase.client=DEBUG
+log4j.logger.org.apache.hadoop.metrics=FATAL
+log4j.logger.org.apache.hadoop.hbase.metrics=FATAL
\ No newline at end of file

Added: directory/sandbox/seelmann/hbase-partition/src/test/resources/mapred-site.xml
URL: http://svn.apache.org/viewvc/directory/sandbox/seelmann/hbase-partition/src/test/resources/mapred-site.xml?rev=902620&view=auto
==============================================================================
--- directory/sandbox/seelmann/hbase-partition/src/test/resources/mapred-site.xml (added)
+++ directory/sandbox/seelmann/hbase-partition/src/test/resources/mapred-site.xml Sun Jan 24 19:04:37 2010
@@ -0,0 +1,15 @@
+<?xml version="1.0"?>
+<configuration>
+<!-- 
+  <property>
+    <name>fs.default.name</name>
+    <value>hdfs://hadoop1:9000</value>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker</name>
+    <value>hadoop1:9001</value>
+  </property>
+ -->
+  
+</configuration>