Posted to oak-commits@jackrabbit.apache.org by ju...@apache.org on 2013/02/23 06:45:53 UTC

svn commit: r1449271 - in /jackrabbit/oak/trunk/oak-core/src: main/java/org/apache/jackrabbit/oak/plugins/segment/ test/java/org/apache/jackrabbit/oak/plugins/segment/

Author: jukka
Date: Sat Feb 23 05:45:53 2013
New Revision: 1449271

URL: http://svn.apache.org/r1449271
Log:
OAK-632: SegmentMK: Efficient updates of flat nodes

Fix handling of worst-case scenario where all map keys have the same hash
Move duplicate code for writing map branches to a separate method
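
Background on the worst-case fix: map buckets are chosen by shifting the key
hash at each level, so entries whose keys share a single hash code fall into
the same child bucket on every level and can never be separated by further
splitting. The sketch below is not the actual Oak code (the bucket count, bits
per level and level limit are illustrative assumptions); it only shows why the
recursion needs a level cap before falling back to a plain, oversized leaf:

    // Minimal sketch, not Oak code: bucketing by (hash >> shift) cannot
    // separate entries with identical hashes, so the recursion must stop at
    // some maximum level. 32 buckets and 5 bits per level are assumptions.
    import java.util.ArrayList;
    import java.util.List;

    public class LevelCapSketch {

        static final int BUCKETS_PER_LEVEL = 32;
        static final int MAX_LEVELS = 32 / 5; // assumed cap: 6 levels use 30 of 32 hash bits

        static int leafLevel(List<Integer> hashes, int level) {
            if (hashes.size() <= BUCKETS_PER_LEVEL || level == MAX_LEVELS) {
                return level;                 // a leaf record would be written here
            }
            List<List<Integer>> buckets = new ArrayList<>();
            for (int i = 0; i < BUCKETS_PER_LEVEL; i++) {
                buckets.add(new ArrayList<>());
            }
            for (int hash : hashes) {
                // identical hashes always pick the same child bucket
                buckets.get((hash >> (level * 5)) & (BUCKETS_PER_LEVEL - 1)).add(hash);
            }
            int deepest = level;
            for (List<Integer> bucket : buckets) {
                if (!bucket.isEmpty()) {
                    deepest = Math.max(deepest, leafLevel(bucket, level + 1));
                }
            }
            return deepest;
        }

        public static void main(String[] args) {
            List<Integer> collisions = new ArrayList<>();
            for (int i = 0; i <= BUCKETS_PER_LEVEL; i++) {
                collisions.add(0x1000);       // 33 entries, all with hash 0x1000
            }
            // Without the "level == MAX_LEVELS" escape above, this recursion
            // would never bottom out for these entries.
            System.out.println("leaf written at level " + leafLevel(collisions, 0));
        }
    }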

Modified:
    jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/MapRecord.java
    jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java
    jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/RecordTest.java
    jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentSizeTest.java

Modified: jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/MapRecord.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/MapRecord.java?rev=1449271&r1=1449270&r2=1449271&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/MapRecord.java (original)
+++ jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/MapRecord.java Sat Feb 23 05:45:53 2013
@@ -18,11 +18,14 @@ package org.apache.jackrabbit.oak.plugin
 
 import static com.google.common.base.Preconditions.checkElementIndex;
 import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.base.Preconditions.checkPositionIndex;
 import static java.lang.Integer.highestOneBit;
 import static java.lang.Integer.numberOfTrailingZeros;
 
 import java.util.UUID;
 
+import com.google.common.base.Preconditions;
+
 abstract class MapRecord extends Record {
 
     /**
@@ -65,7 +68,6 @@ abstract class MapRecord extends Record 
         int head = segment.readInt(id.getOffset());
         int level = head >>> SIZE_BITS;
         int size = head & ((1 << SIZE_BITS) - 1);
-        System.out.println("R: " + size + " " + level);
         if (size > BUCKETS_PER_LEVEL && level < MAX_NUMBER_OF_LEVELS) {
             int bitmap = segment.readInt(id.getOffset() + 4);
             return new MapBranch(store, id, size, level, bitmap);
@@ -84,7 +86,7 @@ abstract class MapRecord extends Record 
         super(checkNotNull(id));
         this.store = checkNotNull(store);
         this.size = checkElementIndex(size, MAX_SIZE);
-        this.level = checkElementIndex(level, MAX_NUMBER_OF_LEVELS);
+        this.level = checkPositionIndex(level, MAX_NUMBER_OF_LEVELS);
     }
 
     protected Segment getSegment() {
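
The switch from checkElementIndex to checkPositionIndex above is needed because
a worst-case leaf can now be stored at level == MAX_NUMBER_OF_LEVELS. In Guava,
checkElementIndex(index, size) accepts 0 <= index < size, while
checkPositionIndex(index, size) also accepts index == size. A minimal
illustration (the concrete value used for MAX_NUMBER_OF_LEVELS is an assumption):

    import static com.google.common.base.Preconditions.checkElementIndex;
    import static com.google.common.base.Preconditions.checkPositionIndex;

    public class PreconditionsDemo {
        public static void main(String[] args) {
            int maxLevels = 6;                        // hypothetical MAX_NUMBER_OF_LEVELS
            checkPositionIndex(maxLevels, maxLevels); // passes: index == size is allowed
            checkElementIndex(maxLevels, maxLevels);  // throws IndexOutOfBoundsException
        }
    }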

Modified: jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java?rev=1449271&r1=1449270&r2=1449271&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java (original)
+++ jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java Sat Feb 23 05:45:53 2013
@@ -175,7 +175,7 @@ public class SegmentWriter {
         writeInt((int) value);
     }
 
-    private synchronized MapLeaf writeLeaf(
+    private synchronized MapLeaf writeMapLeaf(
             int level, Collection<MapEntry> entries) {
         checkNotNull(entries);
 
@@ -208,6 +208,26 @@ public class SegmentWriter {
         return new MapLeaf(store, id, size, level);
     }
 
+    private MapRecord writeMapBranch(int level, int size, RecordId[] buckets) {
+        int bitmap = 0;
+        List<RecordId> ids = Lists.newArrayListWithCapacity(buckets.length);
+        for (int i = 0; i < buckets.length; i++) {
+            if (buckets[i] != null) {
+                bitmap |= 1 << i;
+                ids.add(buckets[i]);
+            }
+        }
+
+        RecordId mapId = prepare(8, ids);
+        writeInt((level << MapRecord.SIZE_BITS) | size);
+        writeInt(bitmap);
+        for (RecordId id : ids) {
+            writeRecordId(id);
+        }
+        return new MapBranch(store, mapId, size, level, bitmap);
+    }
+
+
     private synchronized RecordId writeListBucket(List<RecordId> bucket) {
         RecordId bucketId = prepare(0, bucket);
         for (RecordId id : bucket) {
@@ -243,7 +263,11 @@ public class SegmentWriter {
                         map.remove(entry.getName());
                     }
                 }
-                return writeMapBucket(null, map.values(), level);
+                if (map.isEmpty()) {
+                    return null;
+                } else {
+                    return writeMapBucket(null, map.values(), level);
+                }
             } else {
                 List<Collection<MapEntry>> buckets =
                         Lists.newArrayListWithCapacity(BUCKETS_PER_LEVEL);
@@ -272,51 +296,29 @@ public class SegmentWriter {
                     }
                 }
                 
-                List<RecordId> ids = Lists.newArrayList();
-                int bucketMap = 0;
-                for (int i = 0; i < BUCKETS_PER_LEVEL; i++) {
-                    if (bucketIds[i] != null) {
-                        ids.add(bucketIds[i]);
-                        bucketMap |= 1 << i;
-                    }
-                }
-
-                RecordId bucketId = prepare(12, ids);
-                writeInt((level << MapRecord.SIZE_BITS) | newSize);
-                writeInt(bucketMap);
-                for (RecordId id : ids) {
-                    writeRecordId(id);
-                }
-                return new MapBranch(store, bucketId, newSize, level, bucketMap);
+                return writeMapBranch(level, newSize, bucketIds);
             }
-        } else if (entries.size() <= MapRecord.BUCKETS_PER_LEVEL) {
-            return writeLeaf(level, entries);
+        } else if (entries.size() <= MapRecord.BUCKETS_PER_LEVEL
+                || level == MapRecord.MAX_NUMBER_OF_LEVELS) {
+            return writeMapLeaf(level, entries);
         } else {
-            List<MapEntry>[] buckets = new List[MapRecord.BUCKETS_PER_LEVEL];
+            List<MapEntry>[] lists = new List[MapRecord.BUCKETS_PER_LEVEL];
             for (MapEntry entry : entries) {
                 int bucketIndex = (entry.hashCode() >> shift) & mask;
-                if (buckets[bucketIndex] == null) {
-                    buckets[bucketIndex] = Lists.newArrayList();
+                if (lists[bucketIndex] == null) {
+                    lists[bucketIndex] = Lists.newArrayList();
                 }
-                buckets[bucketIndex].add(entry);
+                lists[bucketIndex].add(entry);
             }
 
-            List<RecordId> bucketIds = Lists.newArrayList();
-            int bucketMap = 0;
-            for (int i = 0; i < buckets.length; i++) {
-                if (buckets[i] != null) {
-                    bucketIds.add(writeMapBucket(null, buckets[i], level + 1).getRecordId());
-                    bucketMap |= 1 << i;
+            RecordId[] buckets = new RecordId[MapRecord.BUCKETS_PER_LEVEL];
+            for (int i = 0; i < lists.length; i++) {
+                if (lists[i] != null) {
+                    buckets[i] = writeMapBucket(null, lists[i], level + 1).getRecordId();
                 }
             }
 
-            RecordId bucketId = prepare(8, bucketIds);
-            writeInt((level << MapRecord.SIZE_BITS) | entries.size());
-            writeInt(bucketMap);
-            for (RecordId id : bucketIds) {
-                writeRecordId(id);
-            }
-            return new MapBranch(store, bucketId, entries.size(), level, bucketMap);
+            return writeMapBranch(level, entries.size(), buckets);
         }
     }
 

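The new writeMapBranch method above consolidates the two previously duplicated
branch-writing paths: it records a bitmap of the non-empty buckets and then
stores only those buckets' record ids. One common way to resolve such a layout
on read (shown here as an assumption for illustration, not necessarily how
MapBranch does it) is to count the occupied positions below the requested bucket:

    // Sketch only (assumption, not taken from MapBranch): the record id for
    // bucket i, if present, is the k-th stored id, where k is the number of
    // occupied buckets before position i in the bitmap.
    public class BitmapLookupSketch {
        public static void main(String[] args) {
            int bitmap = (1 << 0) | (1 << 5) | (1 << 8) | (1 << 10); // buckets 0, 5, 8, 10
            int i = 8;                                               // bucket to look up
            if ((bitmap & (1 << i)) != 0) {
                int position = Integer.bitCount(bitmap & ((1 << i) - 1));
                System.out.println("bucket " + i + " stored at position " + position); // 2
            }
        }
    }
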
Modified: jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/RecordTest.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/RecordTest.java?rev=1449271&r1=1449270&r2=1449271&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/RecordTest.java (original)
+++ jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/RecordTest.java Sat Feb 23 05:45:53 2013
@@ -213,7 +213,6 @@ public class RecordTest {
 
         assertEquals(1000, many.size());
         iterator = many.getEntries().iterator();
-        System.out.println(MapRecord.readMap(store, many.getRecordId()));
         for (int i = 0; i < 1000; i++) {
             assertTrue(iterator.hasNext());
             assertEquals(blockId, iterator.next().getValue());
@@ -239,6 +238,29 @@ public class RecordTest {
     }
 
     @Test
+    public void testWorstCaseMap() {
+        RecordId blockId = writer.writeBlock(bytes, 0, bytes.length);
+        Map<String, RecordId> map = Maps.newHashMap();
+        char[] key = new char[2];
+        for (int i = 0; i <= MapRecord.BUCKETS_PER_LEVEL; i++) {
+            key[0] = (char) ('A' + i);
+            key[1] = (char) ('\u1000' - key[0] * 31);
+            map.put(new String(key), blockId);
+        }
+
+        MapRecord bad = writer.writeMap(null, map);
+        writer.flush();
+
+        assertEquals(map.size(), bad.size());
+        Iterator<MapEntry> iterator = bad.getEntries().iterator();
+        for (int i = 0; i < map.size(); i++) {
+            assertTrue(iterator.hasNext());
+            assertEquals('\u1000', iterator.next().getName().hashCode());
+        }
+        assertFalse(iterator.hasNext());
+    }
+
+    @Test
     public void testEmptyNode() {
         NodeState before = MemoryNodeState.EMPTY_NODE;
         NodeState after = writer.writeNode(before);
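
The keys generated in testWorstCaseMap above all share one hash code by
construction: for a two-character string, String.hashCode() evaluates to
s[0] * 31 + s[1], so setting key[1] = '\u1000' - key[0] * 31 cancels the first
term and every key hashes to exactly 0x1000, which is what the test asserts.
A standalone check of that arithmetic (the loop bound 32 stands in for
MapRecord.BUCKETS_PER_LEVEL and is an assumption here):

    // Verifies the collision construction used in testWorstCaseMap: every
    // generated key has the hash code 0x1000 ('\u1000').
    public class CollisionKeysDemo {
        public static void main(String[] args) {
            char[] key = new char[2];
            for (int i = 0; i <= 32; i++) {
                key[0] = (char) ('A' + i);
                key[1] = (char) ('\u1000' - key[0] * 31);
                String s = new String(key);
                // s.hashCode() == s.charAt(0) * 31 + s.charAt(1) == 0x1000
                System.out.println(s + " -> 0x" + Integer.toHexString(s.hashCode()));
            }
        }
    }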

Modified: jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentSizeTest.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentSizeTest.java?rev=1449271&r1=1449270&r2=1449271&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentSizeTest.java (original)
+++ jackrabbit/oak/trunk/oak-core/src/test/java/org/apache/jackrabbit/oak/plugins/segment/SegmentSizeTest.java Sat Feb 23 05:45:53 2013
@@ -163,7 +163,7 @@ public class SegmentSizeTest {
         state = writer.writeNode(builder.getNodeState());
         writer.flush();
         segment = store.readSegment(state.getRecordId().getSegmentId());
-        assertEquals(260, segment.getData().length);
+        assertEquals(252, segment.getData().length);
     }
 
     private int getSize(NodeBuilder builder) {