Posted to oak-commits@jackrabbit.apache.org by md...@apache.org on 2015/08/26 17:08:56 UTC

svn commit: r1697955 - /jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java

Author: mduerig
Date: Wed Aug 26 15:08:56 2015
New Revision: 1697955

URL: http://svn.apache.org/r1697955
Log:
OAK-3235: Deadlock when closing a concurrently used FileStore
Reverting changes from revision 1697368 as they cause SNFEs
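
The diff below restores the pre-r1697368 shape of SegmentWriter.flush(), in which the
store write happens while the writer lock is held instead of being deferred until after
the synchronized block. The following is a schematic reduction of the two shapes, using
only names that appear in the diff, with the serialization details elided; it is a
sketch of the structure, not compilable code on its own:

    // Shape restored by this revert: serialize, persist and cache the
    // segment entirely inside the synchronized method, so the segment is
    // already in the store (and in the tracker's memory cache) by the time
    // flush() returns.
    public synchronized void flush() {
        if (length > 0) {
            // ... write header, root records and blob references into buffer ...
            store.writeSegment(id, buffer, buffer.length - length, length);
            tracker.setSegment(id, new Segment(tracker, id, data));
            // ... reset buffer, roots, blobrefs, length, position ...
        }
    }

    // Shape being reverted (r1697368): capture id, buffer, offset and length
    // under the lock, then call store.writeSegment() only after releasing it.
    // That keeps store I/O out of the critical section, presumably to address
    // the OAK-3235 deadlock named above, but per the log message it caused
    // SegmentNotFoundExceptions (SNFEs).
    public void flush() {
        SegmentId segmentId = null;
        byte[] segmentBuffer = null;
        int segmentOffset = 0;
        int segmentLength = 0;
        synchronized (this) {
            // ... serialize into buffer, cache the segment in the tracker,
            // and remember segmentId/segmentBuffer/segmentOffset/segmentLength ...
        }
        if (segmentId != null) {
            store.writeSegment(segmentId, segmentBuffer, segmentOffset, segmentLength);
        }
    }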

Modified:
    jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java

Modified: jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java?rev=1697955&r1=1697954&r2=1697955&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java (original)
+++ jackrabbit/oak/trunk/oak-core/src/main/java/org/apache/jackrabbit/oak/plugins/segment/SegmentWriter.java Wed Aug 26 15:08:56 2015
@@ -176,95 +176,75 @@ public class SegmentWriter {
      * store. This is done automatically (called from prepare) when there is not
      * enough space for a record. It can also be called explicitly.
      */
-    public void flush() {
-        // Id of the segment to be written in the file store. If the segment id
-        // is not null, a segment will be written outside of the synchronized
-        // block.
-        SegmentId segmentId = null;
-
-        // Buffer containing segment data, and offset and length to locate the
-        // segment data into the buffer. These variable will be initialized in
-        // the synchronized block.
-        byte[] segmentBuffer = null;
-        int segmentOffset = 0;
-        int segmentLength = 0;
-
-        synchronized (this) {
-            if (length > 0) {
-                int refcount = segment.getRefCount();
-
-                int rootcount = roots.size();
-                buffer[Segment.ROOT_COUNT_OFFSET] = (byte) (rootcount >> 8);
-                buffer[Segment.ROOT_COUNT_OFFSET + 1] = (byte) rootcount;
-
-                int blobrefcount = blobrefs.size();
-                buffer[Segment.BLOBREF_COUNT_OFFSET] = (byte) (blobrefcount >> 8);
-                buffer[Segment.BLOBREF_COUNT_OFFSET + 1] = (byte) blobrefcount;
-
-                length = align(
-                        refcount * 16 + rootcount * 3 + blobrefcount * 2 + length,
-                        16);
-
-                checkState(length <= buffer.length);
-
-                int pos = refcount * 16;
-                if (pos + length <= buffer.length) {
-                    // the whole segment fits to the space *after* the referenced
-                    // segment identifiers we've already written, so we can safely
-                    // copy those bits ahead even if concurrent code is still
-                    // reading from that part of the buffer
-                    System.arraycopy(buffer, 0, buffer, buffer.length - length, pos);
-                    pos += buffer.length - length;
-                } else {
-                    // this might leave some empty space between the header and
-                    // the record data, but this case only occurs when the
-                    // segment is >252kB in size and the maximum overhead is <<4kB,
-                    // which is acceptable
-                    length = buffer.length;
-                }
-
-                for (Map.Entry<RecordId, RecordType> entry : roots.entrySet()) {
-                    int offset = entry.getKey().getOffset();
-                    buffer[pos++] = (byte) entry.getValue().ordinal();
-                    buffer[pos++] = (byte) (offset >> (8 + Segment.RECORD_ALIGN_BITS));
-                    buffer[pos++] = (byte) (offset >> Segment.RECORD_ALIGN_BITS);
-                }
-
-                for (RecordId blobref : blobrefs) {
-                    int offset = blobref.getOffset();
-                    buffer[pos++] = (byte) (offset >> (8 + Segment.RECORD_ALIGN_BITS));
-                    buffer[pos++] = (byte) (offset >> Segment.RECORD_ALIGN_BITS);
-                }
-
-                segmentId = segment.getSegmentId();
-                segmentBuffer = buffer;
-                segmentOffset = buffer.length - length;
-                segmentLength = length;
-
-                // Keep this segment in memory as it's likely to be accessed soon
-                ByteBuffer data;
-                if (buffer.length - length > 4096) {
-                    data = ByteBuffer.allocate(length);
-                    data.put(buffer, buffer.length - length, length);
-                    data.rewind();
-                } else {
-                    data = ByteBuffer.wrap(buffer, buffer.length - length, length);
-                }
-                tracker.setSegment(segmentId, new Segment(tracker, segmentId, data));
-
-                buffer = createNewBuffer(version);
-                roots.clear();
-                blobrefs.clear();
-                length = 0;
-                position = buffer.length;
-                segment = new Segment(tracker, buffer);
-                segment.getSegmentId().setSegment(segment);
+    public synchronized void flush() {
+        if (length > 0) {
+            int refcount = segment.getRefCount();
+
+            int rootcount = roots.size();
+            buffer[Segment.ROOT_COUNT_OFFSET] = (byte) (rootcount >> 8);
+            buffer[Segment.ROOT_COUNT_OFFSET + 1] = (byte) rootcount;
+
+            int blobrefcount = blobrefs.size();
+            buffer[Segment.BLOBREF_COUNT_OFFSET] = (byte) (blobrefcount >> 8);
+            buffer[Segment.BLOBREF_COUNT_OFFSET + 1] = (byte) blobrefcount;
+
+            length = align(
+                    refcount * 16 + rootcount * 3 + blobrefcount * 2 + length,
+                    16);
+
+            checkState(length <= buffer.length);
+
+            int pos = refcount * 16;
+            if (pos + length <= buffer.length) {
+                // the whole segment fits to the space *after* the referenced
+                // segment identifiers we've already written, so we can safely
+                // copy those bits ahead even if concurrent code is still
+                // reading from that part of the buffer
+                System.arraycopy(buffer, 0, buffer, buffer.length-length, pos);
+                pos += buffer.length - length;
+            } else {
+                // this might leave some empty space between the header and
+                // the record data, but this case only occurs when the
+                // segment is >252kB in size and the maximum overhead is <<4kB,
+                // which is acceptable
+                length = buffer.length;
             }
-        }
 
-        if (segmentId != null) {
-            log.debug("Writing data segment {} ({} bytes)", segmentId, segmentLength);
-            store.writeSegment(segmentId, segmentBuffer, segmentOffset, segmentLength);
+            for (Map.Entry<RecordId, RecordType> entry : roots.entrySet()) {
+                int offset = entry.getKey().getOffset();
+                buffer[pos++] = (byte) entry.getValue().ordinal();
+                buffer[pos++] = (byte) (offset >> (8 + Segment.RECORD_ALIGN_BITS));
+                buffer[pos++] = (byte) (offset >> Segment.RECORD_ALIGN_BITS);
+            }
+
+            for (RecordId blobref : blobrefs) {
+                int offset = blobref.getOffset();
+                buffer[pos++] = (byte) (offset >> (8 + Segment.RECORD_ALIGN_BITS));
+                buffer[pos++] = (byte) (offset >> Segment.RECORD_ALIGN_BITS);
+            }
+
+            SegmentId id = segment.getSegmentId();
+            log.debug("Writing data segment {} ({} bytes)", id, length);
+            store.writeSegment(id, buffer, buffer.length - length, length);
+
+            // Keep this segment in memory as it's likely to be accessed soon
+            ByteBuffer data;
+            if (buffer.length - length > 4096) {
+                data = ByteBuffer.allocate(length);
+                data.put(buffer, buffer.length - length, length);
+                data.rewind();
+            } else {
+                data = ByteBuffer.wrap(buffer, buffer.length - length, length);
+            }
+            tracker.setSegment(id, new Segment(tracker, id, data));
+
+            buffer = createNewBuffer(version);
+            roots.clear();
+            blobrefs.clear();
+            length = 0;
+            position = buffer.length;
+            segment = new Segment(tracker, buffer);
+            segment.getSegmentId().setSegment(segment);
         }
     }
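
For reference, the size computation in the restored flush() can be followed with
concrete numbers. The constants come straight from the diff (16 bytes per referenced
segment id, 3 bytes per root record entry, 2 bytes per blob reference, 16-byte
alignment); the counts are made up for illustration, and align() is assumed to round
up to the next multiple of 16:

    // Worked example of:
    //   length = align(refcount * 16 + rootcount * 3 + blobrefcount * 2 + length, 16)
    int refcount = 2;          // referenced segment ids
    int rootcount = 5;         // root record entries
    int blobrefcount = 1;      // blob reference entries
    int recordBytes = 1000;    // bytes of record data already in the buffer
    int unaligned = refcount * 16 + rootcount * 3 + blobrefcount * 2 + recordBytes; // 1049
    int aligned = (unaligned + 15) & ~15;                                           // 1056
    // The finished segment then occupies the last 'aligned' bytes of the write
    // buffer, i.e. it starts at buffer.length - aligned, which matches the
    // offset passed to store.writeSegment() in the code above.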