You are viewing a plain text version of this content. The canonical link for it is here.
Posted to oak-commits@jackrabbit.apache.org by to...@apache.org on 2018/10/04 11:07:29 UTC
svn commit: r1842797 - in /jackrabbit/oak/trunk/oak-segment-azure: pom.xml
src/test/java/org/apache/jackrabbit/oak/segment/split/SplitPersistenceBlobTest.java
Author: tomekr
Date: Thu Oct 4 11:07:29 2018
New Revision: 1842797
URL: http://svn.apache.org/viewvc?rev=1842797&view=rev
Log:
OAK-7805: getBinaryReferences() may return null when using the split persistence
- added test case created by @amitjain
Added:
jackrabbit/oak/trunk/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/split/SplitPersistenceBlobTest.java
Modified:
jackrabbit/oak/trunk/oak-segment-azure/pom.xml
Modified: jackrabbit/oak/trunk/oak-segment-azure/pom.xml
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-segment-azure/pom.xml?rev=1842797&r1=1842796&r2=1842797&view=diff
==============================================================================
--- jackrabbit/oak/trunk/oak-segment-azure/pom.xml (original)
+++ jackrabbit/oak/trunk/oak-segment-azure/pom.xml Thu Oct 4 11:07:29 2018
@@ -141,6 +141,12 @@
<!-- Test dependencies -->
<dependency>
<groupId>org.apache.jackrabbit</groupId>
+ <artifactId>oak-blob-plugins</artifactId>
+ <version>${project.version}</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.jackrabbit</groupId>
<artifactId>oak-segment-tar</artifactId>
<version>${project.version}</version>
<classifier>tests</classifier>
Added: jackrabbit/oak/trunk/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/split/SplitPersistenceBlobTest.java
URL: http://svn.apache.org/viewvc/jackrabbit/oak/trunk/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/split/SplitPersistenceBlobTest.java?rev=1842797&view=auto
==============================================================================
--- jackrabbit/oak/trunk/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/split/SplitPersistenceBlobTest.java (added)
+++ jackrabbit/oak/trunk/oak-segment-azure/src/test/java/org/apache/jackrabbit/oak/segment/split/SplitPersistenceBlobTest.java Thu Oct 4 11:07:29 2018
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.jackrabbit.oak.segment.split;
+
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.security.InvalidKeyException;
+import java.util.Random;
+import java.util.Set;
+
+import com.google.common.collect.Sets;
+import com.microsoft.azure.storage.StorageException;
+import org.apache.jackrabbit.oak.api.Blob;
+import org.apache.jackrabbit.oak.api.CommitFailedException;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.DataStoreBlobStore;
+import org.apache.jackrabbit.oak.plugins.blob.datastore.OakFileDataStore;
+import org.apache.jackrabbit.oak.segment.SegmentNodeStore;
+import org.apache.jackrabbit.oak.segment.SegmentNodeStoreBuilders;
+import org.apache.jackrabbit.oak.segment.azure.AzurePersistence;
+import org.apache.jackrabbit.oak.segment.azure.AzuriteDockerRule;
+import org.apache.jackrabbit.oak.segment.file.FileStore;
+import org.apache.jackrabbit.oak.segment.file.FileStoreBuilder;
+import org.apache.jackrabbit.oak.segment.file.InvalidFileStoreVersionException;
+import org.apache.jackrabbit.oak.segment.file.tar.TarPersistence;
+import org.apache.jackrabbit.oak.segment.spi.persistence.SegmentNodeStorePersistence;
+import org.apache.jackrabbit.oak.spi.blob.BlobStore;
+import org.apache.jackrabbit.oak.spi.commit.CommitInfo;
+import org.apache.jackrabbit.oak.spi.commit.EmptyHook;
+import org.apache.jackrabbit.oak.spi.state.NodeBuilder;
+import org.apache.jackrabbit.oak.spi.state.NodeStore;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static com.google.common.collect.Sets.newHashSet;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test for OAK-7805: verifies that binary references collected through a
+ * {@code SplitPersistence} include blobs written to the shared (base)
+ * persistence before the split as well as blobs written afterwards.
+ * <p>
+ * Setup runs in two phases against the same shared Azure persistence:
+ * first a base store writes a blob and is closed, then the store is
+ * reopened, a second blob is written, and a split persistence (shared +
+ * local TAR) is layered on top for the actual assertions.
+ */
+public class SplitPersistenceBlobTest {
+
+    @ClassRule
+    public static AzuriteDockerRule azurite = new AzuriteDockerRule();
+
+    @Rule
+    public TemporaryFolder folder = new TemporaryFolder(new File("target"));
+
+    private SegmentNodeStore base;
+
+    private SegmentNodeStore split;
+
+    private FileStore baseFileStore;
+
+    private FileStore splitFileStore;
+
+    // Content identity of the blob written to the shared persistence
+    // before the split point; it must remain visible through the split.
+    private String baseBlobId;
+
+    private SegmentNodeStorePersistence splitPersistence;
+
+    @Before
+    public void setup() throws IOException, InvalidFileStoreVersionException, CommitFailedException, URISyntaxException, InvalidKeyException, StorageException {
+        SegmentNodeStorePersistence sharedPersistence =
+            new AzurePersistence(azurite.getContainer("oak-test").getDirectoryReference("oak"));
+        File dataStoreDir = new File(folder.getRoot(), "blobstore");
+        BlobStore blobStore = newBlobStore(dataStoreDir);
+
+        // Phase 1: write a blob to the shared persistence, then close the
+        // store so the split persistence later sees it as pre-existing data.
+        baseFileStore = FileStoreBuilder
+            .fileStoreBuilder(folder.newFolder())
+            .withCustomPersistence(sharedPersistence)
+            .withBlobStore(blobStore)
+            .build();
+        base = SegmentNodeStoreBuilders.builder(baseFileStore).build();
+
+        NodeBuilder builder = base.getRoot().builder();
+        builder.child("foo").child("bar").setProperty("version", "v1");
+        base.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+
+        baseBlobId = createLoad(base, baseFileStore).getContentIdentity();
+        baseFileStore.flush();
+        baseFileStore.close();
+
+        // Phase 2: reopen the shared persistence and add a second blob
+        // before creating the split.
+        baseFileStore = FileStoreBuilder
+            .fileStoreBuilder(folder.newFolder())
+            .withCustomPersistence(sharedPersistence)
+            .withBlobStore(blobStore)
+            .build();
+        base = SegmentNodeStoreBuilders.builder(baseFileStore).build();
+
+        createLoad(base, baseFileStore).getContentIdentity();
+        baseFileStore.flush();
+
+        // Layer a local TAR persistence on top of the shared one; new
+        // writes go to the local part, old data is read from the shared part.
+        SegmentNodeStorePersistence localPersistence = new TarPersistence(folder.newFolder());
+        splitPersistence = new SplitPersistence(sharedPersistence, localPersistence);
+
+        splitFileStore = FileStoreBuilder
+            .fileStoreBuilder(folder.newFolder())
+            .withCustomPersistence(splitPersistence)
+            .withBlobStore(blobStore)
+            .build();
+        split = SegmentNodeStoreBuilders.builder(splitFileStore).build();
+    }
+
+    @After
+    public void tearDown() {
+        // Close both stores (not just the base one) to avoid leaking the
+        // split store's resources; null-guard in case setup() failed early.
+        if (splitFileStore != null) {
+            splitFileStore.close();
+        }
+        if (baseFileStore != null) {
+            baseFileStore.close();
+        }
+    }
+
+    @Test
+    public void collectReferences()
+            throws IOException, CommitFailedException {
+        String blobId = createLoad(split, splitFileStore).getContentIdentity();
+
+        // Both the pre-split blob and the newly written one must be reported.
+        assertReferences(2, newHashSet(baseBlobId, blobId));
+    }
+
+    /**
+     * Creates a blob of {@code size} random bytes in the given node store.
+     */
+    private static Blob createBlob(NodeStore nodeStore, int size) throws IOException {
+        byte[] data = new byte[size];
+        new Random().nextBytes(data);
+        return nodeStore.createBlob(new ByteArrayInputStream(data));
+    }
+
+    /**
+     * Builds a file-backed data store wrapped as a {@link BlobStore},
+     * rooted at the given directory.
+     */
+    private static BlobStore newBlobStore(File directory) {
+        OakFileDataStore delegate = new OakFileDataStore();
+        delegate.setPath(directory.getAbsolutePath());
+        delegate.init(null);
+        return new DataStoreBlobStore(delegate);
+    }
+
+    /**
+     * Writes an 18kB random blob as a property, merges and flushes, and
+     * returns the blob so the caller can read its content identity.
+     * The size is large enough to force the blob into the external
+     * blob store rather than being inlined.
+     */
+    private Blob createLoad(SegmentNodeStore store, FileStore fileStore)
+            throws IOException, CommitFailedException {
+        NodeBuilder builder = store.getRoot().builder();
+        Blob blob = createBlob(store, 18000);
+        builder.setProperty("bin", blob);
+        store.merge(builder, EmptyHook.INSTANCE, CommitInfo.EMPTY);
+        fileStore.flush();
+        return blob;
+    }
+
+    /**
+     * Asserts that the split store reports exactly {@code count} blob
+     * references and that they match {@code blobIds} (OAK-7805 regression:
+     * getBinaryReferences() previously returned null under split persistence).
+     */
+    private void assertReferences(int count, Set<String> blobIds)
+            throws IOException {
+        Set<String> actualReferences = newHashSet();
+        splitFileStore.collectBlobReferences(actualReferences::add);
+        assertEquals("visible references different", count, actualReferences.size());
+        assertEquals("Binary reference returned should be same", blobIds, actualReferences);
+    }
+}