You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@geode.apache.org by up...@apache.org on 2015/08/22 02:30:59 UTC
[2/3] incubator-geode git commit: Making renames and deletes closer
to atomic
Making renames and deletes closer to atomic
It's important that when a file is renamed, the new file appears
atomically with all of the data - according to the LuceneDirectory
interface. It's also important that during a delete the file does not
start missing chunks before it is actually deleted.
These changes address those two issues. I've introduced a UUID for the
file that is used instead of the filename as part of the chunk id. When
a file is renamed, the new file just points to the same UUID. Deletes
delete the File object first and the chunks later.
There are still some dangling issues - namely that there can be dangling
chunks for files that have been deleted. But I think the more severe
issues with a lack of atomicity are resolved now.
I've added a couple of unit tests of partial renames and partial
deletes. These tests work by wrapping the underlying "region" with a
wrapper that throws a CacheClosedException after a certain number of
operations have happened - essentially mimicking a member being closed
or killed.
Project: http://git-wip-us.apache.org/repos/asf/incubator-geode/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-geode/commit/941a5656
Tree: http://git-wip-us.apache.org/repos/asf/incubator-geode/tree/941a5656
Diff: http://git-wip-us.apache.org/repos/asf/incubator-geode/diff/941a5656
Branch: refs/heads/feature/GEODE-11
Commit: 941a565658def4e4685d98f7ab877f286c07d702
Parents: da06e96
Author: Dan Smith <up...@apache.org>
Authored: Fri Aug 21 15:16:52 2015 -0700
Committer: Dan Smith <up...@apache.org>
Committed: Fri Aug 21 15:25:26 2015 -0700
----------------------------------------------------------------------
.../lucene/internal/filesystem/ChunkKey.java | 19 +--
.../cache/lucene/internal/filesystem/File.java | 2 +
.../lucene/internal/filesystem/FileSystem.java | 46 +++----
.../filesystem/FileSystemJUnitTest.java | 138 ++++++++++++++++++-
4 files changed, 167 insertions(+), 38 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/941a5656/gemfire-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/filesystem/ChunkKey.java
----------------------------------------------------------------------
diff --git a/gemfire-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/filesystem/ChunkKey.java b/gemfire-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/filesystem/ChunkKey.java
index 5564c02..ada0382 100644
--- a/gemfire-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/filesystem/ChunkKey.java
+++ b/gemfire-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/filesystem/ChunkKey.java
@@ -1,6 +1,7 @@
package com.gemstone.gemfire.cache.lucene.internal.filesystem;
import java.io.Serializable;
+import java.util.UUID;
/**
* The key for a single chunk on a file stored within a region.
@@ -9,19 +10,19 @@ public class ChunkKey implements Serializable {
private static final long serialVersionUID = 1L;
- String fileName;
+ UUID fileId;
int chunkId;
- ChunkKey(String fileName, int chunkId) {
- this.fileName = fileName;
+ ChunkKey(UUID fileName, int chunkId) {
+ this.fileId = fileName;
this.chunkId = chunkId;
}
/**
* @return the fileName
*/
- public String getFileName() {
- return fileName;
+ public UUID getFileId() {
+ return fileId;
}
/**
@@ -35,7 +36,7 @@ public class ChunkKey implements Serializable {
public int hashCode() {
final int prime = 31;
int result = 1;
- result = prime * result + fileName.hashCode();
+ result = prime * result + fileId.hashCode();
result = prime * result + chunkId;
return result;
}
@@ -55,11 +56,11 @@ public class ChunkKey implements Serializable {
if (chunkId != other.chunkId) {
return false;
}
- if (fileName == null) {
- if (other.fileName != null) {
+ if (fileId == null) {
+ if (other.fileId != null) {
return false;
}
- } else if (!fileName.equals(other.fileName)) {
+ } else if (!fileId.equals(other.fileId)) {
return false;
}
return true;
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/941a5656/gemfire-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/filesystem/File.java
----------------------------------------------------------------------
diff --git a/gemfire-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/filesystem/File.java b/gemfire-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/filesystem/File.java
index c286051..388e469 100644
--- a/gemfire-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/filesystem/File.java
+++ b/gemfire-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/filesystem/File.java
@@ -3,6 +3,7 @@ package com.gemstone.gemfire.cache.lucene.internal.filesystem;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.Serializable;
+import java.util.UUID;
/**
* A file that is stored in a gemfire region.
@@ -18,6 +19,7 @@ public class File implements Serializable {
int chunks = 0;
long created = System.currentTimeMillis();
long modified = created;
+ UUID id = UUID.randomUUID();
File(final FileSystem fileSystem, final String name) {
setFileSystem(fileSystem);
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/941a5656/gemfire-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/filesystem/FileSystem.java
----------------------------------------------------------------------
diff --git a/gemfire-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/filesystem/FileSystem.java b/gemfire-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/filesystem/FileSystem.java
index cebd2e5..ef69322 100644
--- a/gemfire-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/filesystem/FileSystem.java
+++ b/gemfire-lucene/src/main/java/com/gemstone/gemfire/cache/lucene/internal/filesystem/FileSystem.java
@@ -53,16 +53,20 @@ public class FileSystem {
return file;
}
- public void deleteFile(final String name) {
+ public void deleteFile(final String name) throws FileNotFoundException {
// TODO locks?
// TODO - What is the state of the system if
// things crash in the middle of removing this file?
// Seems like a file will be left with some
// dangling chunks at the end of the file
+ File file = fileRegion.remove(name);
+ if(file == null) {
+ throw new FileNotFoundException(name);
+ }
// TODO consider removeAll with all ChunkKeys listed.
- final ChunkKey key = new ChunkKey(name, 0);
+ final ChunkKey key = new ChunkKey(file.id, 0);
while (true) {
// TODO consider mutable ChunkKey
if (null == chunkRegion.remove(key)) {
@@ -71,46 +75,32 @@ public class FileSystem {
}
key.chunkId++;
}
-
- fileRegion.remove(name);
}
-
+
public void renameFile(String source, String dest) throws IOException {
- final File destFile = createFile(dest);
-
- // TODO - What is the state of the system if
- // things crash in the middle of moving this file?
- // Seems like a file will be left with some
- // dangling chunks at the end of the file
-
final File sourceFile = fileRegion.get(source);
if (null == sourceFile) {
throw new FileNotFoundException(source);
}
-
+
+ final File destFile = createFile(dest);
+
destFile.chunks = sourceFile.chunks;
destFile.created = sourceFile.created;
destFile.length = sourceFile.length;
destFile.modified = sourceFile.modified;
-
- // TODO copy on write?
- final ChunkKey sourceKey = new ChunkKey(source, 0);
- while (true) {
- byte[] chunk = chunkRegion.remove(sourceKey);
- if (null == chunk) {
- // no more chunks
- break;
- }
- putChunk(destFile, sourceKey.chunkId, chunk);
- sourceKey.chunkId++;
- }
+ destFile.id = sourceFile.id;
- updateFile(destFile);
+ // TODO - What is the state of the system if
+ // things crash in the middle of moving this file?
+ // Seems like we will have two files pointing
+ // at the same data
+
fileRegion.remove(source);
}
byte[] getChunk(final File file, final int id) {
- final ChunkKey key = new ChunkKey(file.getName(), id);
+ final ChunkKey key = new ChunkKey(file.id, id);
//The file's metadata indicates that this chunk shouldn't
//exist. Purge all of the chunks that are larger than the file metadata
@@ -128,7 +118,7 @@ public class FileSystem {
}
public void putChunk(final File file, final int id, final byte[] chunk) {
- final ChunkKey key = new ChunkKey(file.getName(), id);
+ final ChunkKey key = new ChunkKey(file.id, id);
chunkRegion.put(key, chunk);
}
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/941a5656/gemfire-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/filesystem/FileSystemJUnitTest.java
----------------------------------------------------------------------
diff --git a/gemfire-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/filesystem/FileSystemJUnitTest.java b/gemfire-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/filesystem/FileSystemJUnitTest.java
index 130b6cc..f29236a 100644
--- a/gemfire-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/filesystem/FileSystemJUnitTest.java
+++ b/gemfire-lucene/src/test/java/com/gemstone/gemfire/cache/lucene/internal/filesystem/FileSystemJUnitTest.java
@@ -12,6 +12,7 @@ import java.io.InputStream;
import java.io.OutputStream;
import java.lang.reflect.Method;
import java.util.Arrays;
+import java.util.Collections;
import java.util.HashSet;
import java.util.Random;
import java.util.concurrent.ConcurrentHashMap;
@@ -219,6 +220,141 @@ public class FileSystemJUnitTest {
assertArrayEquals(bytes, results);
}
}
+
+ /**
+ * Test what happens when a file rename is aborted in the middle
+ * due to a cache closed exception. The next member
+ * that uses those files should be able to clean up after
+ * the partial rename.
+ */
+ @Test
+ public void testPartialRename() throws IOException {
+
+ final CountOperations countOperations = new CountOperations();
+ //Create a couple of mock regions where we count the operations
+ //that happen to them. We will then use this to abort the rename
+ //in the middle.
+ ConcurrentHashMap<String, File> spyFileRegion = Mockito.mock(ConcurrentHashMap.class, new SpyWrapper(countOperations, fileRegion));
+ ConcurrentHashMap<ChunkKey, byte[]> spyChunkRegion = Mockito.mock(ConcurrentHashMap.class, new SpyWrapper(countOperations, chunkRegion));
+
+ system = new FileSystem(spyFileRegion, spyChunkRegion);
+
+ String name = "file";
+ File file = system.createFile(name);
+
+ ByteArrayOutputStream expected = new ByteArrayOutputStream();
+
+ //Make sure the file has a lot of chunks
+ for(int i=0; i < 10; i++) {
+ expected.write(writeRandomBytes(file));
+ }
+
+ String name2 = "file2";
+ countOperations.reset();
+
+ system.renameFile(name, name2);
+
+ assertTrue(2 <= countOperations.count);
+
+ countOperations.after((int) Math.ceil(countOperations.count / 2.0), new Runnable() {
+
+ @Override
+ public void run() {
+ throw new CacheClosedException();
+ }
+ });
+ countOperations.reset();
+
+ String name3 = "file3";
+ try {
+ system.renameFile(name2, name3);
+ fail("should have seen an error");
+ } catch(CacheClosedException e) {
+
+ }
+
+ system = new FileSystem(fileRegion, chunkRegion);
+
+ //This is not the ideal behavior. We are left
+ //with two duplicate files. However, we will still
+ //verify that neither file is corrupted.
+ assertEquals(2, system.listFileNames().size());
+ File sourceFile = system.getFile(name2);
+ File destFile = system.getFile(name3);
+
+ byte[] expectedBytes = expected.toByteArray();
+
+ assertContents(expectedBytes, sourceFile);
+ assertContents(expectedBytes, destFile);
+ }
+
+ /**
+ * Test what happens when a file delete is aborted in the middle
+ * due to a cache closed exception. The next member
+ * that uses those files should be able to clean up after
+ * the partial delete.
+ */
+ @Test
+ public void testPartialDelete() throws IOException {
+
+ final CountOperations countOperations = new CountOperations();
+ //Create a couple of mock regions where we count the operations
+ //that happen to them. We will then use this to abort the rename
+ //in the middle.
+ ConcurrentHashMap<String, File> spyFileRegion = Mockito.mock(ConcurrentHashMap.class, new SpyWrapper(countOperations, fileRegion));
+ ConcurrentHashMap<ChunkKey, byte[]> spyChunkRegion = Mockito.mock(ConcurrentHashMap.class, new SpyWrapper(countOperations, chunkRegion));
+
+ system = new FileSystem(spyFileRegion, spyChunkRegion);
+
+ String name1 = "file1";
+ String name2 = "file2";
+ File file1 = system.createFile(name1);
+ File file2 = system.createFile(name2);
+
+ ByteArrayOutputStream expected = new ByteArrayOutputStream();
+
+ //Make sure the file has a lot of chunks
+ for(int i=0; i < 10; i++) {
+ byte[] bytes = writeRandomBytes(file1);
+ writeBytes(file2, bytes);
+ expected.write(bytes);
+ }
+
+ countOperations.reset();
+
+ system.deleteFile(name1);
+
+ assertTrue(2 <= countOperations.count);
+
+ countOperations.after(countOperations.count / 2, new Runnable() {
+
+ @Override
+ public void run() {
+ throw new CacheClosedException();
+ }
+ });
+ countOperations.reset();
+
+ try {
+ system.deleteFile(name2);
+ fail("should have seen an error");
+ } catch(CacheClosedException e) {
+
+ }
+
+ system = new FileSystem(fileRegion, chunkRegion);
+
+ if(system.listFileNames().size() == 0) {
+ //File was deleted, but shouldn't have any dangling chunks at this point
+ assertEquals(Collections.EMPTY_SET, fileRegion.keySet());
+ //TODO - need to purge chunks of deleted files somehow.
+// assertEquals(Collections.EMPTY_SET, chunkRegion.keySet());
+ } else {
+ file2 = system.getFile(name2);
+ assertContents(expected.toByteArray(), file2);
+ }
+
+ }
private File getOrCreateFile(String name) throws IOException {
try {
@@ -304,7 +440,7 @@ public class FileSystemJUnitTest {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
count++;
- if(count >= limit) {
+ if(count > limit) {
limitAction.run();
}
return null;