Posted to commits@accumulo.apache.org by ct...@apache.org on 2015/01/09 03:44:20 UTC
[16/66] [abbrv] accumulo git commit: ACCUMULO-3451 Format master branch (1.7.0-SNAPSHOT)
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/test/java/org/apache/accumulo/server/util/CloneTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/util/CloneTest.java b/server/base/src/test/java/org/apache/accumulo/server/util/CloneTest.java
index e2d1ecb..ebb83bf 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/util/CloneTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/util/CloneTest.java
@@ -38,115 +38,115 @@ import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.io.Text;
public class CloneTest extends TestCase {
-
+
public void testNoFiles() throws Exception {
MockInstance mi = new MockInstance();
Connector conn = mi.getConnector("", new PasswordToken(""));
-
+
KeyExtent ke = new KeyExtent(new Text("0"), null, null);
Mutation mut = ke.getPrevRowUpdateMutation();
-
+
TabletsSection.ServerColumnFamily.TIME_COLUMN.put(mut, new Value("M0".getBytes()));
TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut, new Value("/default_tablet".getBytes()));
-
+
BatchWriter bw1 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-
+
bw1.addMutation(mut);
-
+
bw1.close();
-
+
BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-
+
MetadataTableUtil.initializeClone("0", "1", conn, bw2);
-
+
int rc = MetadataTableUtil.checkClone("0", "1", conn, bw2);
-
+
assertEquals(0, rc);
-
+
// scan tables metadata entries and confirm the same
-
+
}
-
+
public void testFilesChange() throws Exception {
MockInstance mi = new MockInstance();
Connector conn = mi.getConnector("", new PasswordToken(""));
-
+
KeyExtent ke = new KeyExtent(new Text("0"), null, null);
Mutation mut = ke.getPrevRowUpdateMutation();
-
+
TabletsSection.ServerColumnFamily.TIME_COLUMN.put(mut, new Value("M0".getBytes()));
TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut, new Value("/default_tablet".getBytes()));
mut.put(DataFileColumnFamily.NAME.toString(), "/default_tablet/0_0.rf", "1,200");
-
+
BatchWriter bw1 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-
+
bw1.addMutation(mut);
-
+
bw1.flush();
-
+
BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-
+
MetadataTableUtil.initializeClone("0", "1", conn, bw2);
-
+
Mutation mut2 = new Mutation(ke.getMetadataEntry());
mut2.putDelete(DataFileColumnFamily.NAME.toString(), "/default_tablet/0_0.rf");
mut2.put(DataFileColumnFamily.NAME.toString(), "/default_tablet/1_0.rf", "2,300");
-
+
bw1.addMutation(mut2);
bw1.flush();
-
+
int rc = MetadataTableUtil.checkClone("0", "1", conn, bw2);
-
+
assertEquals(1, rc);
-
+
rc = MetadataTableUtil.checkClone("0", "1", conn, bw2);
-
+
assertEquals(0, rc);
-
+
Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
scanner.setRange(new KeyExtent(new Text("1"), null, null).toMetadataRange());
-
+
HashSet<String> files = new HashSet<String>();
-
+
for (Entry<Key,Value> entry : scanner) {
if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME))
files.add(entry.getKey().getColumnQualifier().toString());
}
-
+
assertEquals(1, files.size());
assertTrue(files.contains("../0/default_tablet/1_0.rf"));
-
+
}
-
+
// test split where files of children are the same
public void testSplit1() throws Exception {
MockInstance mi = new MockInstance();
Connector conn = mi.getConnector("", new PasswordToken(""));
-
+
BatchWriter bw1 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-
+
bw1.addMutation(createTablet("0", null, null, "/default_tablet", "/default_tablet/0_0.rf"));
-
+
bw1.flush();
-
+
BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-
+
MetadataTableUtil.initializeClone("0", "1", conn, bw2);
-
+
bw1.addMutation(createTablet("0", "m", null, "/default_tablet", "/default_tablet/0_0.rf"));
bw1.addMutation(createTablet("0", null, "m", "/t-1", "/default_tablet/0_0.rf"));
-
+
bw1.flush();
-
+
int rc = MetadataTableUtil.checkClone("0", "1", conn, bw2);
-
+
assertEquals(0, rc);
-
+
Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
scanner.setRange(new KeyExtent(new Text("1"), null, null).toMetadataRange());
-
+
HashSet<String> files = new HashSet<String>();
-
+
int count = 0;
for (Entry<Key,Value> entry : scanner) {
if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
@@ -154,61 +154,61 @@ public class CloneTest extends TestCase {
count++;
}
}
-
+
assertEquals(1, count);
assertEquals(1, files.size());
assertTrue(files.contains("../0/default_tablet/0_0.rf"));
}
-
+
// test split where files of children differ... like majc and split occurred
public void testSplit2() throws Exception {
MockInstance mi = new MockInstance();
Connector conn = mi.getConnector("", new PasswordToken(""));
-
+
BatchWriter bw1 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-
+
bw1.addMutation(createTablet("0", null, null, "/default_tablet", "/default_tablet/0_0.rf"));
-
+
bw1.flush();
-
+
BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-
+
MetadataTableUtil.initializeClone("0", "1", conn, bw2);
-
+
bw1.addMutation(createTablet("0", "m", null, "/default_tablet", "/default_tablet/1_0.rf"));
Mutation mut3 = createTablet("0", null, "m", "/t-1", "/default_tablet/1_0.rf");
mut3.putDelete(DataFileColumnFamily.NAME.toString(), "/default_tablet/0_0.rf");
bw1.addMutation(mut3);
-
+
bw1.flush();
-
+
int rc = MetadataTableUtil.checkClone("0", "1", conn, bw2);
-
+
assertEquals(1, rc);
-
+
rc = MetadataTableUtil.checkClone("0", "1", conn, bw2);
-
+
assertEquals(0, rc);
-
+
Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
scanner.setRange(new KeyExtent(new Text("1"), null, null).toMetadataRange());
-
+
HashSet<String> files = new HashSet<String>();
-
+
int count = 0;
-
+
for (Entry<Key,Value> entry : scanner) {
if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
files.add(entry.getKey().getColumnQualifier().toString());
count++;
}
}
-
+
assertEquals(1, files.size());
assertEquals(2, count);
assertTrue(files.contains("../0/default_tablet/1_0.rf"));
}
-
+
private static Mutation deleteTablet(String tid, String endRow, String prevRow, String dir, String file) throws Exception {
KeyExtent ke = new KeyExtent(new Text(tid), endRow == null ? null : new Text(endRow), prevRow == null ? null : new Text(prevRow));
Mutation mut = new Mutation(ke.getMetadataEntry());
@@ -216,53 +216,53 @@ public class CloneTest extends TestCase {
TabletsSection.ServerColumnFamily.TIME_COLUMN.putDelete(mut);
TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.putDelete(mut);
mut.putDelete(DataFileColumnFamily.NAME.toString(), file);
-
+
return mut;
}
-
+
private static Mutation createTablet(String tid, String endRow, String prevRow, String dir, String file) throws Exception {
KeyExtent ke = new KeyExtent(new Text(tid), endRow == null ? null : new Text(endRow), prevRow == null ? null : new Text(prevRow));
Mutation mut = ke.getPrevRowUpdateMutation();
-
+
TabletsSection.ServerColumnFamily.TIME_COLUMN.put(mut, new Value("M0".getBytes()));
TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut, new Value(dir.getBytes()));
mut.put(DataFileColumnFamily.NAME.toString(), file, "10,200");
-
+
return mut;
}
-
+
// test two tablets splitting into four
public void testSplit3() throws Exception {
MockInstance mi = new MockInstance();
Connector conn = mi.getConnector("", new PasswordToken(""));
-
+
BatchWriter bw1 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-
+
bw1.addMutation(createTablet("0", "m", null, "/d1", "/d1/file1"));
bw1.addMutation(createTablet("0", null, "m", "/d2", "/d2/file2"));
-
+
bw1.flush();
-
+
BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-
+
MetadataTableUtil.initializeClone("0", "1", conn, bw2);
-
+
bw1.addMutation(createTablet("0", "f", null, "/d1", "/d1/file3"));
bw1.addMutation(createTablet("0", "m", "f", "/d3", "/d1/file1"));
bw1.addMutation(createTablet("0", "s", "m", "/d2", "/d2/file2"));
bw1.addMutation(createTablet("0", null, "s", "/d4", "/d2/file2"));
-
+
bw1.flush();
-
+
int rc = MetadataTableUtil.checkClone("0", "1", conn, bw2);
-
+
assertEquals(0, rc);
-
+
Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
scanner.setRange(new KeyExtent(new Text("1"), null, null).toMetadataRange());
-
+
HashSet<String> files = new HashSet<String>();
-
+
int count = 0;
for (Entry<Key,Value> entry : scanner) {
if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
@@ -270,63 +270,63 @@ public class CloneTest extends TestCase {
count++;
}
}
-
+
assertEquals(2, count);
assertEquals(2, files.size());
assertTrue(files.contains("../0/d1/file1"));
assertTrue(files.contains("../0/d2/file2"));
}
-
+
// test cloned marker
public void testClonedMarker() throws Exception {
-
+
MockInstance mi = new MockInstance();
Connector conn = mi.getConnector("", new PasswordToken(""));
-
+
BatchWriter bw1 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-
+
bw1.addMutation(createTablet("0", "m", null, "/d1", "/d1/file1"));
bw1.addMutation(createTablet("0", null, "m", "/d2", "/d2/file2"));
-
+
bw1.flush();
-
+
BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-
+
MetadataTableUtil.initializeClone("0", "1", conn, bw2);
-
+
bw1.addMutation(deleteTablet("0", "m", null, "/d1", "/d1/file1"));
bw1.addMutation(deleteTablet("0", null, "m", "/d2", "/d2/file2"));
-
+
bw1.flush();
-
+
bw1.addMutation(createTablet("0", "f", null, "/d1", "/d1/file3"));
bw1.addMutation(createTablet("0", "m", "f", "/d3", "/d1/file1"));
bw1.addMutation(createTablet("0", "s", "m", "/d2", "/d2/file3"));
bw1.addMutation(createTablet("0", null, "s", "/d4", "/d4/file3"));
-
+
bw1.flush();
-
+
int rc = MetadataTableUtil.checkClone("0", "1", conn, bw2);
-
+
assertEquals(1, rc);
-
+
bw1.addMutation(deleteTablet("0", "m", "f", "/d3", "/d1/file1"));
-
+
bw1.flush();
-
+
bw1.addMutation(createTablet("0", "m", "f", "/d3", "/d1/file3"));
-
+
bw1.flush();
-
+
rc = MetadataTableUtil.checkClone("0", "1", conn, bw2);
-
+
assertEquals(0, rc);
-
+
Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
scanner.setRange(new KeyExtent(new Text("1"), null, null).toMetadataRange());
-
+
HashSet<String> files = new HashSet<String>();
-
+
int count = 0;
for (Entry<Key,Value> entry : scanner) {
if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
@@ -334,42 +334,42 @@ public class CloneTest extends TestCase {
count++;
}
}
-
+
assertEquals(3, count);
assertEquals(3, files.size());
assertTrue(files.contains("../0/d1/file1"));
assertTrue(files.contains("../0/d2/file3"));
assertTrue(files.contains("../0/d4/file3"));
}
-
+
// test two tablets splitting into four
public void testMerge() throws Exception {
MockInstance mi = new MockInstance();
Connector conn = mi.getConnector("", new PasswordToken(""));
-
+
BatchWriter bw1 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-
+
bw1.addMutation(createTablet("0", "m", null, "/d1", "/d1/file1"));
bw1.addMutation(createTablet("0", null, "m", "/d2", "/d2/file2"));
-
+
bw1.flush();
-
+
BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-
+
MetadataTableUtil.initializeClone("0", "1", conn, bw2);
-
+
bw1.addMutation(deleteTablet("0", "m", null, "/d1", "/d1/file1"));
Mutation mut = createTablet("0", null, null, "/d2", "/d2/file2");
mut.put(DataFileColumnFamily.NAME.toString(), "/d1/file1", "10,200");
bw1.addMutation(mut);
-
+
bw1.flush();
-
+
try {
MetadataTableUtil.checkClone("0", "1", conn, bw2);
assertTrue(false);
} catch (TabletIterator.TabletDeletedException tde) {}
-
+
}
-
+
}
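
A note on the pattern these tests exercise: MetadataTableUtil.checkClone returns the number of tablets whose files diverged while the clone was being set up, so a caller repeats the check until it reports zero. A minimal sketch under that reading (the retry loop is illustrative, not code from this commit; the calls mirror the tests above):

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.mock.MockInstance;
    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
    import org.apache.accumulo.core.metadata.MetadataTable;
    import org.apache.accumulo.server.util.MetadataTableUtil;

    public class CloneCheckSketch {
      public static void main(String[] args) throws Exception {
        Connector conn = new MockInstance().getConnector("", new PasswordToken(""));
        BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
        // Seed clone table id "1" from source table id "0", as the tests do.
        MetadataTableUtil.initializeClone("0", "1", conn, bw);
        int diverged = MetadataTableUtil.checkClone("0", "1", conn, bw);
        while (diverged > 0) {
          // a non-zero count means source files changed mid-clone and the
          // affected clone entries were rewritten; verify again until stable
          diverged = MetadataTableUtil.checkClone("0", "1", conn, bw);
        }
        bw.close();
      }
    }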
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/test/java/org/apache/accumulo/server/util/DefaultMapTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/util/DefaultMapTest.java b/server/base/src/test/java/org/apache/accumulo/server/util/DefaultMapTest.java
index b68d412..3303d8a 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/util/DefaultMapTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/util/DefaultMapTest.java
@@ -16,15 +16,15 @@
*/
package org.apache.accumulo.server.util;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.accumulo.server.util.DefaultMap;
import org.junit.Test;
public class DefaultMapTest {
-
+
@Test
public void testDefaultMap() {
Integer value = new DefaultMap<String,Integer>(0).get("test");
@@ -33,15 +33,15 @@ public class DefaultMapTest {
value = new DefaultMap<String,Integer>(1).get("test");
assertNotNull(value);
assertEquals(new Integer(1), value);
-
+
AtomicInteger canConstruct = new DefaultMap<String,AtomicInteger>(new AtomicInteger(1)).get("test");
assertNotNull(canConstruct);
assertEquals(new AtomicInteger(0).get(), canConstruct.get());
-
+
DefaultMap<String,String> map = new DefaultMap<String,String>("");
assertEquals(map.get("foo"), "");
map.put("foo", "bar");
assertEquals(map.get("foo"), "bar");
}
-
+
}
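
For reference, the behavior the test above pins down, restated as a plain usage sketch (the output comments follow the assertions above; nothing here is new API):

    import java.util.concurrent.atomic.AtomicInteger;
    import org.apache.accumulo.server.util.DefaultMap;

    public class DefaultMapSketch {
      public static void main(String[] args) {
        // Missing keys yield the supplied default instead of null.
        DefaultMap<String,String> map = new DefaultMap<String,String>("");
        System.out.println(map.get("foo").isEmpty()); // true
        map.put("foo", "bar");
        System.out.println(map.get("foo"));           // bar

        // For a mutable default like AtomicInteger, a fresh instance is
        // constructed for the lookup: the test compares against
        // new AtomicInteger(0), not the AtomicInteger(1) that was passed in.
        AtomicInteger n = new DefaultMap<String,AtomicInteger>(new AtomicInteger(1)).get("x");
        System.out.println(n.get());                  // 0
      }
    }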
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/test/java/org/apache/accumulo/server/util/FileInfoTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/util/FileInfoTest.java b/server/base/src/test/java/org/apache/accumulo/server/util/FileInfoTest.java
index cd568cf..77fe8b9 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/util/FileInfoTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/util/FileInfoTest.java
@@ -16,11 +16,12 @@
*/
package org.apache.accumulo.server.util;
+import static org.junit.Assert.assertEquals;
+
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.server.util.FileUtil.FileInfo;
import org.junit.Before;
import org.junit.Test;
-import static org.junit.Assert.*;
public class FileInfoTest {
private Key key1;
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/test/java/org/apache/accumulo/server/util/FileUtilTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/util/FileUtilTest.java b/server/base/src/test/java/org/apache/accumulo/server/util/FileUtilTest.java
index 90c6300..65db4b8 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/util/FileUtilTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/util/FileUtilTest.java
@@ -83,140 +83,140 @@ public class FileUtilTest {
@SuppressWarnings("deprecation")
@Test
public void testCleanupIndexOpWithDfsDir() throws IOException {
- // And a "unique" tmp directory for each volume
+ // And a "unique" tmp directory for each volume
File tmp1 = new File(accumuloDir, "tmp");
- tmp1.mkdirs();
- Path tmpPath1 = new Path(tmp1.toURI());
+ tmp1.mkdirs();
+ Path tmpPath1 = new Path(tmp1.toURI());
- HashMap<Property,String> testProps = new HashMap<Property,String>();
+ HashMap<Property,String> testProps = new HashMap<Property,String>();
testProps.put(Property.INSTANCE_DFS_DIR, accumuloDir.getAbsolutePath());
- AccumuloConfiguration testConf = new FileUtilTestConfiguration(testProps);
+ AccumuloConfiguration testConf = new FileUtilTestConfiguration(testProps);
VolumeManager fs = VolumeManagerImpl.getLocal(accumuloDir.getAbsolutePath());
- FileUtil.cleanupIndexOp(testConf, tmpPath1, fs, new ArrayList<FileSKVIterator>());
+ FileUtil.cleanupIndexOp(testConf, tmpPath1, fs, new ArrayList<FileSKVIterator>());
- Assert.assertFalse("Expected " + tmp1 + " to be cleaned up but it wasn't", tmp1.exists());
+ Assert.assertFalse("Expected " + tmp1 + " to be cleaned up but it wasn't", tmp1.exists());
}
@Test
public void testCleanupIndexOpWithCommonParentVolume() throws IOException {
- File volumeDir = new File(accumuloDir, "volumes");
- volumeDir.mkdirs();
+ File volumeDir = new File(accumuloDir, "volumes");
+ volumeDir.mkdirs();
- // Make some directories to simulate multiple volumes
- File v1 = new File(volumeDir, "v1"), v2 = new File(volumeDir, "v2");
- v1.mkdirs();
- v2.mkdirs();
+ // Make some directories to simulate multiple volumes
+ File v1 = new File(volumeDir, "v1"), v2 = new File(volumeDir, "v2");
+ v1.mkdirs();
+ v2.mkdirs();
- // And a "unique" tmp directory for each volume
- File tmp1 = new File(v1, "tmp"), tmp2 = new File(v2, "tmp");
- tmp1.mkdirs();
- tmp2.mkdirs();
- Path tmpPath1 = new Path(tmp1.toURI()), tmpPath2 = new Path(tmp2.toURI());
+ // And a "unique" tmp directory for each volume
+ File tmp1 = new File(v1, "tmp"), tmp2 = new File(v2, "tmp");
+ tmp1.mkdirs();
+ tmp2.mkdirs();
+ Path tmpPath1 = new Path(tmp1.toURI()), tmpPath2 = new Path(tmp2.toURI());
- HashMap<Property,String> testProps = new HashMap<Property,String>();
- testProps.put(Property.INSTANCE_VOLUMES, v1.toURI().toString() + "," + v2.toURI().toString());
+ HashMap<Property,String> testProps = new HashMap<Property,String>();
+ testProps.put(Property.INSTANCE_VOLUMES, v1.toURI().toString() + "," + v2.toURI().toString());
- AccumuloConfiguration testConf = new FileUtilTestConfiguration(testProps);
- VolumeManager fs = VolumeManagerImpl.getLocal(accumuloDir.getAbsolutePath());
+ AccumuloConfiguration testConf = new FileUtilTestConfiguration(testProps);
+ VolumeManager fs = VolumeManagerImpl.getLocal(accumuloDir.getAbsolutePath());
- FileUtil.cleanupIndexOp(testConf, tmpPath1, fs, new ArrayList<FileSKVIterator>());
+ FileUtil.cleanupIndexOp(testConf, tmpPath1, fs, new ArrayList<FileSKVIterator>());
- Assert.assertFalse("Expected " + tmp1 + " to be cleaned up but it wasn't", tmp1.exists());
+ Assert.assertFalse("Expected " + tmp1 + " to be cleaned up but it wasn't", tmp1.exists());
- FileUtil.cleanupIndexOp(testConf, tmpPath2, fs, new ArrayList<FileSKVIterator>());
+ FileUtil.cleanupIndexOp(testConf, tmpPath2, fs, new ArrayList<FileSKVIterator>());
- Assert.assertFalse("Expected " + tmp2 + " to be cleaned up but it wasn't", tmp2.exists());
+ Assert.assertFalse("Expected " + tmp2 + " to be cleaned up but it wasn't", tmp2.exists());
}
@Test
public void testCleanupIndexOpWithCommonParentVolumeWithDepth() throws IOException {
- File volumeDir = new File(accumuloDir, "volumes");
- volumeDir.mkdirs();
+ File volumeDir = new File(accumuloDir, "volumes");
+ volumeDir.mkdirs();
- // Make some directories to simulate multiple volumes
- File v1 = new File(volumeDir, "v1"), v2 = new File(volumeDir, "v2");
- v1.mkdirs();
- v2.mkdirs();
+ // Make some directories to simulate multiple volumes
+ File v1 = new File(volumeDir, "v1"), v2 = new File(volumeDir, "v2");
+ v1.mkdirs();
+ v2.mkdirs();
- // And a "unique" tmp directory for each volume
- // Make sure we can handle nested directories (a single tmpdir with potentially multiple unique dirs)
- File tmp1 = new File(new File(v1, "tmp"), "tmp_1"), tmp2 = new File(new File(v2, "tmp"), "tmp_1");
- tmp1.mkdirs();
- tmp2.mkdirs();
- Path tmpPath1 = new Path(tmp1.toURI()), tmpPath2 = new Path(tmp2.toURI());
+ // And a "unique" tmp directory for each volume
+ // Make sure we can handle nested directories (a single tmpdir with potentially multiple unique dirs)
+ File tmp1 = new File(new File(v1, "tmp"), "tmp_1"), tmp2 = new File(new File(v2, "tmp"), "tmp_1");
+ tmp1.mkdirs();
+ tmp2.mkdirs();
+ Path tmpPath1 = new Path(tmp1.toURI()), tmpPath2 = new Path(tmp2.toURI());
- HashMap<Property,String> testProps = new HashMap<Property,String>();
- testProps.put(Property.INSTANCE_VOLUMES, v1.toURI().toString() + "," + v2.toURI().toString());
+ HashMap<Property,String> testProps = new HashMap<Property,String>();
+ testProps.put(Property.INSTANCE_VOLUMES, v1.toURI().toString() + "," + v2.toURI().toString());
- AccumuloConfiguration testConf = new FileUtilTestConfiguration(testProps);
- VolumeManager fs = VolumeManagerImpl.getLocal(accumuloDir.getAbsolutePath());
+ AccumuloConfiguration testConf = new FileUtilTestConfiguration(testProps);
+ VolumeManager fs = VolumeManagerImpl.getLocal(accumuloDir.getAbsolutePath());
- FileUtil.cleanupIndexOp(testConf, tmpPath1, fs, new ArrayList<FileSKVIterator>());
+ FileUtil.cleanupIndexOp(testConf, tmpPath1, fs, new ArrayList<FileSKVIterator>());
- Assert.assertFalse("Expected " + tmp1 + " to be cleaned up but it wasn't", tmp1.exists());
+ Assert.assertFalse("Expected " + tmp1 + " to be cleaned up but it wasn't", tmp1.exists());
- FileUtil.cleanupIndexOp(testConf, tmpPath2, fs, new ArrayList<FileSKVIterator>());
+ FileUtil.cleanupIndexOp(testConf, tmpPath2, fs, new ArrayList<FileSKVIterator>());
- Assert.assertFalse("Expected " + tmp2 + " to be cleaned up but it wasn't", tmp2.exists());
+ Assert.assertFalse("Expected " + tmp2 + " to be cleaned up but it wasn't", tmp2.exists());
}
@Test
public void testCleanupIndexOpWithoutCommonParentVolume() throws IOException {
- // Make some directories to simulate multiple volumes
- File v1 = new File(accumuloDir, "v1"), v2 = new File(accumuloDir, "v2");
- v1.mkdirs();
- v2.mkdirs();
+ // Make some directories to simulate multiple volumes
+ File v1 = new File(accumuloDir, "v1"), v2 = new File(accumuloDir, "v2");
+ v1.mkdirs();
+ v2.mkdirs();
- // And a "unique" tmp directory for each volume
- File tmp1 = new File(v1, "tmp"), tmp2 = new File(v2, "tmp");
- tmp1.mkdirs();
- tmp2.mkdirs();
- Path tmpPath1 = new Path(tmp1.toURI()), tmpPath2 = new Path(tmp2.toURI());
+ // And a "unique" tmp directory for each volume
+ File tmp1 = new File(v1, "tmp"), tmp2 = new File(v2, "tmp");
+ tmp1.mkdirs();
+ tmp2.mkdirs();
+ Path tmpPath1 = new Path(tmp1.toURI()), tmpPath2 = new Path(tmp2.toURI());
- HashMap<Property,String> testProps = new HashMap<Property,String>();
- testProps.put(Property.INSTANCE_VOLUMES, v1.toURI().toString() + "," + v2.toURI().toString());
+ HashMap<Property,String> testProps = new HashMap<Property,String>();
+ testProps.put(Property.INSTANCE_VOLUMES, v1.toURI().toString() + "," + v2.toURI().toString());
- AccumuloConfiguration testConf = new FileUtilTestConfiguration(testProps);
- VolumeManager fs = VolumeManagerImpl.getLocal(accumuloDir.getAbsolutePath());
+ AccumuloConfiguration testConf = new FileUtilTestConfiguration(testProps);
+ VolumeManager fs = VolumeManagerImpl.getLocal(accumuloDir.getAbsolutePath());
- FileUtil.cleanupIndexOp(testConf, tmpPath1, fs, new ArrayList<FileSKVIterator>());
+ FileUtil.cleanupIndexOp(testConf, tmpPath1, fs, new ArrayList<FileSKVIterator>());
- Assert.assertFalse("Expected " + tmp1 + " to be cleaned up but it wasn't", tmp1.exists());
+ Assert.assertFalse("Expected " + tmp1 + " to be cleaned up but it wasn't", tmp1.exists());
- FileUtil.cleanupIndexOp(testConf, tmpPath2, fs, new ArrayList<FileSKVIterator>());
+ FileUtil.cleanupIndexOp(testConf, tmpPath2, fs, new ArrayList<FileSKVIterator>());
- Assert.assertFalse("Expected " + tmp2 + " to be cleaned up but it wasn't", tmp2.exists());
+ Assert.assertFalse("Expected " + tmp2 + " to be cleaned up but it wasn't", tmp2.exists());
}
@Test
public void testCleanupIndexOpWithoutCommonParentVolumeWithDepth() throws IOException {
- // Make some directories to simulate multiple volumes
- File v1 = new File(accumuloDir, "v1"), v2 = new File(accumuloDir, "v2");
- v1.mkdirs();
- v2.mkdirs();
-
- // And a "unique" tmp directory for each volume
- // Make sure we can handle nested directories (a single tmpdir with potentially multiple unique dirs)
- File tmp1 = new File(new File(v1, "tmp"), "tmp_1"), tmp2 = new File(new File(v2, "tmp"), "tmp_1");
- tmp1.mkdirs();
- tmp2.mkdirs();
- Path tmpPath1 = new Path(tmp1.toURI()), tmpPath2 = new Path(tmp2.toURI());
-
- HashMap<Property,String> testProps = new HashMap<Property,String>();
- testProps.put(Property.INSTANCE_VOLUMES, v1.toURI().toString() + "," + v2.toURI().toString());
-
- AccumuloConfiguration testConf = new FileUtilTestConfiguration(testProps);
- VolumeManager fs = VolumeManagerImpl.getLocal(accumuloDir.getAbsolutePath());
+ // Make some directories to simulate multiple volumes
+ File v1 = new File(accumuloDir, "v1"), v2 = new File(accumuloDir, "v2");
+ v1.mkdirs();
+ v2.mkdirs();
+
+ // And a "unique" tmp directory for each volume
+ // Make sure we can handle nested directories (a single tmpdir with potentially multiple unique dirs)
+ File tmp1 = new File(new File(v1, "tmp"), "tmp_1"), tmp2 = new File(new File(v2, "tmp"), "tmp_1");
+ tmp1.mkdirs();
+ tmp2.mkdirs();
+ Path tmpPath1 = new Path(tmp1.toURI()), tmpPath2 = new Path(tmp2.toURI());
+
+ HashMap<Property,String> testProps = new HashMap<Property,String>();
+ testProps.put(Property.INSTANCE_VOLUMES, v1.toURI().toString() + "," + v2.toURI().toString());
+
+ AccumuloConfiguration testConf = new FileUtilTestConfiguration(testProps);
+ VolumeManager fs = VolumeManagerImpl.getLocal(accumuloDir.getAbsolutePath());
- FileUtil.cleanupIndexOp(testConf, tmpPath1, fs, new ArrayList<FileSKVIterator>());
+ FileUtil.cleanupIndexOp(testConf, tmpPath1, fs, new ArrayList<FileSKVIterator>());
- Assert.assertFalse("Expected " + tmp1 + " to be cleaned up but it wasn't", tmp1.exists());
+ Assert.assertFalse("Expected " + tmp1 + " to be cleaned up but it wasn't", tmp1.exists());
- FileUtil.cleanupIndexOp(testConf, tmpPath2, fs, new ArrayList<FileSKVIterator>());
+ FileUtil.cleanupIndexOp(testConf, tmpPath2, fs, new ArrayList<FileSKVIterator>());
- Assert.assertFalse("Expected " + tmp2 + " to be cleaned up but it wasn't", tmp2.exists());
+ Assert.assertFalse("Expected " + tmp2 + " to be cleaned up but it wasn't", tmp2.exists());
}
private static class FileUtilTestConfiguration extends AccumuloConfiguration {
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/test/java/org/apache/accumulo/server/util/ReplicationTableUtilTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/util/ReplicationTableUtilTest.java b/server/base/src/test/java/org/apache/accumulo/server/util/ReplicationTableUtilTest.java
index 4db171e..355fa42 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/util/ReplicationTableUtilTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/util/ReplicationTableUtilTest.java
@@ -62,7 +62,7 @@ import org.junit.Assert;
import org.junit.Test;
/**
- *
+ *
*/
public class ReplicationTableUtilTest {
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/test/java/org/apache/accumulo/server/util/TServerUtilsTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/util/TServerUtilsTest.java b/server/base/src/test/java/org/apache/accumulo/server/util/TServerUtilsTest.java
index 75bf953..218d82c 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/util/TServerUtilsTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/util/TServerUtilsTest.java
@@ -16,6 +16,13 @@
*/
package org.apache.accumulo.server.util;
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.createNiceMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertTrue;
+
import java.util.concurrent.ExecutorService;
import org.apache.accumulo.server.rpc.TServerUtils;
@@ -23,9 +30,6 @@ import org.apache.thrift.server.TServer;
import org.apache.thrift.transport.TServerSocket;
import org.junit.Test;
-import static org.junit.Assert.*;
-import static org.easymock.EasyMock.*;
-
public class TServerUtilsTest {
private static class TServerWithoutES extends TServer {
boolean stopCalled;
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java b/server/base/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java
index 72ce334..0e81b79 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java
@@ -39,64 +39,64 @@ import org.apache.accumulo.server.util.TabletIterator.TabletDeletedException;
import org.apache.hadoop.io.Text;
public class TabletIteratorTest extends TestCase {
-
+
class TestTabletIterator extends TabletIterator {
-
+
private Connector conn;
-
+
public TestTabletIterator(Connector conn) throws Exception {
super(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY), MetadataSchema.TabletsSection.getRange(), true, true);
this.conn = conn;
}
-
+
@Override
protected void resetScanner() {
try {
Scanner ds = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
Text tablet = new KeyExtent(new Text("0"), new Text("m"), null).getMetadataEntry();
ds.setRange(new Range(tablet, true, tablet, true));
-
+
Mutation m = new Mutation(tablet);
-
+
BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
for (Entry<Key,Value> entry : ds) {
Key k = entry.getKey();
m.putDelete(k.getColumnFamily(), k.getColumnQualifier(), k.getTimestamp());
}
-
+
bw.addMutation(m);
-
+
bw.close();
-
+
} catch (Exception e) {
throw new RuntimeException(e);
}
-
+
super.resetScanner();
}
-
+
}
-
+
// simulate a merge happening while iterating over tablets
public void testMerge() throws Exception {
MockInstance mi = new MockInstance();
Connector conn = mi.getConnector("", new PasswordToken(""));
-
+
KeyExtent ke1 = new KeyExtent(new Text("0"), new Text("m"), null);
Mutation mut1 = ke1.getPrevRowUpdateMutation();
TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut1, new Value("/d1".getBytes()));
-
+
KeyExtent ke2 = new KeyExtent(new Text("0"), null, null);
Mutation mut2 = ke2.getPrevRowUpdateMutation();
TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut2, new Value("/d2".getBytes()));
-
+
BatchWriter bw1 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
bw1.addMutation(mut1);
bw1.addMutation(mut2);
bw1.close();
-
+
TestTabletIterator tabIter = new TestTabletIterator(conn);
-
+
try {
while (tabIter.hasNext()) {
tabIter.next();
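
The test above (cut off in this archive view) drives a merge while iterating; the observable contract is that TabletIterator throws TabletDeletedException when a tablet disappears mid-scan. A hedged sketch of a caller restarting on that signal, assuming the four-argument constructor the test's subclass passes to super() is accessible to the caller, with process() standing in for hypothetical per-tablet work:

    import java.util.Map;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.MetadataTable;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema;
    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.accumulo.server.util.TabletIterator;
    import org.apache.accumulo.server.util.TabletIterator.TabletDeletedException;

    class TabletWalkSketch {
      void walk(Connector conn) throws Exception {
        while (true) {
          TabletIterator tabIter = new TabletIterator(
              conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY),
              MetadataSchema.TabletsSection.getRange(), true, true);
          try {
            while (tabIter.hasNext())
              process(tabIter.next()); // hypothetical per-tablet work
            return; // completed a full pass with no concurrent delete
          } catch (TabletDeletedException tde) {
            // a merge deleted a tablet mid-scan; start the pass over
          }
        }
      }

      void process(Map<Key,Value> tablet) {}
    }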
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/test/java/org/apache/accumulo/server/util/time/BaseRelativeTimeTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/util/time/BaseRelativeTimeTest.java b/server/base/src/test/java/org/apache/accumulo/server/util/time/BaseRelativeTimeTest.java
index fdedd84..58760a4 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/util/time/BaseRelativeTimeTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/util/time/BaseRelativeTimeTest.java
@@ -16,34 +16,33 @@
*/
package org.apache.accumulo.server.util.time;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
-import org.apache.accumulo.server.util.time.BaseRelativeTime;
-import org.apache.accumulo.server.util.time.ProvidesTime;
import org.junit.Test;
public class BaseRelativeTimeTest {
-
+
static class BogusTime implements ProvidesTime {
public long value = 0;
-
+
public long currentTime() {
return value;
}
}
-
+
@Test
public void testMatchesTime() {
BogusTime bt = new BogusTime();
BogusTime now = new BogusTime();
now.value = bt.value = System.currentTimeMillis();
-
+
BaseRelativeTime brt = new BaseRelativeTime(now);
assertEquals(brt.currentTime(), now.value);
brt.updateTime(now.value);
assertEquals(brt.currentTime(), now.value);
}
-
+
@Test
public void testFutureTime() {
BogusTime advice = new BogusTime();
@@ -51,14 +50,14 @@ public class BaseRelativeTimeTest {
local.value = advice.value = System.currentTimeMillis();
// Ten seconds into the future
advice.value += 10000;
-
+
BaseRelativeTime brt = new BaseRelativeTime(local);
assertEquals(brt.currentTime(), local.value);
brt.updateTime(advice.value);
long once = brt.currentTime();
assertTrue(once < advice.value);
assertTrue(once > local.value);
-
+
for (int i = 0; i < 100; i++) {
brt.updateTime(advice.value);
}
@@ -66,7 +65,7 @@ public class BaseRelativeTimeTest {
assertTrue(many > once);
assertTrue("after much advice, relative time is still closer to local time", (advice.value - many) < (once - local.value));
}
-
+
@Test
public void testPastTime() {
BogusTime advice = new BogusTime();
@@ -74,7 +73,7 @@ public class BaseRelativeTimeTest {
local.value = advice.value = System.currentTimeMillis();
// Ten seconds into the past
advice.value -= 10000;
-
+
BaseRelativeTime brt = new BaseRelativeTime(local);
brt.updateTime(advice.value);
long once = brt.currentTime();
@@ -85,5 +84,5 @@ public class BaseRelativeTimeTest {
brt.updateTime(advice.value - 10000);
assertTrue("Time cannot go backwards", once <= twice);
}
-
+
}
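
Taken together, the tests above pin down BaseRelativeTime's contract: advice moves the clock gradually toward the advised value, and the clock never runs backwards. A small sketch restating that contract (the anonymous ProvidesTime mirrors the test's BogusTime helper; nothing else is assumed):

    import org.apache.accumulo.server.util.time.BaseRelativeTime;
    import org.apache.accumulo.server.util.time.ProvidesTime;

    public class RelativeTimeSketch {
      public static void main(String[] args) {
        final long start = System.currentTimeMillis();
        // Fixed local clock, standing in for the test's BogusTime.
        ProvidesTime local = new ProvidesTime() {
          public long currentTime() {
            return start;
          }
        };

        BaseRelativeTime brt = new BaseRelativeTime(local);
        brt.updateTime(start + 10000); // advice 10s ahead: moves partway there
        long once = brt.currentTime();
        brt.updateTime(start - 10000); // advice in the past: no going back
        long twice = brt.currentTime();
        System.out.println(once > start && twice >= once); // true
      }
    }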
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/base/src/test/java/org/apache/accumulo/server/util/time/SimpleTimerTest.java
----------------------------------------------------------------------
diff --git a/server/base/src/test/java/org/apache/accumulo/server/util/time/SimpleTimerTest.java b/server/base/src/test/java/org/apache/accumulo/server/util/time/SimpleTimerTest.java
index 0a59812..9bde842 100644
--- a/server/base/src/test/java/org/apache/accumulo/server/util/time/SimpleTimerTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/util/time/SimpleTimerTest.java
@@ -16,10 +16,14 @@
*/
package org.apache.accumulo.server.util.time;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
import java.util.concurrent.atomic.AtomicInteger;
+
import org.junit.Before;
import org.junit.Test;
-import static org.junit.Assert.*;
public class SimpleTimerTest {
private static final long DELAY = 1000L;
@@ -55,6 +59,7 @@ public class SimpleTimerTest {
private static class Thrower implements Runnable {
boolean wasRun = false;
+
public void run() {
wasRun = true;
throw new IllegalStateException("You shall not pass");
@@ -94,6 +99,6 @@ public class SimpleTimerTest {
assertEquals(1, SimpleTimer.getInstanceThreadPoolSize());
SimpleTimer t2 = SimpleTimer.getInstance(2);
assertSame(t, t2);
- assertEquals(1, SimpleTimer.getInstanceThreadPoolSize()); // unchanged
+ assertEquals(1, SimpleTimer.getInstanceThreadPoolSize()); // unchanged
}
}
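
The "// unchanged" comment touched in the last hunk documents a singleton property worth stating plainly: the first getInstance call fixes the thread pool size, and later calls return the same timer regardless of the size they request. As a sketch (only the calls shown in the hunk above are used):

    import org.apache.accumulo.server.util.time.SimpleTimer;

    public class SimpleTimerSketch {
      public static void main(String[] args) {
        SimpleTimer first = SimpleTimer.getInstance(1);
        SimpleTimer second = SimpleTimer.getInstance(2); // size request ignored
        System.out.println(first == second);                         // true
        System.out.println(SimpleTimer.getInstanceThreadPoolSize()); // 1
      }
    }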
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
----------------------------------------------------------------------
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
index 56fbefe..f375328 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
@@ -50,7 +50,7 @@ import com.google.common.collect.Iterators;
import com.google.common.collect.PeekingIterator;
/**
- *
+ *
*/
public class GarbageCollectionAlgorithm {
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionEnvironment.java
----------------------------------------------------------------------
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionEnvironment.java b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionEnvironment.java
index 7f208d8..41ac204 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionEnvironment.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionEnvironment.java
@@ -36,13 +36,13 @@ import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.Sc
import org.apache.accumulo.core.replication.proto.Replication.Status;
/**
- *
+ *
*/
public interface GarbageCollectionEnvironment {
/**
* Return a list of paths to files and dirs which are candidates for deletion from a given table, {@link RootTable#NAME} or {@link MetadataTable#NAME}
- *
+ *
* @param continuePoint
* A row to resume from if a previous invocation was stopped due to finding an extremely large number of candidates to remove which would have
* exceeded memory limitations
@@ -52,28 +52,28 @@ public interface GarbageCollectionEnvironment {
/**
* Fetch a list of paths for all bulk loads in progress (blip) from a given table, {@link RootTable#NAME} or {@link MetadataTable#NAME}
- *
+ *
* @return The list of files for each bulk load currently in progress.
*/
Iterator<String> getBlipIterator() throws TableNotFoundException, AccumuloException, AccumuloSecurityException;
/**
* Fetches the references to files, {@link DataFileColumnFamily#NAME} or {@link ScanFileColumnFamily#NAME}, from tablets
- *
+ *
* @return An {@link Iterator} of {@link Entry}<{@link Key}, {@link Value}> which constitute a reference to a file.
*/
Iterator<Entry<Key,Value>> getReferenceIterator() throws TableNotFoundException, AccumuloException, AccumuloSecurityException;
/**
* Return the set of tableIDs for the given instance this GarbageCollector is running over
- *
+ *
* @return The valueSet for the table name to table id map.
*/
Set<String> getTableIDs();
/**
* Delete the given files from the provided {@link Map} of relative path to absolute path for each file that should be deleted
- *
+ *
* @param candidateMap
* A Map from relative path to absolute path for files to be deleted.
*/
@@ -81,7 +81,7 @@ public interface GarbageCollectionEnvironment {
/**
* Delete a table's directory if it is empty.
- *
+ *
* @param tableID
* The id of the table whose directory we are to operate on
*/
@@ -89,7 +89,7 @@ public interface GarbageCollectionEnvironment {
/**
* Increment the number of candidates for deletion for the current garbage collection run
- *
+ *
* @param i
* Value to increment the deletion candidates by
*/
@@ -97,7 +97,7 @@ public interface GarbageCollectionEnvironment {
/**
* Increment the number of files still in use for the current garbage collection run
- *
+ *
* @param i
* Value to increment the still-in-use count by.
*/
@@ -105,6 +105,7 @@ public interface GarbageCollectionEnvironment {
/**
* Determine if the given absolute file is still pending replication
+ *
* @return True if the file still needs to be replicated
*/
Iterator<Entry<String,Status>> getReplicationNeededIterator() throws AccumuloException, AccumuloSecurityException;
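
The javadoc cleaned up above describes how a collector consumes this interface. A hedged sketch of that flow, using only the methods whose signatures appear in this hunk (candidate gathering and deletion are elided because their signatures are not shown here, and path normalization between candidates and references is glossed over):

    import java.util.Iterator;
    import java.util.Map.Entry;
    import java.util.SortedSet;

    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.replication.proto.Replication.Status;
    import org.apache.accumulo.gc.GarbageCollectionEnvironment;

    class GcPruneSketch {
      // Remove from 'candidates' every path that is still in use.
      void prune(GarbageCollectionEnvironment gce, SortedSet<String> candidates) throws Exception {
        // Still referenced by a tablet (data or scan file): keep it.
        Iterator<Entry<Key,Value>> refs = gce.getReferenceIterator();
        while (refs.hasNext())
          candidates.remove(refs.next().getKey().getColumnQualifier().toString());

        // Still pending replication to a peer: keep it too.
        Iterator<Entry<String,Status>> repl = gce.getReplicationNeededIterator();
        while (repl.hasNext())
          candidates.remove(repl.next().getKey());
      }
    }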
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
----------------------------------------------------------------------
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java b/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
index 93a9a49..db37c8b 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/SimpleGarbageCollector.java
@@ -716,8 +716,8 @@ public class SimpleGarbageCollector extends AccumuloServerContext implements Ifa
HostAndPort result = HostAndPort.fromParts(opts.getAddress(), port);
log.debug("Starting garbage collector listening on " + result);
try {
- return TServerUtils.startTServer(getConfiguration(), result, processor, this.getClass().getSimpleName(), "GC Monitor Service", 2,
- getConfiguration().getCount(Property.GENERAL_SIMPLETIMER_THREADPOOL_SIZE), 1000, maxMessageSize, getServerSslParams(), 0).address;
+ return TServerUtils.startTServer(getConfiguration(), result, processor, this.getClass().getSimpleName(), "GC Monitor Service", 2, getConfiguration()
+ .getCount(Property.GENERAL_SIMPLETIMER_THREADPOOL_SIZE), 1000, maxMessageSize, getServerSslParams(), 0).address;
} catch (Exception ex) {
log.fatal(ex, ex);
throw new RuntimeException(ex);
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectionTest.java
----------------------------------------------------------------------
diff --git a/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectionTest.java b/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectionTest.java
index 51b9596..286723d 100644
--- a/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectionTest.java
+++ b/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectionTest.java
@@ -42,7 +42,7 @@ import org.junit.Assert;
import org.junit.Test;
/**
- *
+ *
*/
public class GarbageCollectionTest {
static class TestGCE implements GarbageCollectionEnvironment {
@@ -345,7 +345,6 @@ public class GarbageCollectionTest {
assertRemoved(gce);
}
-
@Test
public void testCustomDirectories() throws Exception {
TestGCE gce = new TestGCE();
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorOptsTest.java
----------------------------------------------------------------------
diff --git a/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorOptsTest.java b/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorOptsTest.java
index d484741..b91784d 100644
--- a/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorOptsTest.java
+++ b/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorOptsTest.java
@@ -16,10 +16,11 @@
*/
package org.apache.accumulo.gc;
+import static org.junit.Assert.assertFalse;
+
import org.apache.accumulo.gc.SimpleGarbageCollector.Opts;
import org.junit.Before;
import org.junit.Test;
-import static org.junit.Assert.assertFalse;
public class SimpleGarbageCollectorOptsTest {
private Opts opts;
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/master/src/main/java/org/apache/accumulo/master/EventCoordinator.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/EventCoordinator.java b/server/master/src/main/java/org/apache/accumulo/master/EventCoordinator.java
index e2f32c4..ebff7ab 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/EventCoordinator.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/EventCoordinator.java
@@ -19,10 +19,10 @@ package org.apache.accumulo.master;
import org.apache.log4j.Logger;
public class EventCoordinator {
-
+
private static final Logger log = Logger.getLogger(EventCoordinator.class);
long eventCounter = 0;
-
+
synchronized long waitForEvents(long millis, long lastEvent) {
// Did something happen since the last time we waited?
if (lastEvent == eventCounter) {
@@ -37,27 +37,27 @@ public class EventCoordinator {
}
return eventCounter;
}
-
+
synchronized public void event(String msg, Object... args) {
log.info(String.format(msg, args));
eventCounter++;
notifyAll();
}
-
+
public Listener getListener() {
return new Listener();
}
-
+
public class Listener {
long lastEvent;
-
+
Listener() {
lastEvent = eventCounter;
}
-
+
public void waitForEvents(long millis) {
lastEvent = EventCoordinator.this.waitForEvents(millis, lastEvent);
}
}
-
+
}
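
A minimal usage sketch for the class above (illustrative only; in the real master the coordinator is owned by Master and handed out as nextEvent, so constructing one directly is just for demonstration):

    import org.apache.accumulo.master.EventCoordinator;

    public class EventCoordinatorSketch {
      public static void main(String[] args) throws Exception {
        final EventCoordinator coordinator = new EventCoordinator();
        final EventCoordinator.Listener listener = coordinator.getListener();

        Thread waiter = new Thread(new Runnable() {
          public void run() {
            // Blocks for up to 60s, but returns early once an event fires.
            listener.waitForEvents(60 * 1000);
            System.out.println("woke up");
          }
        });
        waiter.start();

        Thread.sleep(100);
        // Logs the formatted message, bumps the counter, and notifies waiters.
        coordinator.event("assigned %d tablets", 5);
        waiter.join();
      }
    }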
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java b/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java
index 5207745..d10a7ad 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/FateServiceHandler.java
@@ -31,12 +31,12 @@ import java.util.Map.Entry;
import java.util.Set;
import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.impl.CompactionStrategyConfigUtil;
-import org.apache.accumulo.core.client.admin.CompactionStrategyConfig;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.client.NamespaceNotFoundException;
import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.admin.CompactionStrategyConfig;
import org.apache.accumulo.core.client.admin.TimeType;
+import org.apache.accumulo.core.client.impl.CompactionStrategyConfigUtil;
import org.apache.accumulo.core.client.impl.Namespaces;
import org.apache.accumulo.core.client.impl.TableOperationsImpl;
import org.apache.accumulo.core.client.impl.Tables;
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
index 802c967..35a2d10 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
@@ -94,30 +94,30 @@ class TabletGroupWatcher extends Daemon {
private final Master master;
final TabletStateStore store;
final TabletGroupWatcher dependentWatcher;
-
+
final TableStats stats = new TableStats();
-
+
TabletGroupWatcher(Master master, TabletStateStore store, TabletGroupWatcher dependentWatcher) {
this.master = master;
this.store = store;
this.dependentWatcher = dependentWatcher;
}
-
+
Map<Text,TableCounts> getStats() {
return stats.getLast();
}
-
+
TableCounts getStats(Text tableId) {
return stats.getLast(tableId);
}
-
+
@Override
public void run() {
-
+
Thread.currentThread().setName("Watching " + store.name());
int[] oldCounts = new int[TabletState.values().length];
EventCoordinator.Listener eventListener = this.master.nextEvent.getListener();
-
+
while (this.master.stillMaster()) {
// slow things down a little, otherwise we spam the logs when there are many wake-up events
UtilWaitThread.sleep(100);
@@ -133,27 +133,27 @@ class TabletGroupWatcher extends Daemon {
currentMerges.put(merge.getExtent().getTableId(), new MergeStats(merge));
}
}
-
+
// Get the current status for the current list of tservers
SortedMap<TServerInstance,TabletServerStatus> currentTServers = new TreeMap<TServerInstance,TabletServerStatus>();
for (TServerInstance entry : this.master.tserverSet.getCurrentServers()) {
currentTServers.put(entry, this.master.tserverStatus.get(entry));
}
-
+
if (currentTServers.size() == 0) {
eventListener.waitForEvents(Master.TIME_TO_WAIT_BETWEEN_SCANS);
continue;
}
-
+
// Don't move tablets to servers that are shutting down
SortedMap<TServerInstance,TabletServerStatus> destinations = new TreeMap<TServerInstance,TabletServerStatus>(currentTServers);
destinations.keySet().removeAll(this.master.serversToShutdown);
-
+
List<Assignment> assignments = new ArrayList<Assignment>();
List<Assignment> assigned = new ArrayList<Assignment>();
List<TabletLocationState> assignedToDeadServers = new ArrayList<TabletLocationState>();
Map<KeyExtent,TServerInstance> unassigned = new HashMap<KeyExtent,TServerInstance>();
-
+
int[] counts = new int[TabletState.values().length];
stats.begin();
// Walk through the tablets in our store, and work tablets
@@ -167,10 +167,10 @@ class TabletGroupWatcher extends Daemon {
// ignore entries for tables that do not exist in zookeeper
if (TableManager.getInstance().getTableState(tls.extent.getTableId().toString()) == null)
continue;
-
+
if (Master.log.isTraceEnabled())
Master.log.trace(tls + " walogs " + tls.walogs.size());
-
+
// Don't overwhelm the tablet servers with work
if (unassigned.size() + unloaded > Master.MAX_TSERVER_WORK_CHUNK * currentTServers.size()) {
flushChanges(destinations, assignments, assigned, assignedToDeadServers, unassigned);
@@ -199,12 +199,12 @@ class TabletGroupWatcher extends Daemon {
mergeStats.update(tls.extent, state, tls.chopped, !tls.walogs.isEmpty());
sendChopRequest(mergeStats.getMergeInfo(), state, tls);
sendSplitRequest(mergeStats.getMergeInfo(), state, tls);
-
+
// Always follow through with assignments
if (state == TabletState.ASSIGNED) {
goal = TabletGoalState.HOSTED;
}
-
+
// if we are shutting down all the tabletservers, we have to do it in order
if (goal == TabletGoalState.UNASSIGNED && state == TabletState.HOSTED) {
if (this.master.serversToShutdown.equals(currentTServers.keySet())) {
@@ -213,7 +213,7 @@ class TabletGroupWatcher extends Daemon {
}
}
}
-
+
if (goal == TabletGoalState.HOSTED) {
if (state != TabletState.HOSTED && !tls.walogs.isEmpty()) {
if (this.master.recoveryManager.recoverLogs(tls.extent, tls.walogs))
@@ -275,12 +275,12 @@ class TabletGroupWatcher extends Daemon {
}
counts[state.ordinal()]++;
}
-
+
flushChanges(destinations, assignments, assigned, assignedToDeadServers, unassigned);
-
+
// provide stats after flushing changes to avoid race conditions w/ delete table
stats.end();
-
+
// Report changes
for (TabletState state : TabletState.values()) {
int i = state.ordinal();
@@ -293,14 +293,14 @@ class TabletGroupWatcher extends Daemon {
if (totalUnloaded > 0) {
this.master.nextEvent.event("[%s]: %d tablets unloaded", store.name(), totalUnloaded);
}
-
+
updateMergeState(mergeStatsCache);
-
+
Master.log.debug(String.format("[%s] sleeping for %.2f seconds", store.name(), Master.TIME_TO_WAIT_BETWEEN_SCANS / 1000.));
eventListener.waitForEvents(Master.TIME_TO_WAIT_BETWEEN_SCANS);
} catch (Exception ex) {
Master.log.error("Error processing table state for store " + store.name(), ex);
- if (ex.getCause() != null && ex.getCause() instanceof BadLocationStateException) {
+ if (ex.getCause() != null && ex.getCause() instanceof BadLocationStateException) {
repairMetadata(((BadLocationStateException) ex.getCause()).getEncodedEndRow());
} else {
UtilWaitThread.sleep(Master.WAIT_BETWEEN_ERRORS);
@@ -316,15 +316,15 @@ class TabletGroupWatcher extends Daemon {
}
}
}
-
+
private void repairMetadata(Text row) {
Master.log.debug("Attempting repair on " + row);
// ACCUMULO-2261 if a dying tserver writes a location before its lock information propagates, it may cause duplicate assignment.
// Attempt to find the dead server entry and remove it.
try {
- Map<Key, Value> future = new HashMap<Key, Value>();
- Map<Key, Value> assigned = new HashMap<Key, Value>();
- KeyExtent extent = new KeyExtent(row, new Value(new byte[]{0}));
+ Map<Key,Value> future = new HashMap<Key,Value>();
+ Map<Key,Value> assigned = new HashMap<Key,Value>();
+ KeyExtent extent = new KeyExtent(row, new Value(new byte[] {0}));
String table = MetadataTable.NAME;
if (extent.isMeta())
table = RootTable.NAME;
@@ -349,9 +349,9 @@ class TabletGroupWatcher extends Daemon {
Master.log.info("Attempted a repair, but nothing seems to be obviously wrong. " + assigned + " " + future);
return;
}
- Iterator<Entry<Key, Value>> iter = Iterators.concat(future.entrySet().iterator(), assigned.entrySet().iterator());
+ Iterator<Entry<Key,Value>> iter = Iterators.concat(future.entrySet().iterator(), assigned.entrySet().iterator());
while (iter.hasNext()) {
- Entry<Key, Value> entry = iter.next();
+ Entry<Key,Value> entry = iter.next();
TServerInstance alive = master.tserverSet.find(entry.getValue().toString());
if (alive == null) {
Master.log.info("Removing entry " + entry);
@@ -376,7 +376,7 @@ class TabletGroupWatcher extends Daemon {
}
return result;
}
-
+
private void sendSplitRequest(MergeInfo info, TabletState state, TabletLocationState tls) {
// Already split?
if (!info.getState().equals(MergeState.SPLITTING))
@@ -416,7 +416,7 @@ class TabletGroupWatcher extends Daemon {
}
}
}
-
+
private void sendChopRequest(MergeInfo info, TabletState state, TabletLocationState tls) {
// Don't bother if we're in the wrong state
if (!info.getState().equals(MergeState.WAITING_FOR_CHOPPED))
@@ -443,7 +443,7 @@ class TabletGroupWatcher extends Daemon {
}
}
}
-
+
private void updateMergeState(Map<Text,MergeStats> mergeStatsCache) {
for (MergeStats stats : mergeStatsCache.values()) {
try {
@@ -457,7 +457,7 @@ class TabletGroupWatcher extends Daemon {
if (update != stats.getMergeInfo().getState()) {
this.master.setMergeState(stats.getMergeInfo(), update);
}
-
+
if (update == MergeState.MERGING) {
try {
if (stats.getMergeInfo().isDelete()) {
@@ -475,7 +475,7 @@ class TabletGroupWatcher extends Daemon {
}
}
}
-
+
private void deleteTablets(MergeInfo info) throws AccumuloException {
KeyExtent extent = info.getExtent();
String targetSystemTable = extent.isMeta() ? RootTable.NAME : MetadataTable.NAME;
@@ -537,7 +537,7 @@ class TabletGroupWatcher extends Daemon {
} finally {
bw.close();
}
-
+
if (followingTablet != null) {
Master.log.debug("Updating prevRow of " + followingTablet + " to " + extent.getPrevEndRow());
bw = conn.createBatchWriter(targetSystemTable, new BatchWriterConfig());
@@ -553,15 +553,15 @@ class TabletGroupWatcher extends Daemon {
} else {
// Recreate the default tablet to hold the end of the table
Master.log.debug("Recreating the last tablet to point to " + extent.getPrevEndRow());
- String tdir = master.getFileSystem().choose(Optional.of(extent.getTableId().toString()), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + extent.getTableId()
- + Constants.DEFAULT_TABLET_LOCATION;
+ String tdir = master.getFileSystem().choose(Optional.of(extent.getTableId().toString()), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR
+ + Path.SEPARATOR + extent.getTableId() + Constants.DEFAULT_TABLET_LOCATION;
MetadataTableUtil.addTablet(new KeyExtent(extent.getTableId(), null, extent.getPrevEndRow()), tdir, master, timeType, this.master.masterLock);
}
} catch (Exception ex) {
throw new AccumuloException(ex);
}
}
-
+
private void mergeMetadataRecords(MergeInfo info) throws AccumuloException {
KeyExtent range = info.getExtent();
Master.log.debug("Merging metadata for " + range);
@@ -578,7 +578,7 @@ class TabletGroupWatcher extends Daemon {
if (range.isMeta()) {
targetSystemTable = RootTable.NAME;
}
-
+
BatchWriter bw = null;
try {
long fileCount = 0;
@@ -608,7 +608,7 @@ class TabletGroupWatcher extends Daemon {
bw.addMutation(MetadataTableUtil.createDeleteMutation(range.getTableId().toString(), entry.getValue().toString()));
}
}
-
+
// read the logical time from the last tablet in the merge range, it is not included in
// the loop above
scanner = conn.createScanner(targetSystemTable, Authorizations.EMPTY);
@@ -619,37 +619,37 @@ class TabletGroupWatcher extends Daemon {
maxLogicalTime = TabletTime.maxMetadataTime(maxLogicalTime, entry.getValue().toString());
}
}
-
+
if (maxLogicalTime != null)
TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(maxLogicalTime.getBytes()));
-
+
if (!m.getUpdates().isEmpty()) {
bw.addMutation(m);
}
-
+
bw.flush();
-
+
Master.log.debug("Moved " + fileCount + " files to " + stop);
-
+
if (firstPrevRowValue == null) {
Master.log.debug("tablet already merged");
return;
}
-
+
stop.setPrevEndRow(KeyExtent.decodePrevEndRow(firstPrevRowValue));
Mutation updatePrevRow = stop.getPrevRowUpdateMutation();
Master.log.debug("Setting the prevRow for last tablet: " + stop);
bw.addMutation(updatePrevRow);
bw.flush();
-
+
deleteTablets(info, scanRange, bw, conn);
-
+
// Clean-up the last chopped marker
m = new Mutation(stopRow);
ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);
bw.addMutation(m);
bw.flush();
-
+
} catch (Exception ex) {
throw new AccumuloException(ex);
} finally {
@@ -661,7 +661,7 @@ class TabletGroupWatcher extends Daemon {
}
}
}
-
+
private void deleteTablets(MergeInfo info, Range scanRange, BatchWriter bw, Connector conn) throws TableNotFoundException, MutationsRejectedException {
Scanner scanner;
Mutation m;
@@ -679,19 +679,19 @@ class TabletGroupWatcher extends Daemon {
while (row.hasNext()) {
Entry<Key,Value> entry = row.next();
Key key = entry.getKey();
-
+
if (m == null)
m = new Mutation(key.getRow());
-
+
m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
Master.log.debug("deleting entry " + key);
}
bw.addMutation(m);
}
-
+
bw.flush();
}
-
+
private KeyExtent getHighTablet(KeyExtent range) throws AccumuloException {
try {
Connector conn = this.master.getConnector();
@@ -713,7 +713,7 @@ class TabletGroupWatcher extends Daemon {
throw new AccumuloException("Unexpected failure finding the last tablet for a merge " + range, ex);
}
}
-
+
private void flushChanges(SortedMap<TServerInstance,TabletServerStatus> currentTServers, List<Assignment> assignments, List<Assignment> assigned,
List<TabletLocationState> assignedToDeadServers, Map<KeyExtent,TServerInstance> unassigned) throws DistributedStoreException, TException {
if (!assignedToDeadServers.isEmpty()) {
@@ -722,7 +722,7 @@ class TabletGroupWatcher extends Daemon {
store.unassign(assignedToDeadServers);
this.master.nextEvent.event("Marked %d tablets as unassigned because they don't have current servers", assignedToDeadServers.size());
}
-
+
if (!currentTServers.isEmpty()) {
Map<KeyExtent,TServerInstance> assignedOut = new HashMap<KeyExtent,TServerInstance>();
final StringBuilder builder = new StringBuilder(64);
@@ -764,7 +764,7 @@ class TabletGroupWatcher extends Daemon {
if (!unassigned.isEmpty() && assignedOut.isEmpty())
Master.log.warn("Load balancer failed to assign any tablets");
}
-
+
if (assignments.size() > 0) {
Master.log.info(String.format("Assigning %d tablets", assignments.size()));
store.setFutureLocations(assignments);
@@ -780,5 +780,5 @@ class TabletGroupWatcher extends Daemon {
master.assignedTablet(a.tablet);
}
}
-
+
}
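For context, deleteTablets above collapses each metadata row in the scan range into one mutation full of deletes. A minimal sketch of that row-wise pattern, assuming the usual org.apache.accumulo.core client classes (Scanner, RowIterator, Mutation, BatchWriter) are imported, and that scanRange and bw are hypothetical values supplied by the caller:

  // Row-wise delete: one Mutation per metadata row, one putDelete per cell.
  Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
  scanner.setRange(scanRange); // hypothetical Range covering the tablets to delete
  RowIterator rowIter = new RowIterator(scanner);
  while (rowIter.hasNext()) {
    Iterator<Entry<Key,Value>> row = rowIter.next();
    Mutation m = null;
    while (row.hasNext()) {
      Entry<Key,Value> entry = row.next();
      Key key = entry.getKey();
      if (m == null)
        m = new Mutation(key.getRow());
      m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
    }
    bw.addMutation(m); // hypothetical BatchWriter supplied by the caller
  }
  bw.flush();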
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/master/src/main/java/org/apache/accumulo/master/metrics/ReplicationMetricsMBean.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/metrics/ReplicationMetricsMBean.java b/server/master/src/main/java/org/apache/accumulo/master/metrics/ReplicationMetricsMBean.java
index 84f8142..4d19126 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/metrics/ReplicationMetricsMBean.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/metrics/ReplicationMetricsMBean.java
@@ -17,32 +17,32 @@
package org.apache.accumulo.master.metrics;
/**
- *
+ *
*/
public interface ReplicationMetricsMBean {
-
+
/**
* A system may have multiple Replication targets, each of which has a queue of files to be replicated. This returns the sum across all targets, without
* de-duplicating files.
- *
+ *
* @return The number of files pending replication across all targets
*/
public int getNumFilesPendingReplication();
-
+
/**
* The total number of threads available to replicate data to peers. Each TabletServer has a number of threads devoted to replication, so this value is
* affected by the number of currently active TabletServers.
- *
+ *
* @return The number of threads available to replicate data across the instance
*/
public int getMaxReplicationThreads();
-
+
/**
* Peers are systems to which data can be replicated. This is the number of peers that are defined, but not necessarily the number of peers that are
* actively being replicated to.
- *
+ *
* @return The number of peers/targets defined as replication destinations.
*/
public int getNumConfiguredPeers();
-
+
}
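Since this interface is a plain MBean, its attributes can be read over JMX with the standard javax.management API. A hedged sketch; the ObjectName below is hypothetical, as the registration name is not shown in this diff:

  import java.lang.management.ManagementFactory;
  import javax.management.JMX;
  import javax.management.MBeanServer;
  import javax.management.ObjectName;

  public class ReplicationMetricsPoller {
    public static void main(String[] args) throws Exception {
      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
      // hypothetical name; the actual one depends on how the master registers the MBean
      ObjectName name = new ObjectName("accumulo.server.metrics:service=Replication,name=ReplicationMetrics");
      ReplicationMetricsMBean proxy = JMX.newMBeanProxy(mbs, name, ReplicationMetricsMBean.class);
      System.out.println("files pending: " + proxy.getNumFilesPendingReplication());
      System.out.println("max threads:   " + proxy.getMaxReplicationThreads());
      System.out.println("peers defined: " + proxy.getNumConfiguredPeers());
    }
  }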
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/master/src/main/java/org/apache/accumulo/master/recovery/RecoveryManager.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/recovery/RecoveryManager.java b/server/master/src/main/java/org/apache/accumulo/master/recovery/RecoveryManager.java
index c636dbb..b9211d2 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/recovery/RecoveryManager.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/recovery/RecoveryManager.java
@@ -115,8 +115,8 @@ public class RecoveryManager {
}
- private void initiateSort(String sortId, String source, final String destination, AccumuloConfiguration aconf)
- throws KeeperException, InterruptedException, IOException {
+ private void initiateSort(String sortId, String source, final String destination, AccumuloConfiguration aconf) throws KeeperException, InterruptedException,
+ IOException {
String work = source + "|" + destination;
new DistributedWorkQueue(ZooUtil.getRoot(master.getInstance()) + Constants.ZRECOVERY, aconf).addWork(sortId, work.getBytes(UTF_8));
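For context, initiateSort encodes its work entry as a single delimited string keyed by sortId in the ZK work queue. A minimal sketch of that encoding and its inverse, with hypothetical paths:

  // The queued value is simply "source|destination"; splitting on the first
  // '|' recovers both paths on the worker side.
  String source = "hdfs://nn/accumulo/wal/tserver+9997/wal-0001";  // hypothetical
  String destination = "hdfs://nn/accumulo/recovery/wal-0001";     // hypothetical
  String work = source + "|" + destination;

  int idx = work.indexOf('|');
  String decodedSource = work.substring(0, idx);
  String decodedDestination = work.substring(idx + 1);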
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/master/src/main/java/org/apache/accumulo/master/replication/MasterReplicationCoordinator.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/MasterReplicationCoordinator.java b/server/master/src/main/java/org/apache/accumulo/master/replication/MasterReplicationCoordinator.java
index 73131c7..be8a264 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/MasterReplicationCoordinator.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/MasterReplicationCoordinator.java
@@ -65,10 +65,9 @@ public class MasterReplicationCoordinator implements ReplicationCoordinator.Ifac
this.security = SecurityOperation.getInstance(master, false);
}
-
@Override
public String getServicerAddress(String remoteTableId, TCredentials creds) throws ReplicationCoordinatorException, TException {
- try {
+ try {
security.authenticateUser(master.rpcCreds(), creds);
} catch (ThriftSecurityException e) {
log.error("{} failed to authenticate for replication to {}", creds.getPrincipal(), remoteTableId);
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/master/src/main/java/org/apache/accumulo/master/replication/SequentialWorkAssigner.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/SequentialWorkAssigner.java b/server/master/src/main/java/org/apache/accumulo/master/replication/SequentialWorkAssigner.java
index 4b2936c..e30e9ac 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/SequentialWorkAssigner.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/SequentialWorkAssigner.java
@@ -36,11 +36,9 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
- * Creates work in ZK which is <code>filename.serialized_ReplicationTarget => filename</code>, but replicates
- * files in the order in which they were created.
+ * Creates work in ZK which is <code>filename.serialized_ReplicationTarget => filename</code>, but replicates files in the order in which they were created.
* <p>
- * The intent is to ensure that WALs are replayed in the same order on the peer in which
- * they were applied on the primary.
+ * The intent is to ensure that WALs are replayed in the same order on the peer in which they were applied on the primary.
*/
public class SequentialWorkAssigner extends DistributedWorkQueueWorkAssigner {
private static final Logger log = LoggerFactory.getLogger(SequentialWorkAssigner.class);
@@ -48,7 +46,7 @@ public class SequentialWorkAssigner extends DistributedWorkQueueWorkAssigner {
// @formatter:off
/*
- * {
+ * {
* peer1 => {sourceTableId1 => work_queue_key1, sourceTableId2 => work_queue_key2, ...}
* peer2 => {sourceTableId1 => work_queue_key1, sourceTableId3 => work_queue_key4, ...}
* ...
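The @formatter:off comment above sketches the assigner's bookkeeping. A minimal Java model of that structure, with hypothetical names, showing why at most one file per (peer, source table) pair is in flight at a time:

  // peer -> (sourceTableId -> work_queue_key). Tracking a single key per
  // (peer, table) pair is what forces files to replicate in creation order.
  Map<String,Map<String,String>> queuedWorkByPeer = new HashMap<String,Map<String,String>>();

  // hypothetical values, for illustration only
  String peer = "peer1";
  String sourceTableId = "2";
  String workQueueKey = "wal-0001.serialized_target";

  Map<String,String> queuedForPeer = queuedWorkByPeer.get(peer);
  if (queuedForPeer == null) {
    queuedForPeer = new HashMap<String,String>();
    queuedWorkByPeer.put(peer, queuedForPeer);
  }
  if (!queuedForPeer.containsKey(sourceTableId)) {
    queuedForPeer.put(sourceTableId, workQueueKey); // nothing in flight; queue it
  }
  // else: a file for this table is already queued, so this one must wait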
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/master/src/main/java/org/apache/accumulo/master/replication/WorkDriver.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/replication/WorkDriver.java b/server/master/src/main/java/org/apache/accumulo/master/replication/WorkDriver.java
index fbc9c80..3558d2d 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/replication/WorkDriver.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/replication/WorkDriver.java
@@ -63,7 +63,7 @@ public class WorkDriver extends Daemon {
log.error("Could not instantiate configured work assigner {}", workAssignerClass, e);
throw new RuntimeException(e);
}
-
+
this.assigner.configure(conf, conn);
this.assignerImplName = assigner.getClass().getName();
this.setName(assigner.getName());
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java b/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java
index 4737b6e..8cdaf9f 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/state/MergeStats.java
@@ -56,7 +56,7 @@ public class MergeStats {
int total = 0;
boolean lowerSplit = false;
boolean upperSplit = false;
-
+
public MergeStats(MergeInfo info) {
this.info = info;
if (info.getState().equals(MergeState.NONE))
@@ -66,11 +66,11 @@ public class MergeStats {
if (info.getExtent().getPrevEndRow() == null)
lowerSplit = true;
}
-
+
public MergeInfo getMergeInfo() {
return info;
}
-
+
public void update(KeyExtent ke, TabletState state, boolean chopped, boolean hasWALs) {
if (info.getState().equals(MergeState.NONE))
return;
@@ -100,7 +100,7 @@ public class MergeStats {
if (state.equals(TabletState.UNASSIGNED))
this.unassigned++;
}
-
+
public MergeState nextMergeState(Connector connector, CurrentState master) throws Exception {
MergeState state = info.getState();
if (state == MergeState.NONE)
@@ -173,7 +173,7 @@ public class MergeStats {
}
return state;
}
-
+
private boolean verifyMergeConsistency(Connector connector, CurrentState master) throws TableNotFoundException, IOException {
MergeStats verify = new MergeStats(info);
KeyExtent extent = info.getExtent();
@@ -188,7 +188,7 @@ public class MergeStats {
Range range = new Range(first, false, null, true);
scanner.setRange(range);
KeyExtent prevExtent = null;
-
+
log.debug("Scanning range " + range);
for (Entry<Key,Value> entry : scanner) {
TabletLocationState tls;
@@ -202,31 +202,31 @@ public class MergeStats {
if (!tls.extent.getTableId().equals(tableId)) {
break;
}
-
+
if (!tls.walogs.isEmpty() && verify.getMergeInfo().needsToBeChopped(tls.extent)) {
log.debug("failing consistency: needs to be chopped" + tls.extent);
return false;
}
-
+
if (prevExtent == null) {
// this is the first tablet observed; it must be offline and its prev row must be less than the start of the merge range
if (tls.extent.getPrevEndRow() != null && tls.extent.getPrevEndRow().compareTo(start) > 0) {
log.debug("failing consistency: prev row is too high " + start);
return false;
}
-
+
if (tls.getState(master.onlineTabletServers()) != TabletState.UNASSIGNED) {
log.debug("failing consistency: assigned or hosted " + tls);
return false;
}
-
+
} else if (!tls.extent.isPreviousExtent(prevExtent)) {
log.debug("hole in " + MetadataTable.NAME);
return false;
}
-
+
prevExtent = tls.extent;
-
+
verify.update(tls.extent, tls.getState(master.onlineTabletServers()), tls.chopped, !tls.walogs.isEmpty());
// stop when we've seen the tablet just beyond our range
if (tls.extent.getPrevEndRow() != null && extent.getEndRow() != null && tls.extent.getPrevEndRow().compareTo(extent.getEndRow()) > 0) {
@@ -237,11 +237,11 @@ public class MergeStats {
+ verify.total);
return chopped == verify.chopped && unassigned == verify.unassigned && unassigned == verify.total;
}
-
+
public static void main(String[] args) throws Exception {
ClientOpts opts = new ClientOpts();
opts.parseArgs(MergeStats.class.getName(), args);
-
+
Connector conn = opts.getConnector();
Map<String,String> tableIdMap = conn.tableOperations().tableIdMap();
for (Entry<String,String> entry : tableIdMap.entrySet()) {
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/master/src/main/java/org/apache/accumulo/master/state/SetGoalState.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/state/SetGoalState.java b/server/master/src/main/java/org/apache/accumulo/master/state/SetGoalState.java
index bd65163..3442171 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/state/SetGoalState.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/state/SetGoalState.java
@@ -31,7 +31,7 @@ import org.apache.accumulo.server.security.SecurityUtil;
import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
public class SetGoalState {
-
+
/**
* Utility program that will change the goal state for the master from the command line.
*/
@@ -47,5 +47,5 @@ public class SetGoalState {
ZooReaderWriter.getInstance().putPersistentData(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZMASTER_GOAL_STATE, args[0].getBytes(UTF_8),
NodeExistsPolicy.OVERWRITE);
}
-
+
}
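Given the body above, the utility simply writes its single argument to the ZMASTER_GOAL_STATE node. Assuming the standard accumulo launcher and the MasterGoalState values (NORMAL, SAFE_MODE, CLEAN_STOP; an assumption about that enum, which is not shown in this diff), a usage example looks like:

  accumulo org.apache.accumulo.master.state.SetGoalState NORMAL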
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/master/src/main/java/org/apache/accumulo/master/state/TableCounts.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/state/TableCounts.java b/server/master/src/main/java/org/apache/accumulo/master/state/TableCounts.java
index 4ebd745..73395ea 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/state/TableCounts.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/state/TableCounts.java
@@ -20,19 +20,19 @@ import org.apache.accumulo.server.master.state.TabletState;
public class TableCounts {
int counts[] = new int[TabletState.values().length];
-
+
public int unassigned() {
return counts[TabletState.UNASSIGNED.ordinal()];
}
-
+
public int assigned() {
return counts[TabletState.ASSIGNED.ordinal()];
}
-
+
public int assignedToDeadServers() {
return counts[TabletState.ASSIGNED_TO_DEAD_SERVER.ordinal()];
}
-
+
public int hosted() {
return counts[TabletState.HOSTED.ordinal()];
}
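For context, TableCounts is the ordinal-indexed counter pattern: one slot per TabletState, bumped as tablets are observed. A tiny sketch of how a slot is incremented and read back:

  int[] counts = new int[TabletState.values().length];
  counts[TabletState.HOSTED.ordinal()]++;            // saw a hosted tablet
  counts[TabletState.UNASSIGNED.ordinal()]++;        // saw an unassigned tablet
  int hosted = counts[TabletState.HOSTED.ordinal()]; // == 1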
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/master/src/main/java/org/apache/accumulo/master/state/TableStats.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/state/TableStats.java b/server/master/src/main/java/org/apache/accumulo/master/state/TableStats.java
index f088a5d..127406c 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/state/TableStats.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/state/TableStats.java
@@ -27,12 +27,12 @@ public class TableStats {
private Map<Text,TableCounts> next;
private long startScan = 0;
private long endScan = 0;
-
+
public synchronized void begin() {
next = new HashMap<Text,TableCounts>();
startScan = System.currentTimeMillis();
}
-
+
public synchronized void update(Text tableId, TabletState state) {
TableCounts counts = next.get(tableId);
if (counts == null) {
@@ -41,30 +41,30 @@ public class TableStats {
}
counts.counts[state.ordinal()]++;
}
-
+
public synchronized void end() {
last = next;
next = null;
endScan = System.currentTimeMillis();
}
-
+
public synchronized Map<Text,TableCounts> getLast() {
return last;
}
-
+
public synchronized TableCounts getLast(Text tableId) {
TableCounts result = last.get(tableId);
if (result == null)
return new TableCounts();
return result;
}
-
+
public synchronized long getScanTime() {
if (endScan <= startScan)
return System.currentTimeMillis() - startScan;
return endScan - startScan;
}
-
+
public synchronized long lastScanFinished() {
return endScan;
}
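TableStats is driven as a begin/update/end lifecycle around a metadata scan, with getScanTime() reporting either the elapsed time of an in-progress scan or the duration of the last completed one. A hedged sketch of that usage, where observedTablets() and onlineServers() are hypothetical helpers supplying the tablets and the online-server set:

  TableStats stats = new TableStats();
  stats.begin();
  for (TabletLocationState tls : observedTablets()) {                   // hypothetical source
    stats.update(tls.extent.getTableId(), tls.getState(onlineServers())); // hypothetical server set
  }
  stats.end();
  long millis = stats.getScanTime(); // duration of the scan just finished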
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
index 049c9b3..c663686 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/BulkImport.java
@@ -141,7 +141,7 @@ public class BulkImport extends MasterRepo {
}
@Override
- //TODO Remove deprecation warning suppression when Hadoop1 support is dropped
+ // TODO Remove deprecation warning suppression when Hadoop1 support is dropped
@SuppressWarnings("deprecation")
public Repo<Master> call(long tid, Master master) throws Exception {
log.debug(" tid " + tid + " sourceDir " + sourceDir);
@@ -213,7 +213,7 @@ public class BulkImport extends MasterRepo {
}
}
- //TODO Remove deprecation warning suppression when Hadoop1 support is dropped
+ // TODO Remove deprecation warning suppression when Hadoop1 support is dropped
@SuppressWarnings("deprecation")
private String prepareBulkImport(Master master, final VolumeManager fs, String dir, String tableId) throws Exception {
final Path bulkDir = createNewBulkDir(fs, tableId);
@@ -288,7 +288,7 @@ public class BulkImport extends MasterRepo {
}));
}
workers.shutdown();
- while (!workers.awaitTermination(1000L, TimeUnit.MILLISECONDS)) { }
+ while (!workers.awaitTermination(1000L, TimeUnit.MILLISECONDS)) {}
for (Future<Exception> ex : results) {
if (ex.get() != null) {
@@ -456,8 +456,8 @@ class CopyFailed extends MasterRepo {
}
if (loadedFailures.size() > 0) {
- DistributedWorkQueue bifCopyQueue = new DistributedWorkQueue(Constants.ZROOT + "/" + master.getInstance().getInstanceID()
- + Constants.ZBULK_FAILED_COPYQ, master.getConfiguration());
+ DistributedWorkQueue bifCopyQueue = new DistributedWorkQueue(Constants.ZROOT + "/" + master.getInstance().getInstanceID() + Constants.ZBULK_FAILED_COPYQ,
+ master.getConfiguration());
HashSet<String> workIds = new HashSet<String>();
@@ -575,8 +575,7 @@ class LoadFiles extends MasterRepo {
server = pair.getFirst();
List<String> attempt = Collections.singletonList(file);
log.debug("Asking " + pair.getFirst() + " to bulk import " + file);
- List<String> fail = client.bulkImportFiles(Tracer.traceInfo(), master.rpcCreds(), tid, tableId, attempt,
- errorDir, setTime);
+ List<String> fail = client.bulkImportFiles(Tracer.traceInfo(), master.rpcCreds(), tid, tableId, attempt, errorDir, setTime);
if (fail.isEmpty()) {
loaded.add(file);
} else {
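The reformatted busy-wait in the first hunk above is the standard ExecutorService drain. A self-contained sketch of the same submit/shutdown/await/collect pattern, with trivial placeholder tasks:

  import java.util.ArrayList;
  import java.util.List;
  import java.util.concurrent.Callable;
  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;
  import java.util.concurrent.Future;
  import java.util.concurrent.TimeUnit;

  public class DrainExample {
    public static void main(String[] args) throws Exception {
      ExecutorService workers = Executors.newFixedThreadPool(4);
      List<Future<Exception>> results = new ArrayList<Future<Exception>>();
      for (int i = 0; i < 8; i++) {
        results.add(workers.submit(new Callable<Exception>() {
          public Exception call() {
            return null; // a real task would return its failure, if any
          }
        }));
      }
      workers.shutdown(); // stop accepting new tasks
      while (!workers.awaitTermination(1000L, TimeUnit.MILLISECONDS)) {}
      for (Future<Exception> ex : results) {
        if (ex.get() != null)
          throw ex.get(); // surface the first captured failure
      }
    }
  }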
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
index 7034e39..f1cf35c 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CloneTable.java
@@ -195,8 +195,8 @@ class ClonePermissions extends MasterRepo {
// give all table permissions to the creator
for (TablePermission permission : TablePermission.values()) {
try {
- AuditedSecurityOperation.getInstance(environment).grantTablePermission(environment.rpcCreds(), cloneInfo.user,
- cloneInfo.tableId, permission, cloneInfo.namespaceId);
+ AuditedSecurityOperation.getInstance(environment).grantTablePermission(environment.rpcCreds(), cloneInfo.user, cloneInfo.tableId, permission,
+ cloneInfo.namespaceId);
} catch (ThriftSecurityException e) {
Logger.getLogger(FinishCloneTable.class).error(e.getMessage(), e);
throw e;
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
index db8bbfe..fd7decf 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CompactRange.java
@@ -81,8 +81,7 @@ class CompactionDriver extends MasterRepo {
@Override
public long isReady(long tid, Master master) throws Exception {
- String zCancelID = Constants.ZROOT + "/" + master.getInstance().getInstanceID() + Constants.ZTABLES + "/" + tableId
- + Constants.ZTABLE_COMPACT_CANCEL_ID;
+ String zCancelID = Constants.ZROOT + "/" + master.getInstance().getInstanceID() + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_COMPACT_CANCEL_ID;
IZooReaderWriter zoo = ZooReaderWriter.getInstance();
@@ -206,7 +205,6 @@ public class CompactRange extends MasterRepo {
private byte[] endRow;
private byte[] config;
-
public CompactRange(String tableId, byte[] startRow, byte[] endRow, List<IteratorSetting> iterators, CompactionStrategyConfig compactionStrategy)
throws ThriftTableOperationException {
this.tableId = tableId;
http://git-wip-us.apache.org/repos/asf/accumulo/blob/6bc67602/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
index 95c9f79..103eef8 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
@@ -177,8 +177,8 @@ class ChooseDir extends MasterRepo {
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
// Constants.DEFAULT_TABLET_LOCATION has a leading slash prepended to it so we don't need to add one here
- tableInfo.dir = master.getFileSystem().choose(Optional.of(tableInfo.tableId), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + tableInfo.tableId
- + Constants.DEFAULT_TABLET_LOCATION;
+ tableInfo.dir = master.getFileSystem().choose(Optional.of(tableInfo.tableId), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR
+ + tableInfo.tableId + Constants.DEFAULT_TABLET_LOCATION;
return new CreateDir(tableInfo);
}
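Both this hunk and the TabletGroupWatcher hunk earlier compose the tablet directory the same way: base URI + Constants.HDFS_TABLES_DIR + separator + tableId + Constants.DEFAULT_TABLET_LOCATION. A sketch of the resulting string, assuming HDFS_TABLES_DIR is "/tables" and DEFAULT_TABLET_LOCATION is "/default_tablet" (both assumptions; only the leading-slash comment above is confirmed here), with a hypothetical base URI:

  String baseUri = "hdfs://namenode:8020/accumulo"; // chosen from the configured base URIs
  String tableId = "2";
  String dir = baseUri + "/tables" + "/" + tableId + "/default_tablet";
  // -> hdfs://namenode:8020/accumulo/tables/2/default_tablet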