Posted to commits@accumulo.apache.org by ct...@apache.org on 2013/07/03 20:32:53 UTC
svn commit: r1499510 [5/7] - in /accumulo/trunk:
core/src/main/java/org/apache/accumulo/core/client/
core/src/main/java/org/apache/accumulo/core/client/admin/
core/src/main/java/org/apache/accumulo/core/client/impl/
core/src/main/java/org/apache/accumu...
Copied: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java (from r1497021, accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java)
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java?p2=accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java&p1=accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java&r1=1497021&r2=1499510&rev=1499510&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java Wed Jul 3 18:32:51 2013
@@ -52,6 +52,16 @@ import org.apache.accumulo.core.data.Mut
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ChoppedColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ClonedColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ScanFileColumnFamily;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.CredentialHelper;
import org.apache.accumulo.core.security.thrift.TCredentials;
@@ -59,7 +69,6 @@ import org.apache.accumulo.core.tabletse
import org.apache.accumulo.core.util.ColumnFQ;
import org.apache.accumulo.core.util.FastFormat;
import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.core.util.RootTable;
import org.apache.accumulo.core.util.StringUtil;
import org.apache.accumulo.core.util.UtilWaitThread;
import org.apache.accumulo.core.zookeeper.ZooUtil;
@@ -86,21 +95,21 @@ import org.apache.zookeeper.KeeperExcept
/**
* provides a reference to the metadata table for updates by tablet servers
*/
-public class MetadataTable extends org.apache.accumulo.core.util.MetadataTable {
+public class MetadataTableUtil {
private static final Text EMPTY_TEXT = new Text();
private static Map<TCredentials,Writer> root_tables = new HashMap<TCredentials,Writer>();
private static Map<TCredentials,Writer> metadata_tables = new HashMap<TCredentials,Writer>();
- private static final Logger log = Logger.getLogger(MetadataTable.class);
+ private static final Logger log = Logger.getLogger(MetadataTableUtil.class);
private static final int SAVE_ROOT_TABLET_RETRIES = 3;
- private MetadataTable() {}
+ private MetadataTableUtil() {}
public synchronized static Writer getMetadataTable(TCredentials credentials) {
Writer metadataTable = metadata_tables.get(credentials);
if (metadataTable == null) {
- metadataTable = new Writer(HdfsZooInstance.getInstance(), credentials, ID);
+ metadataTable = new Writer(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID);
metadata_tables.put(credentials, metadataTable);
}
return metadataTable;
@@ -116,7 +125,8 @@ public class MetadataTable extends org.a
}
public static void putLockID(ZooLock zooLock, Mutation m) {
- LOCK_COLUMN.put(m, new Value(zooLock.getLockID().serialize(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + "/").getBytes()));
+ TabletsSection.ServerColumnFamily.LOCK_COLUMN.put(m, new Value(zooLock.getLockID().serialize(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + "/")
+ .getBytes()));
}
public static void update(TCredentials credentials, Mutation m, KeyExtent extent) {
@@ -189,8 +199,8 @@ public class MetadataTable extends org.a
Mutation m = new Mutation(extent.getMetadataEntry());
if (dfv.getNumEntries() > 0) {
- m.put(DATAFILE_COLUMN_FAMILY, path.meta(), new Value(dfv.encode()));
- TIME_COLUMN.put(m, new Value(time.getBytes()));
+ m.put(DataFileColumnFamily.NAME, path.meta(), new Value(dfv.encode()));
+ TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(time.getBytes()));
// stuff in this location
TServerInstance self = getTServerInstance(address, zooLock);
self.putLastLocation(m);
@@ -200,17 +210,17 @@ public class MetadataTable extends org.a
}
if (unusedWalLogs != null) {
for (String entry : unusedWalLogs) {
- m.putDelete(LOG_COLUMN_FAMILY, new Text(entry));
+ m.putDelete(LogColumnFamily.NAME, new Text(entry));
}
}
for (FileRef scanFile : filesInUseByScans)
- m.put(SCANFILE_COLUMN_FAMILY, scanFile.meta(), new Value("".getBytes()));
+ m.put(ScanFileColumnFamily.NAME, scanFile.meta(), new Value("".getBytes()));
if (mergeFile != null)
- m.putDelete(DATAFILE_COLUMN_FAMILY, mergeFile.meta());
+ m.putDelete(DataFileColumnFamily.NAME, mergeFile.meta());
- FLUSH_COLUMN.put(m, new Value((flushId + "").getBytes()));
+ TabletsSection.ServerColumnFamily.FLUSH_COLUMN.put(m, new Value((flushId + "").getBytes()));
update(credentials, zooLock, m, extent);
@@ -232,7 +242,7 @@ public class MetadataTable extends org.a
public static void updateTabletFlushID(KeyExtent extent, long flushID, TCredentials credentials, ZooLock zooLock) {
if (!extent.isRootTablet()) {
Mutation m = new Mutation(extent.getMetadataEntry());
- FLUSH_COLUMN.put(m, new Value((flushID + "").getBytes()));
+ TabletsSection.ServerColumnFamily.FLUSH_COLUMN.put(m, new Value((flushID + "").getBytes()));
update(credentials, zooLock, m, extent);
}
}
@@ -240,7 +250,7 @@ public class MetadataTable extends org.a
public static void updateTabletCompactID(KeyExtent extent, long compactID, TCredentials credentials, ZooLock zooLock) {
if (!extent.isRootTablet()) {
Mutation m = new Mutation(extent.getMetadataEntry());
- COMPACT_COLUMN.put(m, new Value((compactID + "").getBytes()));
+ TabletsSection.ServerColumnFamily.COMPACT_COLUMN.put(m, new Value((compactID + "").getBytes()));
update(credentials, zooLock, m, extent);
}
}
@@ -252,18 +262,18 @@ public class MetadataTable extends org.a
for (Entry<FileRef,DataFileValue> entry : estSizes.entrySet()) {
Text file = entry.getKey().meta();
- m.put(DATAFILE_COLUMN_FAMILY, file, new Value(entry.getValue().encode()));
- m.put(BULKFILE_COLUMN_FAMILY, file, new Value(tidBytes));
+ m.put(DataFileColumnFamily.NAME, file, new Value(entry.getValue().encode()));
+ m.put(TabletsSection.BulkFileColumnFamily.NAME, file, new Value(tidBytes));
}
- TIME_COLUMN.put(m, new Value(time.getBytes()));
+ TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(time.getBytes()));
update(credentials, zooLock, m, extent);
}
public static void addTablet(KeyExtent extent, String path, TCredentials credentials, char timeType, ZooLock lock) {
Mutation m = extent.getPrevRowUpdateMutation();
- DIRECTORY_COLUMN.put(m, new Value(path.getBytes()));
- TIME_COLUMN.put(m, new Value((timeType + "0").getBytes()));
+ TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(path.getBytes()));
+ TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value((timeType + "0").getBytes()));
update(credentials, lock, m, extent);
}
@@ -305,10 +315,10 @@ public class MetadataTable extends org.a
colq = key.getColumnQualifier(colq);
// interpret the row id as a key extent
- if (DIRECTORY_COLUMN.equals(colf, colq))
+ if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.equals(colf, colq))
datafile = new Text(val.toString());
- else if (PREV_ROW_COLUMN.equals(colf, colq))
+ else if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(colf, colq))
prevRow = new Value(val);
if (datafile != null && prevRow != null) {
@@ -341,12 +351,12 @@ public class MetadataTable extends org.a
public static SortedMap<FileRef,DataFileValue> getDataFileSizes(KeyExtent extent, TCredentials credentials) throws IOException {
TreeMap<FileRef,DataFileValue> sizes = new TreeMap<FileRef,DataFileValue>();
- Scanner mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, ID, Authorizations.EMPTY);
- mdScanner.fetchColumnFamily(DATAFILE_COLUMN_FAMILY);
+ Scanner mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
+ mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
Text row = extent.getMetadataEntry();
VolumeManager fs = VolumeManagerImpl.get();
- Key endKey = new Key(row, DATAFILE_COLUMN_FAMILY, new Text(""));
+ Key endKey = new Key(row, DataFileColumnFamily.NAME, new Text(""));
endKey = endKey.followingKey(PartialKey.ROW_COLFAM);
mdScanner.setRange(new Range(new Key(row), endKey));
@@ -365,25 +375,25 @@ public class MetadataTable extends org.a
Map<FileRef,Long> bulkLoadedFiles, TCredentials credentials, String time, long lastFlushID, long lastCompactID, ZooLock zooLock) {
Mutation m = extent.getPrevRowUpdateMutation();
- DIRECTORY_COLUMN.put(m, new Value(path.getBytes()));
- TIME_COLUMN.put(m, new Value(time.getBytes()));
+ TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(path.getBytes()));
+ TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(time.getBytes()));
if (lastFlushID > 0)
- FLUSH_COLUMN.put(m, new Value(("" + lastFlushID).getBytes()));
+ TabletsSection.ServerColumnFamily.FLUSH_COLUMN.put(m, new Value(("" + lastFlushID).getBytes()));
if (lastCompactID > 0)
- COMPACT_COLUMN.put(m, new Value(("" + lastCompactID).getBytes()));
+ TabletsSection.ServerColumnFamily.COMPACT_COLUMN.put(m, new Value(("" + lastCompactID).getBytes()));
if (location != null) {
- m.put(CURRENT_LOCATION_COLUMN_FAMILY, location.asColumnQualifier(), location.asMutationValue());
- m.putDelete(FUTURE_LOCATION_COLUMN_FAMILY, location.asColumnQualifier());
+ m.put(TabletsSection.CurrentLocationColumnFamily.NAME, location.asColumnQualifier(), location.asMutationValue());
+ m.putDelete(TabletsSection.FutureLocationColumnFamily.NAME, location.asColumnQualifier());
}
for (Entry<FileRef,DataFileValue> entry : datafileSizes.entrySet()) {
- m.put(DATAFILE_COLUMN_FAMILY, entry.getKey().meta(), new Value(entry.getValue().encode()));
+ m.put(DataFileColumnFamily.NAME, entry.getKey().meta(), new Value(entry.getValue().encode()));
}
for (Entry<FileRef,Long> entry : bulkLoadedFiles.entrySet()) {
byte[] tidBytes = Long.toString(entry.getValue()).getBytes();
- m.put(BULKFILE_COLUMN_FAMILY, entry.getKey().meta(), new Value(tidBytes));
+ m.put(TabletsSection.BulkFileColumnFamily.NAME, entry.getKey().meta(), new Value(tidBytes));
}
update(credentials, zooLock, m, extent);
@@ -392,34 +402,34 @@ public class MetadataTable extends org.a
public static void rollBackSplit(Text metadataEntry, Text oldPrevEndRow, TCredentials credentials, ZooLock zooLock) {
KeyExtent ke = new KeyExtent(metadataEntry, oldPrevEndRow);
Mutation m = ke.getPrevRowUpdateMutation();
- SPLIT_RATIO_COLUMN.putDelete(m);
- OLD_PREV_ROW_COLUMN.putDelete(m);
+ TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.putDelete(m);
+ TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.putDelete(m);
update(credentials, zooLock, m, new KeyExtent(metadataEntry, (Text) null));
}
public static void splitTablet(KeyExtent extent, Text oldPrevEndRow, double splitRatio, TCredentials credentials, ZooLock zooLock) {
Mutation m = extent.getPrevRowUpdateMutation(); //
- SPLIT_RATIO_COLUMN.put(m, new Value(Double.toString(splitRatio).getBytes()));
+ TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value(Double.toString(splitRatio).getBytes()));
- OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(oldPrevEndRow));
- CHOPPED_COLUMN.putDelete(m);
+ TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(oldPrevEndRow));
+ ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);
update(credentials, zooLock, m, extent);
}
public static void finishSplit(Text metadataEntry, Map<FileRef,DataFileValue> datafileSizes, List<FileRef> highDatafilesToRemove, TCredentials credentials,
ZooLock zooLock) {
Mutation m = new Mutation(metadataEntry);
- SPLIT_RATIO_COLUMN.putDelete(m);
- OLD_PREV_ROW_COLUMN.putDelete(m);
- CHOPPED_COLUMN.putDelete(m);
+ TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.putDelete(m);
+ TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.putDelete(m);
+ ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);
for (Entry<FileRef,DataFileValue> entry : datafileSizes.entrySet()) {
- m.put(DATAFILE_COLUMN_FAMILY, entry.getKey().meta(), new Value(entry.getValue().encode()));
+ m.put(DataFileColumnFamily.NAME, entry.getKey().meta(), new Value(entry.getValue().encode()));
}
for (FileRef pathToRemove : highDatafilesToRemove) {
- m.putDelete(DATAFILE_COLUMN_FAMILY, pathToRemove.meta());
+ m.putDelete(DataFileColumnFamily.NAME, pathToRemove.meta());
}
update(credentials, zooLock, m, new KeyExtent(metadataEntry, (Text) null));
@@ -448,16 +458,16 @@ public class MetadataTable extends org.a
Mutation m = new Mutation(extent.getMetadataEntry());
for (FileRef pathToRemove : datafilesToDelete)
- m.putDelete(DATAFILE_COLUMN_FAMILY, pathToRemove.meta());
+ m.putDelete(DataFileColumnFamily.NAME, pathToRemove.meta());
for (FileRef scanFile : scanFiles)
- m.put(SCANFILE_COLUMN_FAMILY, scanFile.meta(), new Value("".getBytes()));
+ m.put(ScanFileColumnFamily.NAME, scanFile.meta(), new Value("".getBytes()));
if (size.getNumEntries() > 0)
- m.put(DATAFILE_COLUMN_FAMILY, path.meta(), new Value(size.encode()));
+ m.put(DataFileColumnFamily.NAME, path.meta(), new Value(size.encode()));
if (compactionId != null)
- COMPACT_COLUMN.put(m, new Value(("" + compactionId).getBytes()));
+ TabletsSection.ServerColumnFamily.COMPACT_COLUMN.put(m, new Value(("" + compactionId).getBytes()));
TServerInstance self = getTServerInstance(address, zooLock);
self.putLastLocation(m);
@@ -484,8 +494,6 @@ public class MetadataTable extends org.a
}
public static Mutation createDeleteMutation(String tableId, String pathToRemove) throws IOException {
- String prefix = DELETED_RANGE.getStartKey().getRow().toString();
-
if (!pathToRemove.contains(":")) {
if (pathToRemove.startsWith("../"))
pathToRemove = pathToRemove.substring(2);
@@ -494,7 +502,7 @@ public class MetadataTable extends org.a
}
Path path = VolumeManagerImpl.get().getFullPath(ServerConstants.getTablesDirs(), pathToRemove);
- Mutation delFlag = new Mutation(new Text(prefix + path.toString()));
+ Mutation delFlag = new Mutation(new Text(MetadataSchema.DeletesSection.getRowPrefix() + path.toString()));
delFlag.put(EMPTY_TEXT, EMPTY_TEXT, new Value(new byte[] {}));
return delFlag;
}
@@ -503,7 +511,7 @@ public class MetadataTable extends org.a
Mutation m = new Mutation(extent.getMetadataEntry());
for (FileRef pathToRemove : scanFiles)
- m.putDelete(SCANFILE_COLUMN_FAMILY, pathToRemove.meta());
+ m.putDelete(ScanFileColumnFamily.NAME, pathToRemove.meta());
update(credentials, zooLock, m, extent);
}
@@ -518,7 +526,7 @@ public class MetadataTable extends org.a
// check to see if prev tablet exist in metadata tablet
Key prevRowKey = new Key(new Text(KeyExtent.getMetadataEntry(table, metadataPrevEndRow)));
- ScannerImpl scanner2 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, ID, Authorizations.EMPTY);
+ ScannerImpl scanner2 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
scanner2.setRange(new Range(prevRowKey, prevRowKey.followingKey(PartialKey.ROW)));
VolumeManager fs = VolumeManagerImpl.get();
@@ -531,17 +539,17 @@ public class MetadataTable extends org.a
List<FileRef> highDatafilesToRemove = new ArrayList<FileRef>();
- Scanner scanner3 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, ID, Authorizations.EMPTY);
+ Scanner scanner3 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
Key rowKey = new Key(metadataEntry);
SortedMap<FileRef,DataFileValue> origDatafileSizes = new TreeMap<FileRef,DataFileValue>();
SortedMap<FileRef,DataFileValue> highDatafileSizes = new TreeMap<FileRef,DataFileValue>();
SortedMap<FileRef,DataFileValue> lowDatafileSizes = new TreeMap<FileRef,DataFileValue>();
- scanner3.fetchColumnFamily(DATAFILE_COLUMN_FAMILY);
+ scanner3.fetchColumnFamily(DataFileColumnFamily.NAME);
scanner3.setRange(new Range(rowKey, rowKey.followingKey(PartialKey.ROW)));
for (Entry<Key,Value> entry : scanner3) {
- if (entry.getKey().compareColumnFamily(DATAFILE_COLUMN_FAMILY) == 0) {
+ if (entry.getKey().compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
origDatafileSizes.put(new FileRef(fs, entry.getKey()), new DataFileValue(entry.getValue().get()));
}
}
@@ -549,7 +557,7 @@ public class MetadataTable extends org.a
splitDatafiles(table, metadataPrevEndRow, splitRatio, new HashMap<FileRef,FileUtil.FileInfo>(), origDatafileSizes, lowDatafileSizes, highDatafileSizes,
highDatafilesToRemove);
- MetadataTable.finishSplit(metadataEntry, highDatafileSizes, highDatafilesToRemove, credentials, lock);
+ MetadataTableUtil.finishSplit(metadataEntry, highDatafileSizes, highDatafilesToRemove, credentials, lock);
return new KeyExtent(metadataEntry, KeyExtent.encodePrevEndRow(metadataPrevEndRow));
}
@@ -603,32 +611,32 @@ public class MetadataTable extends org.a
throws AccumuloException, IOException {
log.info("Incomplete split " + metadataEntry + " attempting to fix");
- Value oper = columns.get(OLD_PREV_ROW_COLUMN);
+ Value oper = columns.get(TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN);
- if (columns.get(SPLIT_RATIO_COLUMN) == null) {
+ if (columns.get(TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN) == null) {
throw new IllegalArgumentException("Metadata entry does not have split ratio (" + metadataEntry + ")");
}
- double splitRatio = Double.parseDouble(new String(columns.get(SPLIT_RATIO_COLUMN).get()));
+ double splitRatio = Double.parseDouble(new String(columns.get(TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN).get()));
- Value prevEndRowIBW = columns.get(PREV_ROW_COLUMN);
+ Value prevEndRowIBW = columns.get(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN);
if (prevEndRowIBW == null) {
throw new IllegalArgumentException("Metadata entry does not have prev row (" + metadataEntry + ")");
}
- Value time = columns.get(TIME_COLUMN);
+ Value time = columns.get(TabletsSection.ServerColumnFamily.TIME_COLUMN);
if (time == null) {
throw new IllegalArgumentException("Metadata entry does not have time (" + metadataEntry + ")");
}
- Value flushID = columns.get(FLUSH_COLUMN);
+ Value flushID = columns.get(TabletsSection.ServerColumnFamily.FLUSH_COLUMN);
long initFlushID = -1;
if (flushID != null)
initFlushID = Long.parseLong(flushID.toString());
- Value compactID = columns.get(COMPACT_COLUMN);
+ Value compactID = columns.get(TabletsSection.ServerColumnFamily.COMPACT_COLUMN);
long initCompactID = -1;
if (compactID != null)
initCompactID = Long.parseLong(compactID.toString());
@@ -641,9 +649,9 @@ public class MetadataTable extends org.a
}
public static void deleteTable(String tableId, boolean insertDeletes, TCredentials credentials, ZooLock lock) throws AccumuloException, IOException {
- Scanner ms = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, ID, Authorizations.EMPTY);
+ Scanner ms = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
Text tableIdText = new Text(tableId);
- BatchWriter bw = new BatchWriterImpl(HdfsZooInstance.getInstance(), credentials, ID, new BatchWriterConfig().setMaxMemory(1000000)
+ BatchWriter bw = new BatchWriterImpl(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, new BatchWriterConfig().setMaxMemory(1000000)
.setMaxLatency(120000l, TimeUnit.MILLISECONDS).setMaxWriteThreads(2));
// scan metadata for our table and delete everything we find
@@ -653,18 +661,18 @@ public class MetadataTable extends org.a
// insert deletes before deleting data from !METADATA... this makes the code fault tolerant
if (insertDeletes) {
- ms.fetchColumnFamily(DATAFILE_COLUMN_FAMILY);
- DIRECTORY_COLUMN.fetch(ms);
+ ms.fetchColumnFamily(DataFileColumnFamily.NAME);
+ TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(ms);
for (Entry<Key,Value> cell : ms) {
Key key = cell.getKey();
- if (key.getColumnFamily().equals(DATAFILE_COLUMN_FAMILY)) {
+ if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
FileRef ref = new FileRef(VolumeManagerImpl.get(), key);
bw.addMutation(createDeleteMutation(tableId, ref.meta().toString()));
}
- if (DIRECTORY_COLUMN.hasColumns(key)) {
+ if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
bw.addMutation(createDeleteMutation(tableId, cell.getValue().toString()));
}
}
@@ -775,13 +783,13 @@ public class MetadataTable extends org.a
} else {
String value = StringUtil.join(entry.logSet, ";") + "|" + entry.tabletId;
Mutation m = new Mutation(entry.extent.getMetadataEntry());
- m.put(LOG_COLUMN_FAMILY, new Text(entry.server + "/" + entry.filename), new Value(value.getBytes()));
+ m.put(LogColumnFamily.NAME, new Text(entry.server + "/" + entry.filename), new Value(value.getBytes()));
update(credentials, zooLock, m, entry.extent);
}
}
public static LogEntry entryFromKeyValue(Key key, Value value) {
- MetadataTable.LogEntry e = new MetadataTable.LogEntry();
+ MetadataTableUtil.LogEntry e = new MetadataTableUtil.LogEntry();
e.extent = new KeyExtent(key.getRow(), EMPTY_TEXT);
String[] parts = key.getColumnQualifier().toString().split("/", 2);
e.server = parts[0];
@@ -813,10 +821,10 @@ public class MetadataTable extends org.a
}
} else {
- String systemTableToCheck = extent.isMeta() ? RootTable.ID : ID;
+ String systemTableToCheck = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, systemTableToCheck, Authorizations.EMPTY);
- scanner.fetchColumnFamily(LOG_COLUMN_FAMILY);
- scanner.fetchColumnFamily(DATAFILE_COLUMN_FAMILY);
+ scanner.fetchColumnFamily(LogColumnFamily.NAME);
+ scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
scanner.setRange(extent.toMetadataRange());
for (Entry<Key,Value> entry : scanner) {
@@ -824,9 +832,9 @@ public class MetadataTable extends org.a
throw new RuntimeException("Unexpected row " + entry.getKey().getRow() + " expected " + extent.getMetadataEntry());
}
- if (entry.getKey().getColumnFamily().equals(LOG_COLUMN_FAMILY)) {
+ if (entry.getKey().getColumnFamily().equals(LogColumnFamily.NAME)) {
result.add(entryFromKeyValue(entry.getKey(), entry.getValue()));
- } else if (entry.getKey().getColumnFamily().equals(DATAFILE_COLUMN_FAMILY)) {
+ } else if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
DataFileValue dfv = new DataFileValue(entry.getValue().get());
sizes.put(new FileRef(fs, entry.getKey()), dfv);
} else {
@@ -850,7 +858,7 @@ public class MetadataTable extends org.a
Text pattern = extent.getMetadataEntry();
for (Entry<Key,Value> entry : scanner) {
Text row = entry.getKey().getRow();
- if (entry.getKey().getColumnFamily().equals(LOG_COLUMN_FAMILY)) {
+ if (entry.getKey().getColumnFamily().equals(LogColumnFamily.NAME)) {
if (row.equals(pattern)) {
result.add(entryFromKeyValue(entry.getKey(), entry.getValue()));
}
@@ -894,13 +902,13 @@ public class MetadataTable extends org.a
}
private static Scanner getTabletLogScanner(TCredentials credentials, KeyExtent extent) {
- String tableId = ID;
+ String tableId = MetadataTable.ID;
if (extent.isMeta())
tableId = RootTable.ID;
Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, tableId, Authorizations.EMPTY);
- scanner.fetchColumnFamily(LOG_COLUMN_FAMILY);
+ scanner.fetchColumnFamily(LogColumnFamily.NAME);
Text start = extent.getMetadataEntry();
- Key endKey = new Key(start, LOG_COLUMN_FAMILY);
+ Key endKey = new Key(start, LogColumnFamily.NAME);
endKey = endKey.followingKey(PartialKey.ROW_COLFAM);
scanner.setRange(new Range(new Key(start), endKey));
return scanner;
@@ -915,10 +923,10 @@ public class MetadataTable extends org.a
rootTabletEntries = getLogEntries(creds, RootTable.EXTENT).iterator();
try {
Scanner scanner = HdfsZooInstance.getInstance().getConnector(creds.getPrincipal(), CredentialHelper.extractToken(creds))
- .createScanner(NAME, Authorizations.EMPTY);
- log.info("Setting range to " + KEYSPACE);
- scanner.setRange(KEYSPACE);
- scanner.fetchColumnFamily(LOG_COLUMN_FAMILY);
+ .createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ log.info("Setting range to " + MetadataSchema.TabletsSection.getRange());
+ scanner.setRange(MetadataSchema.TabletsSection.getRange());
+ scanner.fetchColumnFamily(LogColumnFamily.NAME);
metadataEntries = scanner.iterator();
} catch (Exception ex) {
throw new IOException(ex);
@@ -966,7 +974,7 @@ public class MetadataTable extends org.a
}
} else {
Mutation m = new Mutation(entry.extent.getMetadataEntry());
- m.putDelete(LOG_COLUMN_FAMILY, new Text(entry.server + "/" + entry.filename));
+ m.putDelete(LogColumnFamily.NAME, new Text(entry.server + "/" + entry.filename));
update(SecurityConstants.getSystemCredentials(), zooLock, m, entry.extent);
}
}
@@ -974,7 +982,7 @@ public class MetadataTable extends org.a
private static void getFiles(Set<String> files, Map<Key,Value> tablet, String srcTableId) {
for (Entry<Key,Value> entry : tablet.entrySet()) {
- if (entry.getKey().getColumnFamily().equals(DATAFILE_COLUMN_FAMILY)) {
+ if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
String cf = entry.getKey().getColumnQualifier().toString();
if (srcTableId != null && !cf.startsWith("../") && !cf.contains(":")) {
cf = "../" + srcTableId + entry.getKey().getColumnQualifier();
@@ -990,14 +998,14 @@ public class MetadataTable extends org.a
Mutation m = new Mutation(KeyExtent.getMetadataEntry(new Text(tableId), ke.getEndRow()));
for (Entry<Key,Value> entry : tablet.entrySet()) {
- if (entry.getKey().getColumnFamily().equals(DATAFILE_COLUMN_FAMILY)) {
+ if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
String cf = entry.getKey().getColumnQualifier().toString();
if (!cf.startsWith("../") && !cf.contains(":"))
cf = "../" + srcTableId + entry.getKey().getColumnQualifier();
m.put(entry.getKey().getColumnFamily(), new Text(cf), entry.getValue());
- } else if (entry.getKey().getColumnFamily().equals(CURRENT_LOCATION_COLUMN_FAMILY)) {
- m.put(LAST_LOCATION_COLUMN_FAMILY, entry.getKey().getColumnQualifier(), entry.getValue());
- } else if (entry.getKey().getColumnFamily().equals(LAST_LOCATION_COLUMN_FAMILY)) {
+ } else if (entry.getKey().getColumnFamily().equals(TabletsSection.CurrentLocationColumnFamily.NAME)) {
+ m.put(TabletsSection.LastLocationColumnFamily.NAME, entry.getKey().getColumnQualifier(), entry.getValue());
+ } else if (entry.getKey().getColumnFamily().equals(TabletsSection.LastLocationColumnFamily.NAME)) {
// skip
} else {
m.put(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier(), entry.getValue());
@@ -1007,14 +1015,14 @@ public class MetadataTable extends org.a
}
private static Scanner createCloneScanner(String tableId, Connector conn) throws TableNotFoundException {
- Scanner mscanner = new IsolatedScanner(conn.createScanner(NAME, Authorizations.EMPTY));
+ Scanner mscanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
- mscanner.fetchColumnFamily(DATAFILE_COLUMN_FAMILY);
- mscanner.fetchColumnFamily(CURRENT_LOCATION_COLUMN_FAMILY);
- mscanner.fetchColumnFamily(LAST_LOCATION_COLUMN_FAMILY);
- mscanner.fetchColumnFamily(CLONED_COLUMN_FAMILY);
- PREV_ROW_COLUMN.fetch(mscanner);
- TIME_COLUMN.fetch(mscanner);
+ mscanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+ mscanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
+ mscanner.fetchColumnFamily(TabletsSection.LastLocationColumnFamily.NAME);
+ mscanner.fetchColumnFamily(ClonedColumnFamily.NAME);
+ TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(mscanner);
+ TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(mscanner);
return mscanner;
}
@@ -1051,7 +1059,7 @@ public class MetadataTable extends org.a
boolean cloneSuccessful = false;
for (Entry<Key,Value> entry : cloneTablet.entrySet()) {
- if (entry.getKey().getColumnFamily().equals(CLONED_COLUMN_FAMILY)) {
+ if (entry.getKey().getColumnFamily().equals(ClonedColumnFamily.NAME)) {
cloneSuccessful = true;
break;
}
@@ -1107,7 +1115,7 @@ public class MetadataTable extends org.a
} else {
// write out marker that this tablet was successfully cloned
Mutation m = new Mutation(cloneTablet.keySet().iterator().next().getRow());
- m.put(CLONED_COLUMN_FAMILY, new Text(""), new Value("OK".getBytes()));
+ m.put(ClonedColumnFamily.NAME, new Text(""), new Value("OK".getBytes()));
bw.addMutation(m);
}
}
@@ -1119,7 +1127,7 @@ public class MetadataTable extends org.a
public static void cloneTable(Instance instance, String srcTableId, String tableId) throws Exception {
Connector conn = instance.getConnector(SecurityConstants.SYSTEM_PRINCIPAL, SecurityConstants.getSystemToken());
- BatchWriter bw = conn.createBatchWriter(NAME, new BatchWriterConfig());
+ BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
while (true) {
@@ -1152,9 +1160,9 @@ public class MetadataTable extends org.a
}
// delete the clone markers and create directory entries
- Scanner mscanner = conn.createScanner(NAME, Authorizations.EMPTY);
+ Scanner mscanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
- mscanner.fetchColumnFamily(CLONED_COLUMN_FAMILY);
+ mscanner.fetchColumnFamily(ClonedColumnFamily.NAME);
int dirCount = 0;
@@ -1162,7 +1170,7 @@ public class MetadataTable extends org.a
Key k = entry.getKey();
Mutation m = new Mutation(k.getRow());
m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
- DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
+ TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
bw.addMutation(m);
}
@@ -1172,15 +1180,15 @@ public class MetadataTable extends org.a
public static void chopped(KeyExtent extent, ZooLock zooLock) {
Mutation m = new Mutation(extent.getMetadataEntry());
- CHOPPED_COLUMN.put(m, new Value("chopped".getBytes()));
+ ChoppedColumnFamily.CHOPPED_COLUMN.put(m, new Value("chopped".getBytes()));
update(SecurityConstants.getSystemCredentials(), zooLock, m, extent);
}
public static void removeBulkLoadEntries(Connector conn, String tableId, long tid) throws Exception {
- Scanner mscanner = new IsolatedScanner(conn.createScanner(NAME, Authorizations.EMPTY));
+ Scanner mscanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
- mscanner.fetchColumnFamily(BULKFILE_COLUMN_FAMILY);
- BatchWriter bw = conn.createBatchWriter(NAME, new BatchWriterConfig());
+ mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
+ BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
for (Entry<Key,Value> entry : mscanner) {
log.debug("Looking at entry " + entry + " with tid " + tid);
if (Long.parseLong(entry.getValue().toString()) == tid) {
@@ -1197,9 +1205,9 @@ public class MetadataTable extends org.a
List<FileRef> result = new ArrayList<FileRef>();
try {
VolumeManager fs = VolumeManagerImpl.get();
- Scanner mscanner = new IsolatedScanner(conn.createScanner(extent.isMeta() ? RootTable.NAME : NAME, Authorizations.EMPTY));
+ Scanner mscanner = new IsolatedScanner(conn.createScanner(extent.isMeta() ? RootTable.NAME : MetadataTable.NAME, Authorizations.EMPTY));
mscanner.setRange(extent.toMetadataRange());
- mscanner.fetchColumnFamily(BULKFILE_COLUMN_FAMILY);
+ mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
for (Entry<Key,Value> entry : mscanner) {
if (Long.parseLong(entry.getValue().toString()) == tid) {
result.add(new FileRef(fs, entry.getKey()));
@@ -1217,9 +1225,9 @@ public class MetadataTable extends org.a
Map<FileRef,Long> ret = new HashMap<FileRef,Long>();
VolumeManager fs = VolumeManagerImpl.get();
- Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, extent.isMeta() ? RootTable.ID : ID, Authorizations.EMPTY);
+ Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, extent.isMeta() ? RootTable.ID : MetadataTable.ID, Authorizations.EMPTY);
scanner.setRange(new Range(metadataRow));
- scanner.fetchColumnFamily(BULKFILE_COLUMN_FAMILY);
+ scanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
for (Entry<Key,Value> entry : scanner) {
Long tid = Long.parseLong(entry.getValue().toString());
ret.put(new FileRef(fs, entry.getKey()), tid);
@@ -1229,7 +1237,7 @@ public class MetadataTable extends org.a
public static void addBulkLoadInProgressFlag(String path) {
- Mutation m = new Mutation(BLIP_FLAG_PREFIX + path);
+ Mutation m = new Mutation(MetadataSchema.BlipSection.getRowPrefix() + path);
m.put(EMPTY_TEXT, EMPTY_TEXT, new Value(new byte[] {}));
// new KeyExtent is only added to force update to write to the metadata table, not the root table
@@ -1239,7 +1247,7 @@ public class MetadataTable extends org.a
public static void removeBulkLoadInProgressFlag(String path) {
- Mutation m = new Mutation(BLIP_FLAG_PREFIX + path);
+ Mutation m = new Mutation(MetadataSchema.BlipSection.getRowPrefix() + path);
m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
// new KeyExtent is only added to force update to write to the metadata table, not the root table
@@ -1248,23 +1256,24 @@ public class MetadataTable extends org.a
}
public static void moveMetaDeleteMarkers(Instance instance, TCredentials creds) {
- if (true)
- throw new UnsupportedOperationException();
- // move delete markers from the normal delete keyspace to the root tablet delete keyspace if the files are for the !METADATA table
- Scanner scanner = new ScannerImpl(instance, creds, ID, Authorizations.EMPTY);
- scanner.setRange(new Range(DELETED_RANGE));
+ // move old delete markers to new location, to standardize table schema between all metadata tables
+ byte[] EMPTY_BYTES = new byte[0];
+ Scanner scanner = new ScannerImpl(instance, creds, RootTable.ID, Authorizations.EMPTY);
+ String oldDeletesPrefix = "!!~del";
+ Range oldDeletesRange = new Range(oldDeletesPrefix, true, "!!~dem", false);
+ scanner.setRange(oldDeletesRange);
for (Entry<Key,Value> entry : scanner) {
String row = entry.getKey().getRow().toString();
- if (row.startsWith(DELETED_RANGE.getStartKey().getRow().toString())) {
- String filename = row.substring(DELETED_RANGE.getStartKey().getRow().toString().length());
+ if (row.startsWith(oldDeletesPrefix)) {
+ String filename = row.substring(oldDeletesPrefix.length());
// add the new entry first
- log.info("Moving " + filename + " marker to the root tablet");
- Mutation m = new Mutation(DELETED_RANGE.getStartKey().getRow().toString() + filename);
- m.put(new byte[] {}, new byte[] {}, new byte[] {});
+ log.info("Moving " + filename + " marker in " + RootTable.NAME);
+ Mutation m = new Mutation(MetadataSchema.DeletesSection.getRowPrefix() + filename);
+ m.put(EMPTY_BYTES, EMPTY_BYTES, EMPTY_BYTES);
update(creds, m, null);
// remove the old entry
m = new Mutation(entry.getKey().getRow());
- m.putDelete(new byte[] {}, new byte[] {});
+ m.putDelete(EMPTY_BYTES, EMPTY_BYTES);
update(creds, m, null);
} else {
break;
@@ -1272,4 +1281,33 @@ public class MetadataTable extends org.a
}
}
+
+ public static SortedMap<Text,SortedMap<ColumnFQ,Value>> getTabletEntries(SortedMap<Key,Value> tabletKeyValues, List<ColumnFQ> columns) {
+ TreeMap<Text,SortedMap<ColumnFQ,Value>> tabletEntries = new TreeMap<Text,SortedMap<ColumnFQ,Value>>();
+
+ HashSet<ColumnFQ> colSet = null;
+ if (columns != null) {
+ colSet = new HashSet<ColumnFQ>(columns);
+ }
+
+ for (Entry<Key,Value> entry : tabletKeyValues.entrySet()) {
+
+ if (columns != null && !colSet.contains(new ColumnFQ(entry.getKey()))) {
+ continue;
+ }
+
+ Text row = entry.getKey().getRow();
+
+ SortedMap<ColumnFQ,Value> colVals = tabletEntries.get(row);
+ if (colVals == null) {
+ colVals = new TreeMap<ColumnFQ,Value>();
+ tabletEntries.put(row, colVals);
+ }
+
+ colVals.put(new ColumnFQ(entry.getKey()), entry.getValue());
+ }
+
+ return tabletEntries;
+ }
+
}
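
The section above renames the server-side helper MetadataTable to MetadataTableUtil and retires its flat schema constants (DATAFILE_COLUMN_FAMILY, TIME_COLUMN, FLUSH_COLUMN, ...) in favor of the structured MetadataSchema.TabletsSection hierarchy in core. A minimal caller sketch against the relocated constants, using only types that appear in the diff; the class and method names below are illustrative, not part of this commit:

    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.client.TableNotFoundException;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.KeyExtent;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.MetadataTable;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.hadoop.io.Text;

    public class ListTabletFiles {
      // Print every data file entry recorded for the given table's tablets.
      public static void list(Connector conn, String tableId) throws TableNotFoundException {
        Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
        // Restrict the scan to this table's rows in the tablets section.
        scanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
        // The data file column family constant now lives under MetadataSchema.TabletsSection.
        scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
        for (Entry<Key,Value> entry : scanner)
          System.out.println(entry.getKey().getColumnQualifier() + " -> " + entry.getValue());
      }
    }
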
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java Wed Jul 3 18:32:51 2013
@@ -44,16 +44,19 @@ import org.apache.accumulo.core.iterator
import org.apache.accumulo.core.iterators.system.MultiIterator;
import org.apache.accumulo.core.iterators.system.VisibilityFilter;
import org.apache.accumulo.core.iterators.user.VersioningIterator;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.util.LocalityGroupUtil;
-import org.apache.accumulo.core.util.RootTable;
import org.apache.accumulo.core.util.TextUtil;
import org.apache.accumulo.server.ServerConstants;
import org.apache.accumulo.server.client.HdfsZooInstance;
import org.apache.accumulo.server.conf.ServerConfiguration;
import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.accumulo.server.fs.VolumeManagerImpl;
-import org.apache.accumulo.server.util.MetadataTable.LogEntry;
+import org.apache.accumulo.server.util.MetadataTableUtil.LogEntry;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -126,7 +129,7 @@ public class OfflineMetadataScanner exte
this.conf = conf;
List<LogEntry> rwal;
try {
- rwal = MetadataTable.getLogEntries(null, RootTable.EXTENT);
+ rwal = MetadataTableUtil.getLogEntries(null, RootTable.EXTENT);
} catch (Exception e) {
throw new RuntimeException("Failed to check if root tablet has write ahead log entries", e);
}
@@ -144,15 +147,15 @@ public class OfflineMetadataScanner exte
List<SortedKeyValueIterator<Key,Value>> readers = openMapFiles(allFiles, fs, conf);
HashSet<Column> columns = new HashSet<Column>();
- columns.add(new Column(TextUtil.getBytes(MetadataTable.DATAFILE_COLUMN_FAMILY), null, null));
- columns.add(new Column(TextUtil.getBytes(MetadataTable.LOG_COLUMN_FAMILY), null, null));
+ columns.add(new Column(TextUtil.getBytes(DataFileColumnFamily.NAME), null, null));
+ columns.add(new Column(TextUtil.getBytes(LogColumnFamily.NAME), null, null));
SortedKeyValueIterator<Key,Value> ssi = createSystemIter(new Range(), readers, columns);
int walogs = 0;
while (ssi.hasTop()) {
- if (ssi.getTopKey().compareColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY) == 0) {
+ if (ssi.getTopKey().compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
allFiles.add(fs.getFullPath(ssi.getTopKey()).toString());
} else {
walogs++;
@@ -261,7 +264,7 @@ public class OfflineMetadataScanner exte
ServerConfiguration conf = new ServerConfiguration(HdfsZooInstance.getInstance());
VolumeManager fs = VolumeManagerImpl.get();
OfflineMetadataScanner scanner = new OfflineMetadataScanner(conf.getConfiguration(), fs);
- scanner.setRange(MetadataTable.KEYSPACE);
+ scanner.setRange(MetadataSchema.TabletsSection.getRange());
for (Entry<Key,Value> entry : scanner)
System.out.println(entry.getKey() + " " + entry.getValue());
}
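
In OfflineMetadataScanner the scan range moves from the old MetadataTable.KEYSPACE constant to MetadataSchema.TabletsSection.getRange(). The same range works for an ordinary online scan; a minimal sketch, assuming only that a Connector is available:

    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.client.TableNotFoundException;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.MetadataTable;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema;
    import org.apache.accumulo.core.security.Authorizations;

    public class DumpTabletsSection {
      // Dump every entry in the tablets section of the metadata table.
      public static void dump(Connector conn) throws TableNotFoundException {
        Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
        // Replaces the old MetadataTable.KEYSPACE range.
        scanner.setRange(MetadataSchema.TabletsSection.getRange());
        for (Entry<Key,Value> entry : scanner)
          System.out.println(entry.getKey() + " " + entry.getValue());
      }
    }
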
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java Wed Jul 3 18:32:51 2013
@@ -26,7 +26,9 @@ import org.apache.accumulo.core.client.S
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
import org.apache.accumulo.server.cli.ClientOpts;
import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.accumulo.server.fs.VolumeManagerImpl;
@@ -43,7 +45,7 @@ public class RemoveEntriesForMissingFile
private static Logger log = Logger.getLogger(RemoveEntriesForMissingFiles.class);
static class Opts extends ClientOpts {
- @Parameter(names="--fix")
+ @Parameter(names = "--fix")
boolean fix = false;
}
@@ -56,11 +58,11 @@ public class RemoveEntriesForMissingFile
Connector connector = opts.getConnector();
Scanner metadata = connector.createScanner(MetadataTable.NAME, opts.auths);
metadata.setBatchSize(scanOpts.scanBatchSize);
- metadata.setRange(MetadataTable.KEYSPACE);
- metadata.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
+ metadata.setRange(MetadataSchema.TabletsSection.getRange());
+ metadata.fetchColumnFamily(DataFileColumnFamily.NAME);
int count = 0;
int missing = 0;
- BatchWriter writer = null;
+ BatchWriter writer = null;
if (opts.fix)
writer = connector.createBatchWriter(MetadataTable.NAME, bwOpts.getBatchWriterConfig());
for (Entry<Key,Value> entry : metadata) {
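
The hunk above ends before the loop body of RemoveEntriesForMissingFiles. A plausible sketch of the --fix path under the new imports; the existence check and control flow here are assumptions, not the committed code (it assumes VolumeManager exposes getFullPath(Key) and exists(Path), the former visible elsewhere in this commit):

    // Assumed shape of the per-entry check; not the committed loop body.
    for (Entry<Key,Value> entry : metadata) {
      count++;
      Key key = entry.getKey();
      Path file = fs.getFullPath(key);   // resolve the metadata entry to a filesystem path
      if (!fs.exists(file)) {
        missing++;
        log.info("File " + file + " is missing");
        if (opts.fix) {
          Mutation m = new Mutation(key.getRow());
          m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
          writer.addMutation(m);         // drop the dangling metadata entry
        }
      }
    }
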
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java Wed Jul 3 18:32:51 2013
@@ -39,6 +39,8 @@ import org.apache.accumulo.core.conf.Def
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.KeyExtent;
import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.util.NumUtil;
import org.apache.accumulo.server.ServerConstants;
@@ -54,7 +56,6 @@ import com.beust.jcommander.Parameter;
public class TableDiskUsage {
-
private static final Logger log = Logger.getLogger(Logger.class);
private int nextInternalId = 0;
private Map<String,Integer> internalIds = new HashMap<String,Integer>();
@@ -91,7 +92,7 @@ public class TableDiskUsage {
}
Map<List<String>,Long> calculateUsage() {
-
+
Map<List<Integer>,Long> usage = new HashMap<List<Integer>,Long>();
for (Entry<String,Integer[]> entry : tableFiles.entrySet()) {
@@ -138,8 +139,7 @@ public class TableDiskUsage {
}, humanReadable);
}
- public static Map<TreeSet<String>,Long> getDiskUsage(AccumuloConfiguration acuConf, Set<String> tableIds, FileSystem fs, Connector conn)
- throws IOException {
+ public static Map<TreeSet<String>,Long> getDiskUsage(AccumuloConfiguration acuConf, Set<String> tableIds, FileSystem fs, Connector conn) throws IOException {
TableDiskUsage tdu = new TableDiskUsage();
for (String tableId : tableIds)
@@ -155,7 +155,7 @@ public class TableDiskUsage {
} catch (TableNotFoundException e) {
throw new RuntimeException(e);
}
- mdScanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
+ mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
mdScanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
if (!mdScanner.iterator().hasNext()) {
@@ -264,13 +264,12 @@ public class TableDiskUsage {
printer.print(String.format(valueFormat + " %s", value, entry.getKey()));
}
}
-
static class Opts extends ClientOpts {
- @Parameter(description=" <table> { <table> ... } ")
+ @Parameter(description = " <table> { <table> ... } ")
List<String> tables = new ArrayList<String>();
}
-
+
/**
* @param args
*/
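
TableDiskUsage.getDiskUsage only has its signature rewrapped onto one line here, but the entry point is easy to miss in the noise. A hedged call sketch, assuming an AccumuloConfiguration acuConf, a Hadoop FileSystem fs, and a Connector conn are in scope; the table ids are hypothetical:

    // Each key of the result groups the tables that share the listed bytes.
    Set<String> tableIds = new HashSet<String>(Arrays.asList("1", "2"));
    Map<TreeSet<String>,Long> usage = TableDiskUsage.getDiskUsage(acuConf, tableIds, fs, conn);
    for (Entry<TreeSet<String>,Long> entry : usage.entrySet())
      System.out.println(entry.getKey() + " : " + entry.getValue() + " bytes");
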
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/TabletIterator.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/TabletIterator.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/TabletIterator.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/TabletIterator.java Wed Jul 3 18:32:51 2013
@@ -30,7 +30,8 @@ import org.apache.accumulo.core.data.Key
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
import org.apache.accumulo.core.util.UtilWaitThread;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Logger;
@@ -93,8 +94,8 @@ public class TabletIterator implements I
this.scanner = s;
this.range = range;
this.scanner.setRange(range);
- MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
- MetadataTable.DIRECTORY_COLUMN.fetch(scanner);
+ TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
+ TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
this.iter = s.iterator();
this.returnPrevEndRow = returnPrevEndRow;
this.returnDir = returnDir;
@@ -112,7 +113,7 @@ public class TabletIterator implements I
Key prevEndRowKey = currentTabletKeys.lastKey();
Value prevEndRowValue = currentTabletKeys.get(prevEndRowKey);
- if (!MetadataTable.PREV_ROW_COLUMN.hasColumns(prevEndRowKey)) {
+ if (!TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(prevEndRowKey)) {
log.debug(currentTabletKeys);
throw new RuntimeException("Unexpected key " + prevEndRowKey);
}
@@ -176,11 +177,11 @@ public class TabletIterator implements I
while (esIter.hasNext()) {
Map.Entry<Key,Value> entry = esIter.next();
- if (!returnPrevEndRow && MetadataTable.PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
+ if (!returnPrevEndRow && TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
esIter.remove();
}
- if (!returnDir && MetadataTable.DIRECTORY_COLUMN.hasColumns(entry.getKey())) {
+ if (!returnDir && TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(entry.getKey())) {
esIter.remove();
}
}
@@ -216,7 +217,7 @@ public class TabletIterator implements I
tm.put(entry.getKey(), entry.getValue());
- if (MetadataTable.PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
+ if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
sawPrevEndRow = true;
break;
}
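
TabletIterator now reaches the prev-row and directory ColumnFQ constants through TabletsSection rather than the old util MetadataTable. A minimal sketch of the same fetch/hasColumns pattern on a plain scanner, assuming a Connector conn; the calls themselves all appear in the diff:

    Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    // ColumnFQ constants relocated into MetadataSchema.TabletsSection.
    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(s);
    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(s);
    for (Entry<Key,Value> entry : s) {
      if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(entry.getKey()))
        System.out.println("prev end row entry for tablet row " + entry.getKey().getRow());
    }
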
Modified: accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java (original)
+++ accumulo/trunk/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java Wed Jul 3 18:32:51 2013
@@ -22,9 +22,7 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
-import java.util.SortedSet;
import java.util.TreeMap;
-import java.util.TreeSet;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
@@ -34,6 +32,7 @@ import org.apache.accumulo.core.client.A
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.impl.Tables;
import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.data.KeyExtent;
@@ -44,6 +43,7 @@ import org.apache.accumulo.core.data.thr
import org.apache.accumulo.core.data.thrift.TColumn;
import org.apache.accumulo.core.data.thrift.TKeyExtent;
import org.apache.accumulo.core.data.thrift.TRange;
+import org.apache.accumulo.core.metadata.MetadataServicer;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.CredentialHelper;
import org.apache.accumulo.core.security.thrift.TCredentials;
@@ -85,32 +85,36 @@ public class VerifyTabletAssignments {
else
System.out.println("Checking table " + tableName + " again, failures " + check.size());
- Map<KeyExtent,String> locations = new TreeMap<KeyExtent,String>();
- SortedSet<KeyExtent> tablets = new TreeSet<KeyExtent>();
+ TreeMap<KeyExtent,String> tabletLocations = new TreeMap<KeyExtent,String>();
Connector conn = opts.getConnector();
Instance inst = conn.getInstance();
- MetadataTable.getEntries(conn.getInstance(), CredentialHelper.create(opts.principal, opts.getToken(), opts.instance), tableName, false, locations, tablets);
+ String tableId = Tables.getNameToIdMap(inst).get(tableName);
+ TCredentials credentials = CredentialHelper.create(opts.principal, opts.getToken(), opts.instance);
+ MetadataServicer.forTableId(conn.getInstance(), credentials, tableId).getTabletLocations(tabletLocations);
final HashSet<KeyExtent> failures = new HashSet<KeyExtent>();
- for (KeyExtent keyExtent : tablets)
- if (!locations.containsKey(keyExtent))
- System.out.println(" Tablet " + keyExtent + " has no location");
- else if (opts.verbose)
- System.out.println(" Tablet " + keyExtent + " is located at " + locations.get(keyExtent));
-
Map<String,List<KeyExtent>> extentsPerServer = new TreeMap<String,List<KeyExtent>>();
- for (Entry<KeyExtent,String> entry : locations.entrySet()) {
- List<KeyExtent> extentList = extentsPerServer.get(entry.getValue());
- if (extentList == null) {
- extentList = new ArrayList<KeyExtent>();
- extentsPerServer.put(entry.getValue(), extentList);
- }
+ for (Entry<KeyExtent,String> entry : tabletLocations.entrySet()) {
+ KeyExtent keyExtent = entry.getKey();
+ String loc = entry.getValue();
+ if (loc == null)
+ System.out.println(" Tablet " + keyExtent + " has no location");
+ else if (opts.verbose)
+ System.out.println(" Tablet " + keyExtent + " is located at " + loc);
- if (check == null || check.contains(entry.getKey()))
- extentList.add(entry.getKey());
+ if (loc != null) {
+ List<KeyExtent> extentList = extentsPerServer.get(loc);
+ if (extentList == null) {
+ extentList = new ArrayList<KeyExtent>();
+ extentsPerServer.put(loc, extentList);
+ }
+
+ if (check == null || check.contains(keyExtent))
+ extentList.add(keyExtent);
+ }
}
ExecutorService tp = Executors.newFixedThreadPool(20);
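
VerifyTabletAssignments replaces the removed MetadataTable.getEntries call with the new MetadataServicer facade; unassigned tablets now show up as null locations in the map rather than as keys missing from a second set. A condensed sketch of the new lookup, using only calls from the hunk above:

    String tableId = Tables.getNameToIdMap(inst).get(tableName);
    TCredentials credentials = CredentialHelper.create(opts.principal, opts.getToken(), opts.instance);
    TreeMap<KeyExtent,String> tabletLocations = new TreeMap<KeyExtent,String>();
    MetadataServicer.forTableId(inst, credentials, tableId).getTabletLocations(tabletLocations);
    for (Entry<KeyExtent,String> entry : tabletLocations.entrySet())
      if (entry.getValue() == null)
        System.out.println("  Tablet " + entry.getKey() + " has no location");
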
Modified: accumulo/trunk/server/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/server/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java (original)
+++ accumulo/trunk/server/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java Wed Jul 3 18:32:51 2013
@@ -25,7 +25,8 @@ import java.util.List;
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
import org.apache.hadoop.io.Text;
import org.apache.log4j.Level;
@@ -53,12 +54,12 @@ public class MetadataConstraintsTest {
};
}
}
-
+
@Test
public void testCheck() {
Logger.getLogger(AccumuloConfiguration.class).setLevel(Level.ERROR);
Mutation m = new Mutation(new Text("0;foo"));
- MetadataTable.PREV_ROW_COLUMN.put(m, new Value("1foo".getBytes()));
+ TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("1foo".getBytes()));
MetadataConstraints mc = new MetadataConstraints();
@@ -69,7 +70,7 @@ public class MetadataConstraintsTest {
assertEquals(Short.valueOf((short) 3), violations.get(0));
m = new Mutation(new Text("0:foo"));
- MetadataTable.PREV_ROW_COLUMN.put(m, new Value("1poo".getBytes()));
+ TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("1poo".getBytes()));
violations = mc.check(null, m);
@@ -87,7 +88,7 @@ public class MetadataConstraintsTest {
assertEquals(Short.valueOf((short) 2), violations.get(0));
m = new Mutation(new Text("!!<"));
- MetadataTable.PREV_ROW_COLUMN.put(m, new Value("1poo".getBytes()));
+ TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("1poo".getBytes()));
violations = mc.check(null, m);
@@ -97,7 +98,7 @@ public class MetadataConstraintsTest {
assertEquals(Short.valueOf((short) 5), violations.get(1));
m = new Mutation(new Text("0;foo"));
- MetadataTable.PREV_ROW_COLUMN.put(m, new Value("".getBytes()));
+ TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("".getBytes()));
violations = mc.check(null, m);
@@ -106,28 +107,28 @@ public class MetadataConstraintsTest {
assertEquals(Short.valueOf((short) 6), violations.get(0));
m = new Mutation(new Text("0;foo"));
- MetadataTable.PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
+ TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
violations = mc.check(null, m);
assertEquals(null, violations);
m = new Mutation(new Text("!0<"));
- MetadataTable.PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
+ TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
violations = mc.check(null, m);
assertEquals(null, violations);
m = new Mutation(new Text("!1<"));
- MetadataTable.PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
+ TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
violations = mc.check(null, m);
assertNotNull(violations);
assertEquals(1, violations.size());
assertEquals(Short.valueOf((short) 4), violations.get(0));
-
+
}
@Test
@@ -135,20 +136,20 @@ public class MetadataConstraintsTest {
MetadataConstraints mc = new TestMetadataConstraints();
Mutation m;
List<Short> violations;
-
+
// inactive txid
m = new Mutation(new Text("0;foo"));
- m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("12345".getBytes()));
- m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
+ m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("12345".getBytes()));
+ m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
violations = mc.check(null, m);
assertNotNull(violations);
assertEquals(1, violations.size());
- assertEquals(Short.valueOf((short)8), violations.get(0));
+ assertEquals(Short.valueOf((short) 8), violations.get(0));
// txid that throws exception
m = new Mutation(new Text("0;foo"));
- m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("9".getBytes()));
- m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
+ m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("9".getBytes()));
+ m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
violations = mc.check(null, m);
assertNotNull(violations);
assertEquals(1, violations.size());
@@ -156,14 +157,14 @@ public class MetadataConstraintsTest {
// active txid w/ file
m = new Mutation(new Text("0;foo"));
- m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
- m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
+ m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
+ m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
violations = mc.check(null, m);
assertNull(violations);
// active txid w/o file
m = new Mutation(new Text("0;foo"));
- m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
+ m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
violations = mc.check(null, m);
assertNotNull(violations);
assertEquals(1, violations.size());
@@ -171,69 +172,68 @@ public class MetadataConstraintsTest {
// two active txids w/ files
m = new Mutation(new Text("0;foo"));
- m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
- m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
- m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile2"), new Value("7".getBytes()));
- m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile2"), new Value("1,1".getBytes()));
+ m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
+ m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
+ m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile2"), new Value("7".getBytes()));
+ m.put(DataFileColumnFamily.NAME, new Text("/someFile2"), new Value("1,1".getBytes()));
violations = mc.check(null, m);
assertNotNull(violations);
assertEquals(1, violations.size());
assertEquals(Short.valueOf((short) 8), violations.get(0));
-
+
// two files w/ one active txid
m = new Mutation(new Text("0;foo"));
- m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
- m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
- m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile2"), new Value("5".getBytes()));
- m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile2"), new Value("1,1".getBytes()));
+ m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
+ m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
+ m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile2"), new Value("5".getBytes()));
+ m.put(DataFileColumnFamily.NAME, new Text("/someFile2"), new Value("1,1".getBytes()));
violations = mc.check(null, m);
assertNull(violations);
-
+
// two loaded w/ one active txid and one file
m = new Mutation(new Text("0;foo"));
- m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
- m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
- m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile2"), new Value("5".getBytes()));
+ m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
+ m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
+ m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile2"), new Value("5".getBytes()));
violations = mc.check(null, m);
assertNotNull(violations);
assertEquals(1, violations.size());
assertEquals(Short.valueOf((short) 8), violations.get(0));
-
+
// active txid, mutation that looks like split
m = new Mutation(new Text("0;foo"));
- m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
- MetadataTable.DIRECTORY_COLUMN.put(m, new Value("/t1".getBytes()));
+ m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
+ TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t1".getBytes()));
violations = mc.check(null, m);
assertNull(violations);
// inactive txid, mutation that looks like split
m = new Mutation(new Text("0;foo"));
- m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("12345".getBytes()));
- MetadataTable.DIRECTORY_COLUMN.put(m, new Value("/t1".getBytes()));
+ m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("12345".getBytes()));
+ TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t1".getBytes()));
violations = mc.check(null, m);
assertNull(violations);
// active txid, mutation that looks like a load
m = new Mutation(new Text("0;foo"));
- m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
- m.put(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, new Text("789"), new Value("127.0.0.1:9997".getBytes()));
+ m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
+ m.put(TabletsSection.CurrentLocationColumnFamily.NAME, new Text("789"), new Value("127.0.0.1:9997".getBytes()));
violations = mc.check(null, m);
assertNull(violations);
// inactive txid, mutation that looks like a load
m = new Mutation(new Text("0;foo"));
- m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("12345".getBytes()));
- m.put(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, new Text("789"), new Value("127.0.0.1:9997".getBytes()));
+ m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("12345".getBytes()));
+ m.put(TabletsSection.CurrentLocationColumnFamily.NAME, new Text("789"), new Value("127.0.0.1:9997".getBytes()));
violations = mc.check(null, m);
assertNull(violations);
// deleting a load flag
m = new Mutation(new Text("0;foo"));
- m.putDelete(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"));
+ m.putDelete(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"));
violations = mc.check(null, m);
assertNull(violations);
-
-
+
}
}
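
[Editor's note] For readers following the rename, the constraint checks above boil down to the call pattern below. This is a minimal sketch, assuming the MetadataSchema constants and MetadataConstraints.check() exactly as they appear in this diff; the server constraints package for the import, the class name ConstraintCheckSketch, and the main() wrapper are illustrative assumptions, not part of the commit.

    import java.util.List;

    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
    import org.apache.accumulo.server.constraints.MetadataConstraints;
    import org.apache.hadoop.io.Text;

    public class ConstraintCheckSketch {
      public static void main(String[] args) {
        // "0;foo" is a well-formed tablet row: table id "0", end row "foo".
        Mutation m = new Mutation(new Text("0;foo"));

        // PREV_ROW_COLUMN moved from util.MetadataTable to the nested schema class.
        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));

        // check() returns null when the mutation violates no metadata constraints.
        List<Short> violations = new MetadataConstraints().check(null, m);
        System.out.println(violations == null ? "no violations" : violations);
      }
    }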
Modified: accumulo/trunk/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java (original)
+++ accumulo/trunk/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java Wed Jul 3 18:32:51 2013
@@ -31,10 +31,10 @@ import org.apache.accumulo.core.client.s
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.CredentialHelper;
import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.MetadataTable;
import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.accumulo.server.fs.VolumeManagerImpl;
import org.apache.hadoop.io.Text;
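
[Editor's note] The only change to TestConfirmDeletes is the import swap; MetadataTable kept its name and moved packages, so callers need just the new import (taken verbatim from the hunk above):

    // Before this commit:
    //   import org.apache.accumulo.core.util.MetadataTable;
    // After:
    import org.apache.accumulo.core.metadata.MetadataTable;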
Modified: accumulo/trunk/server/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/server/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java (original)
+++ accumulo/trunk/server/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java Wed Jul 3 18:32:51 2013
@@ -21,8 +21,6 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.TreeMap;
-import org.junit.Assert;
-
import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
@@ -32,10 +30,12 @@ import org.apache.accumulo.core.iterator
import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
import org.apache.accumulo.core.iterators.SortedMapIterator;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
import org.apache.accumulo.core.util.ColumnFQ;
-import org.apache.accumulo.core.util.MetadataTable;
import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
import org.apache.hadoop.io.Text;
+import org.junit.Assert;
import org.junit.Test;
/**
@@ -83,24 +83,24 @@ public class MetadataBulkLoadFilterTest
TreeMap<Key,Value> expected = new TreeMap<Key,Value>();
// following should not be deleted by filter
- put(tm1, "2;m", MetadataTable.DIRECTORY_COLUMN, "/t1");
- put(tm1, "2;m", MetadataTable.DATAFILE_COLUMN_FAMILY, "/t1/file1", "1,1");
- put(tm1, "2;m", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t1/file1", "5");
- put(tm1, "2;m", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t1/file3", "7");
- put(tm1, "2;m", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t1/file4", "9");
- put(tm1, "2<", MetadataTable.DIRECTORY_COLUMN, "/t2");
- put(tm1, "2<", MetadataTable.DATAFILE_COLUMN_FAMILY, "/t2/file2", "1,1");
- put(tm1, "2<", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t2/file6", "5");
- put(tm1, "2<", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t2/file7", "7");
- put(tm1, "2<", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t2/file8", "9");
- put(tm1, "2<", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t2/fileC", null);
+ put(tm1, "2;m", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, "/t1");
+ put(tm1, "2;m", DataFileColumnFamily.NAME, "/t1/file1", "1,1");
+ put(tm1, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file1", "5");
+ put(tm1, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file3", "7");
+ put(tm1, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file4", "9");
+ put(tm1, "2<", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, "/t2");
+ put(tm1, "2<", DataFileColumnFamily.NAME, "/t2/file2", "1,1");
+ put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file6", "5");
+ put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file7", "7");
+ put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file8", "9");
+ put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/fileC", null);
expected.putAll(tm1);
-
+
// the following should be deleted by filter
- put(tm1, "2;m", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t1/file5", "8");
- put(tm1, "2<", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t2/file9", "8");
- put(tm1, "2<", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t2/fileA", "2");
+ put(tm1, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file5", "8");
+ put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file9", "8");
+ put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/fileA", "2");
TestMetadataBulkLoadFilter iter = new TestMetadataBulkLoadFilter();
iter.init(new SortedMapIterator(tm1), new HashMap<String,String>(), new IteratorEnvironment() {
@@ -111,8 +111,7 @@ public class MetadataBulkLoadFilterTest
}
@Override
- public void registerSideChannel(SortedKeyValueIterator<Key,Value> iter) {
- }
+ public void registerSideChannel(SortedKeyValueIterator<Key,Value> iter) {}
@Override
public boolean isFullMajorCompaction() {
@@ -129,7 +128,7 @@ public class MetadataBulkLoadFilterTest
return null;
}
});
-
+
iter.seek(new Range(), new ArrayList<ByteSequence>(), false);
TreeMap<Key,Value> actual = new TreeMap<Key,Value>();
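
[Editor's note] As a companion to the rewritten expectations above, here is a minimal sketch of creating a bulk-load marker with the relocated constant and reading it back through a SortedMapIterator. Only Key/Value/iterator calls visible in this diff are used; the class name BulkLoadEntrySketch and the main() wrapper are illustrative assumptions.

    import java.util.ArrayList;
    import java.util.TreeMap;

    import org.apache.accumulo.core.data.ByteSequence;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.iterators.SortedMapIterator;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
    import org.apache.hadoop.io.Text;

    public class BulkLoadEntrySketch {
      public static void main(String[] args) throws Exception {
        TreeMap<Key,Value> tm = new TreeMap<Key,Value>();

        // Bulk-load marker: qualifier is the file path, value is the bulk txid.
        tm.put(new Key(new Text("2;m"), TabletsSection.BulkFileColumnFamily.NAME, new Text("/t1/file1")),
            new Value("5".getBytes()));

        SortedMapIterator iter = new SortedMapIterator(tm);
        iter.seek(new Range(), new ArrayList<ByteSequence>(), false);
        while (iter.hasTop()) {
          System.out.println(iter.getTopKey() + " -> " + iter.getTopValue());
          iter.next();
        }
      }
    }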
Modified: accumulo/trunk/server/src/test/java/org/apache/accumulo/server/master/TestMergeState.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/test/java/org/apache/accumulo/server/master/TestMergeState.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/server/src/test/java/org/apache/accumulo/server/master/TestMergeState.java (original)
+++ accumulo/trunk/server/src/test/java/org/apache/accumulo/server/master/TestMergeState.java Wed Jul 3 18:32:51 2013
@@ -34,10 +34,12 @@ import org.apache.accumulo.core.data.Key
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ChoppedColumnFamily;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.security.CredentialHelper;
import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.MetadataTable;
import org.apache.accumulo.server.master.state.Assignment;
import org.apache.accumulo.server.master.state.CurrentState;
import org.apache.accumulo.server.master.state.MergeInfo;
@@ -101,14 +103,14 @@ public class TestMergeState {
for (String s : splits) {
Text split = new Text(s);
Mutation prevRow = KeyExtent.getPrevRowUpdateMutation(new KeyExtent(tableId, split, pr));
- prevRow.put(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, new Text("123456"), new Value("127.0.0.1:1234".getBytes()));
- MetadataTable.CHOPPED_COLUMN.put(prevRow, new Value("junk".getBytes()));
+ prevRow.put(TabletsSection.CurrentLocationColumnFamily.NAME, new Text("123456"), new Value("127.0.0.1:1234".getBytes()));
+ ChoppedColumnFamily.CHOPPED_COLUMN.put(prevRow, new Value("junk".getBytes()));
bw.addMutation(prevRow);
pr = split;
}
// Add the default tablet
Mutation defaultTablet = KeyExtent.getPrevRowUpdateMutation(new KeyExtent(tableId, null, pr));
- defaultTablet.put(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, new Text("123456"), new Value("127.0.0.1:1234".getBytes()));
+ defaultTablet.put(TabletsSection.CurrentLocationColumnFamily.NAME, new Text("123456"), new Value("127.0.0.1:1234".getBytes()));
bw.addMutation(defaultTablet);
bw.close();
@@ -128,8 +130,8 @@ public class TestMergeState {
// Create the hole
// Split the tablet at one end of the range
Mutation m = new KeyExtent(tableId, new Text("t"), new Text("p")).getPrevRowUpdateMutation();
- MetadataTable.SPLIT_RATIO_COLUMN.put(m, new Value("0.5".getBytes()));
- MetadataTable.OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(new Text("o")));
+ TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value("0.5".getBytes()));
+ TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(new Text("o")));
update(connector, m);
// do the state check
@@ -139,7 +141,7 @@ public class TestMergeState {
// unassign the tablets
BatchDeleter deleter = connector.createBatchDeleter("!METADATA", Authorizations.EMPTY, 1000, new BatchWriterConfig());
- deleter.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
+ deleter.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
deleter.setRanges(Collections.singletonList(new Range()));
deleter.delete();
@@ -150,7 +152,7 @@ public class TestMergeState {
// finish the split
KeyExtent tablet = new KeyExtent(tableId, new Text("p"), new Text("o"));
m = tablet.getPrevRowUpdateMutation();
- MetadataTable.SPLIT_RATIO_COLUMN.put(m, new Value("0.5".getBytes()));
+ TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value("0.5".getBytes()));
update(connector, m);
metaDataStateStore.setLocations(Collections.singletonList(new Assignment(tablet, state.someTServer)));
@@ -160,7 +162,7 @@ public class TestMergeState {
// chop it
m = tablet.getPrevRowUpdateMutation();
- MetadataTable.CHOPPED_COLUMN.put(m, new Value("junk".getBytes()));
+ ChoppedColumnFamily.CHOPPED_COLUMN.put(m, new Value("junk".getBytes()));
update(connector, m);
stats = scan(state, metaDataStateStore);
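
[Editor's note] For reference, the merge-state setup above reduces to mutations like the one sketched here, using the renamed constants. KeyExtent.getPrevRowUpdateMutation() and both column constants appear in the diff; the surrounding class and the getUpdates() printout are illustrative only.

    import org.apache.accumulo.core.data.KeyExtent;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ChoppedColumnFamily;
    import org.apache.hadoop.io.Text;

    public class MergeStateMutationSketch {
      public static void main(String[] args) {
        // Tablet of table "t" covering (-inf, "b"]; start from its prev-row mutation.
        Mutation m = new KeyExtent(new Text("t"), new Text("b"), null).getPrevRowUpdateMutation();

        // Record a (fake) live location and mark the tablet chopped, as the test does.
        m.put(TabletsSection.CurrentLocationColumnFamily.NAME, new Text("123456"),
            new Value("127.0.0.1:1234".getBytes()));
        ChoppedColumnFamily.CHOPPED_COLUMN.put(m, new Value("junk".getBytes()));

        System.out.println(m.getUpdates().size() + " column updates queued");
      }
    }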
Modified: accumulo/trunk/server/src/test/java/org/apache/accumulo/server/master/state/RootTabletStateStoreTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/test/java/org/apache/accumulo/server/master/state/RootTabletStateStoreTest.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/server/src/test/java/org/apache/accumulo/server/master/state/RootTabletStateStoreTest.java (original)
+++ accumulo/trunk/server/src/test/java/org/apache/accumulo/server/master/state/RootTabletStateStoreTest.java Wed Jul 3 18:32:51 2013
@@ -28,8 +28,8 @@ import java.util.HashSet;
import java.util.List;
import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.metadata.RootTable;
import org.apache.accumulo.core.util.AddressUtil;
-import org.apache.accumulo.core.util.RootTable;
import org.apache.accumulo.server.master.state.TabletLocationState.BadLocationStateException;
import org.apache.hadoop.io.Text;
import org.junit.Assert;
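
[Editor's note] RootTabletStateStoreTest gets the same treatment as TestConfirmDeletes: RootTable kept its name and only moved packages, so the fix is import-only (verbatim from the hunk above):

    // Before this commit:
    //   import org.apache.accumulo.core.util.RootTable;
    // After:
    import org.apache.accumulo.core.metadata.RootTable;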
Modified: accumulo/trunk/server/src/test/java/org/apache/accumulo/server/tabletserver/CheckTabletMetadataTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/server/src/test/java/org/apache/accumulo/server/tabletserver/CheckTabletMetadataTest.java?rev=1499510&r1=1499509&r2=1499510&view=diff
==============================================================================
--- accumulo/trunk/server/src/test/java/org/apache/accumulo/server/tabletserver/CheckTabletMetadataTest.java (original)
+++ accumulo/trunk/server/src/test/java/org/apache/accumulo/server/tabletserver/CheckTabletMetadataTest.java Wed Jul 3 18:32:51 2013
@@ -22,14 +22,13 @@ import java.util.TreeMap;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.KeyExtent;
import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
import org.apache.accumulo.core.util.ColumnFQ;
-import org.apache.accumulo.core.util.MetadataTable;
import org.apache.accumulo.server.master.state.TServerInstance;
import org.apache.hadoop.io.Text;
import org.junit.Assert;
import org.junit.Test;
-
public class CheckTabletMetadataTest {
private static Key nk(String row, ColumnFQ cfq) {
@@ -39,7 +38,7 @@ public class CheckTabletMetadataTest {
private static Key nk(String row, Text cf, String cq) {
return new Key(row, cf.toString(), cq);
}
-
+
private static void put(TreeMap<Key,Value> tabletMeta, String row, ColumnFQ cfq, byte[] val) {
Key k = new Key(new Text(row), cfq.getColumnFamily(), cfq.getColumnQualifier());
tabletMeta.put(k, new Value(val));
@@ -49,25 +48,25 @@ public class CheckTabletMetadataTest {
Key k = new Key(new Text(row), cf, new Text(cq));
tabletMeta.put(k, new Value(val.getBytes()));
}
-
+
private static void assertFail(TreeMap<Key,Value> tabletMeta, KeyExtent ke, TServerInstance tsi) {
try {
Assert.assertNull(TabletServer.checkTabletMetadata(ke, tsi, tabletMeta, ke.getMetadataEntry()));
} catch (Exception e) {
-
+
}
}
-
+
private static void assertFail(TreeMap<Key,Value> tabletMeta, KeyExtent ke, TServerInstance tsi, Key keyToDelete) {
TreeMap<Key,Value> copy = new TreeMap<Key,Value>(tabletMeta);
Assert.assertNotNull(copy.remove(keyToDelete));
try {
Assert.assertNull(TabletServer.checkTabletMetadata(ke, tsi, copy, ke.getMetadataEntry()));
} catch (Exception e) {
-
+
}
}
-
+
@Test
public void testBadTabletMetadata() throws Exception {
@@ -75,10 +74,10 @@ public class CheckTabletMetadataTest {
TreeMap<Key,Value> tabletMeta = new TreeMap<Key,Value>();
- put(tabletMeta, "1<", MetadataTable.PREV_ROW_COLUMN, KeyExtent.encodePrevEndRow(null).get());
- put(tabletMeta, "1<", MetadataTable.DIRECTORY_COLUMN, "/t1".getBytes());
- put(tabletMeta, "1<", MetadataTable.TIME_COLUMN, "M0".getBytes());
- put(tabletMeta, "1<", MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, "4", "127.0.0.1:9997");
+ put(tabletMeta, "1<", TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN, KeyExtent.encodePrevEndRow(null).get());
+ put(tabletMeta, "1<", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, "/t1".getBytes());
+ put(tabletMeta, "1<", TabletsSection.ServerColumnFamily.TIME_COLUMN, "M0".getBytes());
+ put(tabletMeta, "1<", TabletsSection.FutureLocationColumnFamily.NAME, "4", "127.0.0.1:9997");
TServerInstance tsi = new TServerInstance("127.0.0.1:9997", 4);
@@ -94,30 +93,30 @@ public class CheckTabletMetadataTest {
assertFail(tabletMeta, new KeyExtent(new Text("1"), new Text("r"), new Text("m")), tsi);
- assertFail(tabletMeta, ke, tsi, nk("1<", MetadataTable.PREV_ROW_COLUMN));
+ assertFail(tabletMeta, ke, tsi, nk("1<", TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN));
-
+
- assertFail(tabletMeta, ke, tsi, nk("1<", MetadataTable.DIRECTORY_COLUMN));
+ assertFail(tabletMeta, ke, tsi, nk("1<", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN));
- assertFail(tabletMeta, ke, tsi, nk("1<", MetadataTable.TIME_COLUMN));
+ assertFail(tabletMeta, ke, tsi, nk("1<", TabletsSection.ServerColumnFamily.TIME_COLUMN));
- assertFail(tabletMeta, ke, tsi, nk("1<", MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, "4"));
+ assertFail(tabletMeta, ke, tsi, nk("1<", TabletsSection.FutureLocationColumnFamily.NAME, "4"));
TreeMap<Key,Value> copy = new TreeMap<Key,Value>(tabletMeta);
- put(copy, "1<", MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, "4", "127.0.0.1:9997");
+ put(copy, "1<", TabletsSection.CurrentLocationColumnFamily.NAME, "4", "127.0.0.1:9997");
assertFail(copy, ke, tsi);
- assertFail(copy, ke, tsi, nk("1<", MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, "4"));
+ assertFail(copy, ke, tsi, nk("1<", TabletsSection.FutureLocationColumnFamily.NAME, "4"));
copy = new TreeMap<Key,Value>(tabletMeta);
- put(copy, "1<", MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, "5", "127.0.0.1:9998");
+ put(copy, "1<", TabletsSection.CurrentLocationColumnFamily.NAME, "5", "127.0.0.1:9998");
assertFail(copy, ke, tsi);
- put(copy, "1<", MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, "6", "127.0.0.1:9999");
+ put(copy, "1<", TabletsSection.CurrentLocationColumnFamily.NAME, "6", "127.0.0.1:9999");
assertFail(copy, ke, tsi);
copy = new TreeMap<Key,Value>(tabletMeta);
- put(copy, "1<", MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, "5", "127.0.0.1:9998");
+ put(copy, "1<", TabletsSection.FutureLocationColumnFamily.NAME, "5", "127.0.0.1:9998");
assertFail(copy, ke, tsi);
assertFail(new TreeMap<Key,Value>(), ke, tsi);
-
+
}
}
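
[Editor's note] Finally, a compact sketch of the minimal tablet metadata that this test treats as well formed, built with the relocated schema constants. The put() helper mirrors the test's own; the class name TabletMetadataSketch, the main() wrapper, and the size printout are illustrative assumptions.

    import java.util.TreeMap;

    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.KeyExtent;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
    import org.apache.accumulo.core.util.ColumnFQ;
    import org.apache.hadoop.io.Text;

    public class TabletMetadataSketch {
      // Mirrors the test helper: store one ColumnFQ entry under the given metadata row.
      private static void put(TreeMap<Key,Value> meta, String row, ColumnFQ cfq, byte[] val) {
        meta.put(new Key(new Text(row), cfq.getColumnFamily(), cfq.getColumnQualifier()), new Value(val));
      }

      public static void main(String[] args) {
        TreeMap<Key,Value> meta = new TreeMap<Key,Value>();

        // The four entries the test requires: prev row, directory, time, and a location.
        put(meta, "1<", TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN, KeyExtent.encodePrevEndRow(null).get());
        put(meta, "1<", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, "/t1".getBytes());
        put(meta, "1<", TabletsSection.ServerColumnFamily.TIME_COLUMN, "M0".getBytes());
        meta.put(new Key(new Text("1<"), TabletsSection.FutureLocationColumnFamily.NAME, new Text("4")),
            new Value("127.0.0.1:9997".getBytes()));

        System.out.println(meta.size() + " metadata entries for tablet 1<");
      }
    }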