You are viewing a plain text version of this content. The canonical (HTML) version of this message is available in the mailing list archive; the original hyperlink was lost in plain-text conversion.
Posted to commits@accumulo.apache.org by kt...@apache.org on 2018/12/21 22:35:55 UTC
[accumulo] branch master updated: fixes #815 test converting key
values to TabletMetadata object (#851)
This is an automated email from the ASF dual-hosted git repository.
kturner pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/accumulo.git
The following commit(s) were added to refs/heads/master by this push:
new fdd885c fixes #815 test converting key values to TabletMetadata object (#851)
fdd885c is described below
commit fdd885c0ee92495d2427f64c3e38466d3d6adc9a
Author: Keith Turner <kt...@apache.org>
AuthorDate: Fri Dec 21 17:35:51 2018 -0500
fixes #815 test converting key values to TabletMetadata object (#851)
---
.../core/metadata/schema/TabletMetadata.java | 2 +-
.../core/metadata/schema/TabletMetadataTest.java | 167 +++++++++++++++++++++
2 files changed, 168 insertions(+), 1 deletion(-)
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/schema/TabletMetadata.java b/core/src/main/java/org/apache/accumulo/core/metadata/schema/TabletMetadata.java
index 92f7720..55195ee 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/schema/TabletMetadata.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/schema/TabletMetadata.java
@@ -233,7 +233,7 @@ public class TabletMetadata {
return keyValues;
}
- private static TabletMetadata convertRow(Iterator<Entry<Key,Value>> rowIter,
+ static TabletMetadata convertRow(Iterator<Entry<Key,Value>> rowIter,
EnumSet<FetchedColumns> fetchedColumns, boolean buildKeyValueMap) {
Objects.requireNonNull(rowIter);
diff --git a/core/src/test/java/org/apache/accumulo/core/metadata/schema/TabletMetadataTest.java b/core/src/test/java/org/apache/accumulo/core/metadata/schema/TabletMetadataTest.java
new file mode 100644
index 0000000..1243d27
--- /dev/null
+++ b/core/src/test/java/org/apache/accumulo/core/metadata/schema/TabletMetadataTest.java
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.accumulo.core.metadata.schema;
+
+import static java.util.stream.Collectors.toSet;
+import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.COMPACT_COLUMN;
+import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN;
+import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.FLUSH_COLUMN;
+import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.TIME_COLUMN;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.EnumSet;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import org.apache.accumulo.core.clientImpl.Table.ID;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.dataImpl.KeyExtent;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.BulkFileColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ClonedColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.CurrentLocationColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.FutureLocationColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LastLocationColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ScanFileColumnFamily;
+import org.apache.accumulo.core.metadata.schema.TabletMetadata.FetchedColumns;
+import org.apache.accumulo.core.metadata.schema.TabletMetadata.LocationType;
+import org.apache.accumulo.core.tabletserver.log.LogEntry;
+import org.apache.accumulo.core.util.HostAndPort;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+
+public class TabletMetadataTest {
+
+ @Test
+ public void testAllColumns() {
+ KeyExtent extent = new KeyExtent(ID.of("5"), new Text("df"), new Text("da"));
+
+ Mutation mutation = extent.getPrevRowUpdateMutation();
+
+ COMPACT_COLUMN.put(mutation, new Value("5"));
+ DIRECTORY_COLUMN.put(mutation, new Value("/a/t/6/a/"));
+ FLUSH_COLUMN.put(mutation, new Value("6"));
+ TIME_COLUMN.put(mutation, new Value("M123456789"));
+
+ mutation.at().family(BulkFileColumnFamily.NAME).qualifier("bf1").put("");
+ mutation.at().family(BulkFileColumnFamily.NAME).qualifier("bf2").put("");
+
+ mutation.at().family(ClonedColumnFamily.NAME).qualifier("").put("OK");
+
+ DataFileValue dfv1 = new DataFileValue(555, 23);
+ mutation.at().family(DataFileColumnFamily.NAME).qualifier("df1").put(dfv1.encode());
+ DataFileValue dfv2 = new DataFileValue(234, 13);
+ mutation.at().family(DataFileColumnFamily.NAME).qualifier("df2").put(dfv2.encode());
+
+ mutation.at().family(CurrentLocationColumnFamily.NAME).qualifier("s001").put("server1:8555");
+
+ mutation.at().family(LastLocationColumnFamily.NAME).qualifier("s000").put("server2:8555");
+
+ LogEntry le1 = new LogEntry(extent, 55, "server1", "lf1");
+ mutation.at().family(le1.getColumnFamily()).qualifier(le1.getColumnQualifier())
+ .timestamp(le1.timestamp).put(le1.getValue());
+ LogEntry le2 = new LogEntry(extent, 57, "server1", "lf2");
+ mutation.at().family(le2.getColumnFamily()).qualifier(le2.getColumnQualifier())
+ .timestamp(le2.timestamp).put(le2.getValue());
+
+ mutation.at().family(ScanFileColumnFamily.NAME).qualifier("sf1").put("");
+ mutation.at().family(ScanFileColumnFamily.NAME).qualifier("sf2").put("");
+
+ SortedMap<Key,Value> rowMap = toRowMap(mutation);
+
+ TabletMetadata tm = TabletMetadata.convertRow(rowMap.entrySet().iterator(),
+ EnumSet.allOf(FetchedColumns.class), true);
+
+ assertEquals("OK", tm.getCloned());
+ assertEquals(5L, tm.getCompactId().getAsLong());
+ assertEquals("/a/t/6/a/", tm.getDir());
+ assertEquals(extent.getEndRow(), tm.getEndRow());
+ assertEquals(extent, tm.getExtent());
+ assertEquals(ImmutableSet.of("df1", "df2"), ImmutableSet.copyOf(tm.getFiles()));
+ assertEquals(ImmutableMap.of("df1", dfv1, "df2", dfv2), tm.getFilesMap());
+ assertEquals(6L, tm.getFlushId().getAsLong());
+ assertEquals(rowMap, tm.getKeyValues());
+ assertEquals(ImmutableSet.of("bf1", "bf2"), ImmutableSet.copyOf(tm.getLoaded()));
+ assertEquals(HostAndPort.fromParts("server1", 8555), tm.getLocation().getHostAndPort());
+ assertEquals("s001", tm.getLocation().getSession());
+ assertEquals(LocationType.CURRENT, tm.getLocation().getType());
+ assertTrue(tm.hasCurrent());
+ assertEquals(HostAndPort.fromParts("server2", 8555), tm.getLast().getHostAndPort());
+ assertEquals("s000", tm.getLast().getSession());
+ assertEquals(LocationType.LAST, tm.getLast().getType());
+ assertEquals(
+ ImmutableSet.of(le1.getName() + " " + le1.timestamp, le2.getName() + " " + le2.timestamp),
+ tm.getLogs().stream().map(le -> le.getName() + " " + le.timestamp).collect(toSet()));
+ assertEquals(extent.getPrevEndRow(), tm.getPrevEndRow());
+ assertEquals(extent.getTableId(), tm.getTableId());
+ assertTrue(tm.sawPrevEndRow());
+ assertEquals("M123456789", tm.getTime());
+ assertEquals(ImmutableSet.of("sf1", "sf2"), ImmutableSet.copyOf(tm.getScans()));
+ }
+
+ @Test
+ public void testFuture() {
+ KeyExtent extent = new KeyExtent(ID.of("5"), new Text("df"), new Text("da"));
+
+ Mutation mutation = extent.getPrevRowUpdateMutation();
+ mutation.at().family(FutureLocationColumnFamily.NAME).qualifier("s001").put("server1:8555");
+
+ SortedMap<Key,Value> rowMap = toRowMap(mutation);
+
+ TabletMetadata tm = TabletMetadata.convertRow(rowMap.entrySet().iterator(),
+ EnumSet.allOf(FetchedColumns.class), false);
+
+ assertEquals(extent, tm.getExtent());
+ assertEquals(HostAndPort.fromParts("server1", 8555), tm.getLocation().getHostAndPort());
+ assertEquals("s001", tm.getLocation().getSession());
+ assertEquals(LocationType.FUTURE, tm.getLocation().getType());
+ assertFalse(tm.hasCurrent());
+ }
+
+ @Test(expected = IllegalStateException.class)
+ public void testFutureAndCurrent() {
+ KeyExtent extent = new KeyExtent(ID.of("5"), new Text("df"), new Text("da"));
+
+ Mutation mutation = extent.getPrevRowUpdateMutation();
+ mutation.at().family(CurrentLocationColumnFamily.NAME).qualifier("s001").put("server1:8555");
+ mutation.at().family(FutureLocationColumnFamily.NAME).qualifier("s001").put("server1:8555");
+
+ SortedMap<Key,Value> rowMap = toRowMap(mutation);
+
+ TabletMetadata.convertRow(rowMap.entrySet().iterator(), EnumSet.allOf(FetchedColumns.class),
+ false);
+ }
+
+ private SortedMap<Key,Value> toRowMap(Mutation mutation) {
+ SortedMap<Key,Value> rowMap = new TreeMap<>();
+ mutation.getUpdates().forEach(cu -> {
+ Key k = new Key(mutation.getRow(), cu.getColumnFamily(), cu.getColumnQualifier(),
+ cu.getTimestamp());
+ Value v = new Value(cu.getValue());
+ rowMap.put(k, v);
+ });
+ return rowMap;
+ }
+}