Posted to commits@accumulo.apache.org by ec...@apache.org on 2015/06/04 20:52:42 UTC
[01/43] accumulo git commit: ACCUMULO-3871 move ITs into distro jar, stop building test jar
Repository: accumulo
Updated Branches:
refs/heads/master ab5a867f1 -> 01ae5b858
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/resources/conf/monitor_logger.xml
----------------------------------------------------------------------
diff --git a/test/src/test/resources/conf/monitor_logger.xml b/test/src/test/resources/conf/monitor_logger.xml
deleted file mode 100644
index 91a7671..0000000
--- a/test/src/test/resources/conf/monitor_logger.xml
+++ /dev/null
@@ -1,64 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
-<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
-
- <!-- Write out everything at the DEBUG level to the debug log -->
- <appender name="A2" class="org.apache.log4j.RollingFileAppender">
- <param name="File" value="${org.apache.accumulo.core.dir.log}/${org.apache.accumulo.core.application}_${org.apache.accumulo.core.ip.localhost.hostname}.debug.log"/>
- <param name="MaxFileSize" value="100MB"/>
- <param name="MaxBackupIndex" value="10"/>
- <param name="Threshold" value="DEBUG"/>
- <layout class="org.apache.log4j.PatternLayout">
- <param name="ConversionPattern" value="%d{ISO8601} [%-8c{2}] %-5p: %X{application} %m%n"/>
- </layout>
- </appender>
-
- <!-- Write out INFO and higher to the regular log -->
- <appender name="A3" class="org.apache.log4j.RollingFileAppender">
- <param name="File" value="${org.apache.accumulo.core.dir.log}/${org.apache.accumulo.core.application}_${org.apache.accumulo.core.ip.localhost.hostname}.log"/>
- <param name="MaxFileSize" value="100MB"/>
- <param name="MaxBackupIndex" value="10"/>
- <param name="Threshold" value="INFO"/>
- <layout class="org.apache.log4j.PatternLayout">
- <param name="ConversionPattern" value="%d{ISO8601} [%-8c{2}] %-5p: %X{application} %m%n"/>
- </layout>
- </appender>
-
- <!-- Keep the last few log messages for display to the user -->
- <appender name="GUI" class="org.apache.accumulo.server.monitor.LogService">
- <param name="keep" value="40"/>
- <param name="Threshold" value="WARN"/>
- </appender>
-
- <!-- Log accumulo messages to debug, normal and GUI -->
- <logger name="org.apache.accumulo" additivity="false">
- <level value="DEBUG"/>
- <appender-ref ref="A2" />
- <appender-ref ref="A3" />
- <appender-ref ref="GUI" />
- </logger>
-
- <!-- Log non-accumulo messages to debug, normal logs. -->
- <root>
- <level value="INFO"/>
- <appender-ref ref="A2" />
- <appender-ref ref="A3" />
- </root>
-
-</log4j:configuration>
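For context, the A2 appender in the file deleted above corresponds roughly to the following programmatic log4j 1.2 setup. This is a minimal sketch, assuming log4j 1.2 on the classpath, with a hypothetical literal path standing in for the ${org.apache.accumulo.core.*} property substitutions; it is not code from this commit.

    import org.apache.log4j.Level;
    import org.apache.log4j.Logger;
    import org.apache.log4j.PatternLayout;
    import org.apache.log4j.RollingFileAppender;

    public class MonitorLoggerSketch {
      public static void main(String[] args) throws Exception {
        // Equivalent of appender A2: everything at DEBUG to a rolling debug log.
        RollingFileAppender a2 = new RollingFileAppender(
            new PatternLayout("%d{ISO8601} [%-8c{2}] %-5p: %X{application} %m%n"),
            "/var/log/accumulo/monitor_localhost.debug.log", true);
        a2.setMaxFileSize("100MB");
        a2.setMaxBackupIndex(10);
        a2.setThreshold(Level.DEBUG);

        // Route org.apache.accumulo at DEBUG to it, without propagating to the root logger.
        Logger accumulo = Logger.getLogger("org.apache.accumulo");
        accumulo.setLevel(Level.DEBUG);
        accumulo.setAdditivity(false);
        accumulo.addAppender(a2);

        accumulo.debug("monitor logging configured");
      }
    }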
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/test/src/test/resources/log4j.properties b/test/src/test/resources/log4j.properties
deleted file mode 100644
index 26ea762..0000000
--- a/test/src/test/resources/log4j.properties
+++ /dev/null
@@ -1,55 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-log4j.rootLogger=DEBUG, CA
-log4j.appender.CA=org.apache.log4j.ConsoleAppender
-log4j.appender.CA.layout=org.apache.log4j.PatternLayout
-log4j.appender.CA.layout.ConversionPattern=%d{ISO8601} [%c{2}] %-5p: %m%n
-
-log4j.logger.org.apache.accumulo.core=DEBUG
-log4j.logger.org.apache.accumulo.core.client.impl.MasterClient=INFO
-log4j.logger.org.apache.accumulo.core.client.impl.ServerClient=ERROR
-log4j.logger.org.apache.accumulo.core.util.shell.Shell.audit=OFF
-log4j.logger.org.apache.accumulo.core.util.shell.Shell=FATAL
-log4j.logger.org.apache.commons.vfs2.impl.DefaultFileSystemManager=WARN
-log4j.logger.org.apache.hadoop.io.compress.CodecPool=WARN
-log4j.logger.org.apache.hadoop.mapred=ERROR
-log4j.logger.org.apache.hadoop.tools.DistCp=WARN
-log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
-log4j.logger.org.apache.hadoop.util.ProcessTree=WARN
-log4j.logger.org.apache.zookeeper.ClientCnxn=FATAL
-log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=INFO
-log4j.logger.org.apache.zookeeper.ZooKeeper=WARN
-log4j.logger.org.apache.accumulo.core.file.rfile.bcfile=INFO
-log4j.logger.org.apache.accumulo.server.util.ReplicationTableUtil=TRACE
-log4j.logger.org.apache.accumulo.core.client.impl.ThriftScanner=INFO
-log4j.logger.org.apache.accumulo.fate.zookeeper.DistributedReadWriteLock=WARN
-log4j.logger.org.mortbay.log=WARN
-log4j.logger.org.apache.hadoop=WARN
-log4j.logger.org.apache.jasper=INFO
-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=WARN
-log4j.logger.org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace=WARN
-log4j.logger.BlockStateChange=WARN
-log4j.logger.org.apache.accumulo.core.client.impl.TabletServerBatchReaderIterator=INFO
-log4j.logger.org.apache.hadoop.security=DEBUG
-log4j.logger.org.apache.hadoop.minikdc=DEBUG
-log4j.logger.org.apache.directory=INFO
-log4j.logger.org.apache.directory.api.ldap=WARN
-# This is really spammy at debug
-log4j.logger.org.apache.thrift.transport.TSaslTransport=INFO
-# From apache-ds/minikdc
-log4j.logger.org.apache.mina=INFO
-log4j.logger.org.apache.accumulo.server.thrift.UGIAssumingProcessor=TRACE
-log4j.logger.org.apache.hadoop.security.UserGroupInformation=INFO
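The properties deleted above are the test-side logging defaults: everything at DEBUG to the console, with per-logger overrides quieting noisy Hadoop, ZooKeeper, and Thrift components. For illustration, the same settings can be applied programmatically; a minimal sketch with a few representative entries, again assuming log4j 1.2:

    import java.util.Properties;

    import org.apache.log4j.PropertyConfigurator;

    public class TestLoggingSketch {
      public static void main(String[] args) {
        // Representative subset of the deleted file; the full list quiets many
        // more Hadoop/ZooKeeper/Thrift loggers in the same way.
        Properties p = new Properties();
        p.setProperty("log4j.rootLogger", "DEBUG, CA");
        p.setProperty("log4j.appender.CA", "org.apache.log4j.ConsoleAppender");
        p.setProperty("log4j.appender.CA.layout", "org.apache.log4j.PatternLayout");
        p.setProperty("log4j.appender.CA.layout.ConversionPattern", "%d{ISO8601} [%c{2}] %-5p: %m%n");
        p.setProperty("log4j.logger.org.apache.zookeeper.ClientCnxn", "FATAL");
        p.setProperty("log4j.logger.org.apache.thrift.transport.TSaslTransport", "INFO");
        PropertyConfigurator.configure(p);
      }
    }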
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/resources/randomwalk/Basic.xml
----------------------------------------------------------------------
diff --git a/test/src/test/resources/randomwalk/Basic.xml b/test/src/test/resources/randomwalk/Basic.xml
deleted file mode 100644
index 2dead02..0000000
--- a/test/src/test/resources/randomwalk/Basic.xml
+++ /dev/null
@@ -1,37 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<module>
-
-<package prefix="test" value="org.apache.accumulo.test.randomwalk.unit"/>
-
-<init id="test.CreateTable"/>
-
-<node id="test.CreateTable">
- <edge id="unit/Simple.xml" weight="1"/>
-</node>
-
-<node id="unit/Simple.xml">
- <edge id="unit/Simple.xml" weight="3"/>
- <edge id="test.DeleteTable" weight="1"/>
-</node>
-
-<node id="test.DeleteTable">
- <edge id="END" weight="1"/>
-</node>
-
-</module>
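The edge weights in the module above drive the walk's transition probabilities: from the unit/Simple.xml node, the walk loops back to itself three times out of four (weight 3) and moves on to test.DeleteTable one time out of four (weight 1). A minimal sketch of weight-proportional selection, illustrating the semantics of the weights rather than the randomwalk framework's own implementation:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.Random;

    public class WeightedEdgeSketch {
      // Picks the next node id with probability proportional to its edge weight.
      static String nextNode(Map<String,Integer> edges, Random rand) {
        int total = 0;
        for (int w : edges.values()) {
          total += w;
        }
        // pick falls in [0, total); walk the entries until it is used up
        int pick = rand.nextInt(total);
        for (Map.Entry<String,Integer> e : edges.entrySet()) {
          pick -= e.getValue();
          if (pick < 0) {
            return e.getKey();
          }
        }
        throw new IllegalStateException("unreachable for non-empty edge maps");
      }

      public static void main(String[] args) {
        // The two outgoing edges of the unit/Simple.xml node in Basic.xml
        Map<String,Integer> edges = new LinkedHashMap<>();
        edges.put("unit/Simple.xml", 3);
        edges.put("test.DeleteTable", 1);
        System.out.println(nextNode(edges, new Random()));
      }
    }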
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/resources/randomwalk/Simple.xml
----------------------------------------------------------------------
diff --git a/test/src/test/resources/randomwalk/Simple.xml b/test/src/test/resources/randomwalk/Simple.xml
deleted file mode 100644
index cad940e..0000000
--- a/test/src/test/resources/randomwalk/Simple.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<module>
-
-<package prefix="test" value="org.apache.accumulo.test.randomwalk.unit"/>
-
-<init id="dummy.all"/>
-
-<node id="dummy.all">
- <edge id="test.Ingest" weight="1"/>
- <edge id="test.Verify" weight="1"/>
- <edge id="test.Scan" weight="1"/>
- <edge id="END" weight="1"/>
-</node>
-
-<node id="test.Ingest">
- <edge id="dummy.all" weight="1"/>
-</node>
-
-<node id="test.Verify">
- <edge id="dummy.all" weight="1"/>
-</node>
-
-<node id="test.Scan">
- <edge id="dummy.all" weight="1"/>
-</node>
-
-</module>
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/resources/unit/Basic.xml
----------------------------------------------------------------------
diff --git a/test/src/test/resources/unit/Basic.xml b/test/src/test/resources/unit/Basic.xml
deleted file mode 100644
index 2dead02..0000000
--- a/test/src/test/resources/unit/Basic.xml
+++ /dev/null
@@ -1,37 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<module>
-
-<package prefix="test" value="org.apache.accumulo.test.randomwalk.unit"/>
-
-<init id="test.CreateTable"/>
-
-<node id="test.CreateTable">
- <edge id="unit/Simple.xml" weight="1"/>
-</node>
-
-<node id="unit/Simple.xml">
- <edge id="unit/Simple.xml" weight="3"/>
- <edge id="test.DeleteTable" weight="1"/>
-</node>
-
-<node id="test.DeleteTable">
- <edge id="END" weight="1"/>
-</node>
-
-</module>
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/resources/unit/Simple.xml
----------------------------------------------------------------------
diff --git a/test/src/test/resources/unit/Simple.xml b/test/src/test/resources/unit/Simple.xml
deleted file mode 100644
index cad940e..0000000
--- a/test/src/test/resources/unit/Simple.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<module>
-
-<package prefix="test" value="org.apache.accumulo.test.randomwalk.unit"/>
-
-<init id="dummy.all"/>
-
-<node id="dummy.all">
- <edge id="test.Ingest" weight="1"/>
- <edge id="test.Verify" weight="1"/>
- <edge id="test.Scan" weight="1"/>
- <edge id="END" weight="1"/>
-</node>
-
-<node id="test.Ingest">
- <edge id="dummy.all" weight="1"/>
-</node>
-
-<node id="test.Verify">
- <edge id="dummy.all" weight="1"/>
-</node>
-
-<node id="test.Scan">
- <edge id="dummy.all" weight="1"/>
-</node>
-
-</module>
[24/43] accumulo git commit: ACCUMULO-3871 move ITs into distro jar, stop building test jar
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/replication/ReplicationIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/ReplicationIT.java b/test/src/main/java/org/apache/accumulo/test/replication/ReplicationIT.java
new file mode 100644
index 0000000..77198df
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/replication/ReplicationIT.java
@@ -0,0 +1,1436 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.replication;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.IteratorSetting.Column;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.TableOfflineException;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
+import org.apache.accumulo.core.iterators.conf.ColumnSet;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.ReplicationSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
+import org.apache.accumulo.core.protobuf.ProtobufUtil;
+import org.apache.accumulo.core.replication.ReplicationSchema.StatusSection;
+import org.apache.accumulo.core.replication.ReplicationSchema.WorkSection;
+import org.apache.accumulo.core.replication.ReplicationTable;
+import org.apache.accumulo.core.replication.ReplicationTarget;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.core.tabletserver.log.LogEntry;
+import org.apache.accumulo.core.util.Pair;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.fate.zookeeper.ZooCache;
+import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
+import org.apache.accumulo.fate.zookeeper.ZooLock;
+import org.apache.accumulo.gc.SimpleGarbageCollector;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.log.WalStateManager;
+import org.apache.accumulo.server.log.WalStateManager.WalState;
+import org.apache.accumulo.server.master.state.TServerInstance;
+import org.apache.accumulo.server.replication.ReplicaSystemFactory;
+import org.apache.accumulo.server.replication.StatusCombiner;
+import org.apache.accumulo.server.replication.StatusFormatter;
+import org.apache.accumulo.server.replication.StatusUtil;
+import org.apache.accumulo.server.replication.proto.Replication.Status;
+import org.apache.accumulo.server.util.ReplicationTableUtil;
+import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Function;
+import com.google.common.base.Joiner;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Multimap;
+import com.google.common.collect.Sets;
+import com.google.protobuf.TextFormat;
+
+/**
+ * Replication tests which verify expected functionality using a single MAC instance. A MockReplicaSystem is used to "fake" the peer instance that we're
+ * replicating to. This lets us test replication in a functional way without having to worry about two real systems.
+ */
+public class ReplicationIT extends ConfigurableMacBase {
+ private static final Logger log = LoggerFactory.getLogger(ReplicationIT.class);
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 60 * 10;
+ }
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "10s"); // overridden to 5s below
+ // Make the master replication loop run frequently
+ cfg.setProperty(Property.MASTER_REPLICATION_SCAN_INTERVAL, "1s");
+ cfg.setProperty(Property.REPLICATION_WORK_ASSIGNMENT_SLEEP, "1s");
+ cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "1M");
+ cfg.setProperty(Property.GC_CYCLE_START, "1s");
+ cfg.setProperty(Property.GC_CYCLE_DELAY, "0");
+ cfg.setProperty(Property.REPLICATION_NAME, "master");
+ cfg.setProperty(Property.REPLICATION_WORK_PROCESSOR_DELAY, "1s");
+ cfg.setProperty(Property.REPLICATION_WORK_PROCESSOR_PERIOD, "1s");
+ cfg.setProperty(Property.TSERV_TOTAL_MUTATION_QUEUE_MAX, "1M");
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
+ cfg.setNumTservers(1);
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ private Multimap<String,String> getLogs(Connector conn) throws Exception {
+ // Map of server to tableId
+ Multimap<TServerInstance,String> serverToTableID = HashMultimap.create();
+ Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ scanner.setRange(MetadataSchema.TabletsSection.getRange());
+ scanner.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
+ for (Entry<Key,Value> entry : scanner) {
+ TServerInstance key = new TServerInstance(entry.getValue(), entry.getKey().getColumnQualifier());
+ byte[] tableId = KeyExtent.tableOfMetadataRow(entry.getKey().getRow());
+ serverToTableID.put(key, new String(tableId, UTF_8));
+ }
+ // Map of logs to tableId
+ Multimap<String,String> logs = HashMultimap.create();
+ Instance i = conn.getInstance();
+ ZooReaderWriter zk = new ZooReaderWriter(i.getZooKeepers(), i.getZooKeepersSessionTimeOut(), "");
+ WalStateManager wals = new WalStateManager(conn.getInstance(), zk);
+ for (Entry<TServerInstance,List<UUID>> entry : wals.getAllMarkers().entrySet()) {
+ for (UUID id : entry.getValue()) {
+ Pair<WalState,Path> state = wals.state(entry.getKey(), id);
+ for (String tableId : serverToTableID.get(entry.getKey())) {
+ logs.put(state.getSecond().toString(), tableId);
+ }
+ }
+ }
+ return logs;
+ }
+
+ private Multimap<String,String> getAllLogs(Connector conn) throws Exception {
+ Multimap<String,String> logs = getLogs(conn);
+ try {
+ Scanner scanner = conn.createScanner(ReplicationTable.NAME, Authorizations.EMPTY);
+ StatusSection.limit(scanner);
+ Text buff = new Text();
+ for (Entry<Key,Value> entry : scanner) {
+ if (Thread.interrupted()) {
+ Thread.currentThread().interrupt();
+ return logs;
+ }
+
+ StatusSection.getFile(entry.getKey(), buff);
+ String file = buff.toString();
+ StatusSection.getTableId(entry.getKey(), buff);
+ String tableId = buff.toString();
+
+ logs.put(file, tableId);
+ }
+ } catch (TableOfflineException e) {
+ log.debug("Replication table isn't online yet");
+ }
+ return logs;
+ }
+
+ private void waitForGCLock(Connector conn) throws InterruptedException {
+ // Check if the GC process has the lock before wasting our retry attempts
+ ZooKeeperInstance zki = (ZooKeeperInstance) conn.getInstance();
+ ZooCacheFactory zcf = new ZooCacheFactory();
+ ZooCache zcache = zcf.getZooCache(zki.getZooKeepers(), zki.getZooKeepersSessionTimeOut());
+ String zkPath = ZooUtil.getRoot(conn.getInstance()) + Constants.ZGC_LOCK;
+ log.info("Looking for GC lock at {}", zkPath);
+ byte[] data = ZooLock.getLockData(zcache, zkPath, null);
+ while (null == data) {
+ log.info("Waiting for GC ZooKeeper lock to be acquired");
+ Thread.sleep(1000);
+ data = ZooLock.getLockData(zcache, zkPath, null);
+ }
+ }
+
+ @Test
+ public void replicationTableCreated() throws AccumuloException, AccumuloSecurityException {
+ Assert.assertTrue(getConnector().tableOperations().exists(ReplicationTable.NAME));
+ Assert.assertEquals(ReplicationTable.ID, getConnector().tableOperations().tableIdMap().get(ReplicationTable.NAME));
+ }
+
+ @Test
+ public void verifyReplicationTableConfig() throws AccumuloException, TableNotFoundException, AccumuloSecurityException {
+ TableOperations tops = getConnector().tableOperations();
+ Map<String,EnumSet<IteratorScope>> iterators = tops.listIterators(ReplicationTable.NAME);
+
+ // verify combiners are only iterators (no versioning)
+ Assert.assertEquals(1, iterators.size());
+
+ // look for combiner
+ Assert.assertTrue(iterators.containsKey(ReplicationTable.COMBINER_NAME));
+ Assert.assertTrue(iterators.get(ReplicationTable.COMBINER_NAME).containsAll(EnumSet.allOf(IteratorScope.class)));
+ for (IteratorScope scope : EnumSet.allOf(IteratorScope.class)) {
+ IteratorSetting is = tops.getIteratorSetting(ReplicationTable.NAME, ReplicationTable.COMBINER_NAME, scope);
+ Assert.assertEquals(30, is.getPriority());
+ Assert.assertEquals(StatusCombiner.class.getName(), is.getIteratorClass());
+ Assert.assertEquals(1, is.getOptions().size());
+ Assert.assertTrue(is.getOptions().containsKey("columns"));
+ String cols = is.getOptions().get("columns");
+ Column statusSectionCol = new Column(StatusSection.NAME);
+ Column workSectionCol = new Column(WorkSection.NAME);
+ Assert.assertEquals(
+ ColumnSet.encodeColumns(statusSectionCol.getColumnFamily(), statusSectionCol.getColumnQualifier()) + ","
+ + ColumnSet.encodeColumns(workSectionCol.getColumnFamily(), workSectionCol.getColumnQualifier()), cols);
+ }
+
+ boolean foundLocalityGroups = false;
+ boolean foundLocalityGroupDef1 = false;
+ boolean foundLocalityGroupDef2 = false;
+ boolean foundFormatter = false;
+ Joiner j = Joiner.on(",");
+ Function<Text,String> textToString = new Function<Text,String>() {
+ @Override
+ public String apply(Text text) {
+ return text.toString();
+ }
+ };
+ for (Entry<String,String> p : tops.getProperties(ReplicationTable.NAME)) {
+ String key = p.getKey();
+ String val = p.getValue();
+ // STATUS_LG_NAME, STATUS_LG_COLFAMS, WORK_LG_NAME, WORK_LG_COLFAMS
+ if (key.equals(Property.TABLE_FORMATTER_CLASS.getKey()) && val.equals(StatusFormatter.class.getName())) {
+ // look for formatter
+ foundFormatter = true;
+ } else if (key.equals(Property.TABLE_LOCALITY_GROUPS.getKey()) && val.equals(j.join(ReplicationTable.LOCALITY_GROUPS.keySet()))) {
+ // look for locality groups enabled
+ foundLocalityGroups = true;
+ } else if (key.startsWith(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey())) {
+ // look for locality group column family definitions
+ if (key.equals(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + ReplicationTable.STATUS_LG_NAME)
+ && val.equals(j.join(Iterables.transform(ReplicationTable.STATUS_LG_COLFAMS, textToString)))) {
+ foundLocalityGroupDef1 = true;
+ } else if (key.equals(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + ReplicationTable.WORK_LG_NAME)
+ && val.equals(j.join(Iterables.transform(ReplicationTable.WORK_LG_COLFAMS, textToString)))) {
+ foundLocalityGroupDef2 = true;
+ }
+ }
+ }
+ Assert.assertTrue(foundLocalityGroups);
+ Assert.assertTrue(foundLocalityGroupDef1);
+ Assert.assertTrue(foundLocalityGroupDef2);
+ Assert.assertTrue(foundFormatter);
+ }
+
+ @Test
+ public void correctRecordsCompleteFile() throws Exception {
+ Connector conn = getConnector();
+ String table = "table1";
+ conn.tableOperations().create(table);
+ // If we have more than one tserver, this is subject to a race condition.
+ conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION.getKey(), "true");
+
+ BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+ for (int i = 0; i < 10; i++) {
+ Mutation m = new Mutation(Integer.toString(i));
+ m.put(new byte[0], new byte[0], new byte[0]);
+ bw.addMutation(m);
+ }
+
+ bw.close();
+
+ // After writing data, we'll get a replication table online
+ boolean online = ReplicationTable.isOnline(conn);
+ int attempts = 10;
+ do {
+ if (!online) {
+ UtilWaitThread.sleep(2000);
+ online = ReplicationTable.isOnline(conn);
+ attempts--;
+ }
+ } while (!online && attempts > 0);
+ Assert.assertTrue("Replication table was not online", online);
+
+ for (int i = 0; i < 5; i++) {
+ if (conn.securityOperations().hasTablePermission("root", ReplicationTable.NAME, TablePermission.READ)) {
+ break;
+ }
+ log.info("Could not read replication table, waiting and will retry");
+ Thread.sleep(2000);
+ }
+
+ Assert.assertTrue("'root' user could not read the replication table",
+ conn.securityOperations().hasTablePermission("root", ReplicationTable.NAME, TablePermission.READ));
+
+ Set<String> replRows = Sets.newHashSet();
+ Scanner scanner;
+ attempts = 5;
+ while (replRows.isEmpty() && attempts > 0) {
+ scanner = ReplicationTable.getScanner(conn);
+ StatusSection.limit(scanner);
+ for (Entry<Key,Value> entry : scanner) {
+ Key k = entry.getKey();
+
+ String fileUri = k.getRow().toString();
+ try {
+ new URI(fileUri);
+ } catch (URISyntaxException e) {
+ Assert.fail("Expected a valid URI: " + fileUri);
+ }
+
+ replRows.add(fileUri);
+ }
+ }
+
+ Set<String> wals = Sets.newHashSet();
+ attempts = 5;
+ Instance i = conn.getInstance();
+ ZooReaderWriter zk = new ZooReaderWriter(i.getZooKeepers(), i.getZooKeepersSessionTimeOut(), "");
+ while (wals.isEmpty() && attempts > 0) {
+ WalStateManager markers = new WalStateManager(i, zk);
+ for (Entry<Path,WalState> entry : markers.getAllState().entrySet()) {
+ wals.add(entry.getKey().toString());
+ }
+ attempts--;
+ }
+
+ // We only have one file that should need replication (no trace table)
+ // We should find an entry in tablet and in the repl row
+ Assert.assertEquals("Rows found: " + replRows, 1, replRows.size());
+
+ // There should only be one extra WALog that replication doesn't know about
+ replRows.removeAll(wals);
+ Assert.assertEquals(2, wals.size());
+ Assert.assertEquals(0, replRows.size());
+ }
+
+ @Test
+ public void noRecordsWithoutReplication() throws Exception {
+ Connector conn = getConnector();
+ List<String> tables = new ArrayList<>();
+
+ // replication shouldn't be online when we begin
+ Assert.assertFalse(ReplicationTable.isOnline(conn));
+
+ for (int i = 0; i < 5; i++) {
+ String name = "table" + i;
+ tables.add(name);
+ conn.tableOperations().create(name);
+ }
+
+ // nor after we create some tables (that aren't being replicated)
+ Assert.assertFalse(ReplicationTable.isOnline(conn));
+
+ for (String table : tables) {
+ writeSomeData(conn, table, 5, 5);
+ }
+
+ // After writing data, still no replication table
+ Assert.assertFalse(ReplicationTable.isOnline(conn));
+
+ for (String table : tables) {
+ conn.tableOperations().compact(table, null, null, true, true);
+ }
+
+ // After compacting data, still no replication table
+ Assert.assertFalse(ReplicationTable.isOnline(conn));
+
+ for (String table : tables) {
+ conn.tableOperations().delete(table);
+ }
+
+ // After deleting tables, still no replication table
+ Assert.assertFalse(ReplicationTable.isOnline(conn));
+ }
+
+ @Test
+ public void twoEntriesForTwoTables() throws Exception {
+ Connector conn = getConnector();
+ String table1 = "table1", table2 = "table2";
+
+ // replication shouldn't exist when we begin
+ Assert.assertFalse("Replication table already online at the beginning of the test", ReplicationTable.isOnline(conn));
+
+ // Create two tables
+ conn.tableOperations().create(table1);
+ conn.tableOperations().create(table2);
+
+ // Enable replication on table1
+ conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION.getKey(), "true");
+
+ // Despite having replication on, we shouldn't have any need to write a record to it (and bring it online)
+ Assert.assertFalse(ReplicationTable.isOnline(conn));
+
+ // Write some data to table1
+ writeSomeData(conn, table1, 50, 50);
+
+ // After the commit for these mutations finishes, we'll get a replication entry in accumulo.metadata for table1
+ // Don't want to compact table1, as that would ultimately cause the entry in accumulo.metadata to be removed before we can verify it's there
+
+ // After writing data, we'll get a replication table online
+ boolean online = ReplicationTable.isOnline(conn);
+ int attempts = 10;
+ do {
+ if (!online) {
+ UtilWaitThread.sleep(5000);
+ online = ReplicationTable.isOnline(conn);
+ attempts--;
+ }
+ } while (!online && attempts > 0);
+ Assert.assertTrue("Replication table did not exist", online);
+
+ Assert.assertTrue(ReplicationTable.isOnline(conn));
+ conn.securityOperations().grantTablePermission("root", ReplicationTable.NAME, TablePermission.READ);
+
+ // Verify that we found a single replication record that's for table1
+ Scanner s = ReplicationTable.getScanner(conn);
+ StatusSection.limit(s);
+ Iterator<Entry<Key,Value>> iter = s.iterator();
+ attempts = 5;
+ while (attempts > 0) {
+ if (!iter.hasNext()) {
+ s.close();
+ Thread.sleep(1000);
+ s = ReplicationTable.getScanner(conn);
+ iter = s.iterator();
+ attempts--;
+ } else {
+ break;
+ }
+ }
+ Assert.assertTrue(iter.hasNext());
+ Entry<Key,Value> entry = iter.next();
+ // We should at least find one status record for this table, we might find a second if another log was started from ingesting the data
+ Assert.assertEquals("Expected to find replication entry for " + table1, conn.tableOperations().tableIdMap().get(table1), entry.getKey()
+ .getColumnQualifier().toString());
+ s.close();
+
+ // Enable replication on table2
+ conn.tableOperations().setProperty(table2, Property.TABLE_REPLICATION.getKey(), "true");
+
+ // Write some data to table2
+ writeSomeData(conn, table2, 50, 50);
+
+ // After the commit on these mutations, we'll get a replication entry in accumulo.metadata for table2
+ // Don't want to compact table2, as that would ultimately cause the entry in accumulo.metadata to be removed before we can verify it's there
+
+ // After writing data, we'll get a replication table online
+ Assert.assertTrue(ReplicationTable.isOnline(conn));
+ conn.securityOperations().grantTablePermission("root", ReplicationTable.NAME, TablePermission.READ);
+
+ Set<String> tableIds = Sets.newHashSet(conn.tableOperations().tableIdMap().get(table1), conn.tableOperations().tableIdMap().get(table2));
+ Set<String> tableIdsForMetadata = Sets.newHashSet(tableIds);
+
+ // Wait to make sure the table permissions propagate
+ Thread.sleep(5000);
+
+ s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(MetadataSchema.ReplicationSection.getRange());
+
+ List<Entry<Key,Value>> records = new ArrayList<>();
+ for (Entry<Key,Value> metadata : s) {
+ records.add(metadata);
+ }
+
+ Assert.assertEquals("Expected to find 2 records, but actually found " + records, 2, records.size());
+
+ for (Entry<Key,Value> metadata : records) {
+ Assert.assertTrue("Expected record to be in metadata but wasn't " + metadata.getKey().toStringNoTruncate() + ", tableIds remaining "
+ + tableIdsForMetadata, tableIdsForMetadata.remove(metadata.getKey().getColumnQualifier().toString()));
+ }
+
+ Assert.assertTrue("Expected that we had removed all metadata entries " + tableIdsForMetadata, tableIdsForMetadata.isEmpty());
+
+ // The master should be creating these records in the replication table from the metadata table every second
+ Thread.sleep(5000);
+
+ // Verify that we found two replication records: one for table1 and one for table2
+ s = ReplicationTable.getScanner(conn);
+ StatusSection.limit(s);
+ iter = s.iterator();
+ Assert.assertTrue("Found no records in replication table", iter.hasNext());
+ entry = iter.next();
+ Assert.assertTrue("Expected to find element in replication table", tableIds.remove(entry.getKey().getColumnQualifier().toString()));
+ Assert.assertTrue("Expected to find two elements in replication table, only found one ", iter.hasNext());
+ entry = iter.next();
+ Assert.assertTrue("Expected to find element in replication table", tableIds.remove(entry.getKey().getColumnQualifier().toString()));
+ Assert.assertFalse("Expected to only find two elements in replication table", iter.hasNext());
+ }
+
+ private void writeSomeData(Connector conn, String table, int rows, int cols) throws Exception {
+ BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+ for (int row = 0; row < rows; row++) {
+ Mutation m = new Mutation(Integer.toString(row));
+ for (int col = 0; col < cols; col++) {
+ String value = Integer.toString(col);
+ m.put(value, "", value);
+ }
+ bw.addMutation(m);
+ }
+ bw.close();
+ }
+
+ @Test
+ public void replicationEntriesPrecludeWalDeletion() throws Exception {
+ final Connector conn = getConnector();
+ String table1 = "table1", table2 = "table2", table3 = "table3";
+ final Multimap<String,String> logs = HashMultimap.create();
+ final AtomicBoolean keepRunning = new AtomicBoolean(true);
+
+ Thread t = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ // Should really be able to interrupt here, but the Scanner throws a fit to the logger
+ // when that happens
+ while (keepRunning.get()) {
+ try {
+ logs.putAll(getAllLogs(conn));
+ } catch (Exception e) {
+ log.error("Error getting logs", e);
+ }
+ }
+ }
+
+ });
+
+ t.start();
+
+ conn.tableOperations().create(table1);
+ conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION.getKey(), "true");
+ conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
+ Thread.sleep(2000);
+
+ // Write some data to table1
+ writeSomeData(conn, table1, 200, 500);
+
+ conn.tableOperations().create(table2);
+ conn.tableOperations().setProperty(table2, Property.TABLE_REPLICATION.getKey(), "true");
+ conn.tableOperations().setProperty(table2, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
+ Thread.sleep(2000);
+
+ writeSomeData(conn, table2, 200, 500);
+
+ conn.tableOperations().create(table3);
+ conn.tableOperations().setProperty(table3, Property.TABLE_REPLICATION.getKey(), "true");
+ conn.tableOperations().setProperty(table3, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
+ Thread.sleep(2000);
+
+ writeSomeData(conn, table3, 200, 500);
+
+ // Force a write to metadata for the data written
+ for (String table : Arrays.asList(table1, table2, table3)) {
+ conn.tableOperations().flush(table, null, null, true);
+ }
+
+ keepRunning.set(false);
+ t.join(5000);
+
+ // The master is only running every second to create records in the replication table from the metadata table
+ // Sleep a sufficient amount of time to ensure that we get the straggling WALs that might have been created at the end
+ Thread.sleep(5000);
+
+ Scanner s = ReplicationTable.getScanner(conn);
+ StatusSection.limit(s);
+ Set<String> replFiles = new HashSet<>();
+ for (Entry<Key,Value> entry : s) {
+ replFiles.add(entry.getKey().getRow().toString());
+ }
+
+ // We might have a WAL that was used solely for the replication table
+ // We want to remove that from our list as it should not appear in the replication table
+ String replicationTableId = conn.tableOperations().tableIdMap().get(ReplicationTable.NAME);
+ Iterator<Entry<String,String>> observedLogs = logs.entries().iterator();
+ while (observedLogs.hasNext()) {
+ Entry<String,String> observedLog = observedLogs.next();
+ if (replicationTableId.equals(observedLog.getValue())) {
+ log.info("Removing {} because its tableId is for the replication table", observedLog);
+ observedLogs.remove();
+ }
+ }
+
+ // We should have *some* reference to each log that was seen in the metadata table
+ // They might not all be closed yet, though (some might still be new files)
+ Assert.assertTrue("Metadata log distribution: " + logs + "replFiles " + replFiles, logs.keySet().containsAll(replFiles));
+ Assert.assertTrue("Difference between replication entries and current logs is bigger than one", logs.keySet().size() - replFiles.size() <= 1);
+
+ final Configuration conf = new Configuration();
+ for (String replFile : replFiles) {
+ Path p = new Path(replFile);
+ FileSystem fs = p.getFileSystem(conf);
+ Assert.assertTrue("File does not exist anymore, it was likely incorrectly garbage collected: " + p, fs.exists(p));
+ }
+ }
+
+ @Test
+ public void combinerWorksOnMetadata() throws Exception {
+ Connector conn = getConnector();
+
+ conn.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
+
+ ReplicationTableUtil.configureMetadataTable(conn, MetadataTable.NAME);
+
+ Status stat1 = StatusUtil.fileCreated(100);
+ Status stat2 = StatusUtil.fileClosed();
+
+ BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+ Mutation m = new Mutation(ReplicationSection.getRowPrefix() + "file:/accumulo/wals/tserver+port/uuid");
+ m.put(ReplicationSection.COLF, new Text("1"), ProtobufUtil.toValue(stat1));
+ bw.addMutation(m);
+ bw.close();
+
+ Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(ReplicationSection.getRange());
+
+ Status actual = Status.parseFrom(Iterables.getOnlyElement(s).getValue().get());
+ Assert.assertEquals(stat1, actual);
+
+ bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+ m = new Mutation(ReplicationSection.getRowPrefix() + "file:/accumulo/wals/tserver+port/uuid");
+ m.put(ReplicationSection.COLF, new Text("1"), ProtobufUtil.toValue(stat2));
+ bw.addMutation(m);
+ bw.close();
+
+ s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(ReplicationSection.getRange());
+
+ actual = Status.parseFrom(Iterables.getOnlyElement(s).getValue().get());
+ Status expected = Status.newBuilder().setBegin(0).setEnd(0).setClosed(true).setInfiniteEnd(true).setCreatedTime(100).build();
+
+ Assert.assertEquals(expected, actual);
+ }
+
+ @Test
+ public void noDeadlock() throws Exception {
+ final Connector conn = getConnector();
+
+ ReplicationTable.setOnline(conn);
+ conn.securityOperations().grantTablePermission("root", ReplicationTable.NAME, TablePermission.WRITE);
+ conn.tableOperations().deleteRows(ReplicationTable.NAME, null, null);
+
+ String table1 = "table1", table2 = "table2", table3 = "table3";
+ conn.tableOperations().create(table1);
+ conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION.getKey(), "true");
+ conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
+ conn.tableOperations().create(table2);
+ conn.tableOperations().setProperty(table2, Property.TABLE_REPLICATION.getKey(), "true");
+ conn.tableOperations().setProperty(table2, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
+ conn.tableOperations().create(table3);
+ conn.tableOperations().setProperty(table3, Property.TABLE_REPLICATION.getKey(), "true");
+ conn.tableOperations().setProperty(table3, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
+
+ writeSomeData(conn, table1, 200, 500);
+
+ writeSomeData(conn, table2, 200, 500);
+
+ writeSomeData(conn, table3, 200, 500);
+
+ // Flush everything to try to make the replication records
+ for (String table : Arrays.asList(table1, table2, table3)) {
+ conn.tableOperations().flush(table, null, null, true);
+ }
+
+ // Flush everything to try to make the replication records
+ for (String table : Arrays.asList(table1, table2, table3)) {
+ conn.tableOperations().flush(table, null, null, true);
+ }
+
+ for (String table : Arrays.asList(MetadataTable.NAME, table1, table2, table3)) {
+ Iterators.size(conn.createScanner(table, Authorizations.EMPTY).iterator());
+ }
+ }
+
+ @Test
+ public void filesClosedAfterUnused() throws Exception {
+ Connector conn = getConnector();
+
+ String table = "table";
+ conn.tableOperations().create(table);
+ String tableId = conn.tableOperations().tableIdMap().get(table);
+
+ Assert.assertNotNull(tableId);
+
+ conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION.getKey(), "true");
+ conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
+ // Configure the mock peer to just sleep (for 50 seconds) instead of replicating
+ conn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1",
+ ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, "50000"));
+
+ // Write a mutation to make a log file
+ BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+ Mutation m = new Mutation("one");
+ m.put("", "", "");
+ bw.addMutation(m);
+ bw.close();
+
+ // Write another mutation to make sure the logger rolls itself
+ bw = conn.createBatchWriter(table, new BatchWriterConfig());
+ m = new Mutation("three");
+ m.put("", "", "");
+ bw.addMutation(m);
+ bw.close();
+
+ Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.fetchColumnFamily(TabletsSection.LogColumnFamily.NAME);
+ s.setRange(TabletsSection.getRange(tableId));
+ Set<String> wals = new HashSet<>();
+ for (Entry<Key,Value> entry : s) {
+ LogEntry logEntry = LogEntry.fromKeyValue(entry.getKey(), entry.getValue());
+ wals.add(new Path(logEntry.filename).toString());
+ }
+
+ log.warn("Found wals {}", wals);
+
+ bw = conn.createBatchWriter(table, new BatchWriterConfig());
+ m = new Mutation("three");
+ byte[] bytes = new byte[1024 * 1024];
+ m.put("1".getBytes(), new byte[0], bytes);
+ m.put("2".getBytes(), new byte[0], bytes);
+ m.put("3".getBytes(), new byte[0], bytes);
+ m.put("4".getBytes(), new byte[0], bytes);
+ m.put("5".getBytes(), new byte[0], bytes);
+ bw.addMutation(m);
+ bw.close();
+
+ conn.tableOperations().flush(table, null, null, true);
+
+ while (!ReplicationTable.isOnline(conn)) {
+ UtilWaitThread.sleep(2000);
+ }
+
+ for (int i = 0; i < 10; i++) {
+ s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.fetchColumnFamily(LogColumnFamily.NAME);
+ s.setRange(TabletsSection.getRange(tableId));
+ for (Entry<Key,Value> entry : s) {
+ log.info(entry.getKey().toStringNoTruncate() + "=" + entry.getValue());
+ }
+
+ try {
+ s = ReplicationTable.getScanner(conn);
+ StatusSection.limit(s);
+ Text buff = new Text();
+ boolean allReferencedLogsClosed = true;
+ int recordsFound = 0;
+ for (Entry<Key,Value> e : s) {
+ recordsFound++;
+ allReferencedLogsClosed = true;
+ StatusSection.getFile(e.getKey(), buff);
+ String file = buff.toString();
+ if (wals.contains(file)) {
+ Status stat = Status.parseFrom(e.getValue().get());
+ if (!stat.getClosed()) {
+ log.info("{} wasn't closed", file);
+ allReferencedLogsClosed = false;
+ }
+ }
+ }
+
+ if (recordsFound > 0 && allReferencedLogsClosed) {
+ return;
+ }
+ Thread.sleep(2000);
+ } catch (RuntimeException e) {
+ Throwable cause = e.getCause();
+ if (cause instanceof AccumuloSecurityException) {
+ AccumuloSecurityException ase = (AccumuloSecurityException) cause;
+ switch (ase.getSecurityErrorCode()) {
+ case PERMISSION_DENIED:
+ // We tried to read the replication table before the GRANT went through
+ Thread.sleep(2000);
+ break;
+ default:
+ throw e;
+ }
+ }
+ }
+ }
+
+ Assert.fail("We had a file that was referenced but didn't get closed");
+ }
+
+ @Test
+ public void singleTableWithSingleTarget() throws Exception {
+ // We want to kill the GC so it doesn't come along and close Status records and mess up the comparisons
+ // against expected Status messages.
+ getCluster().getClusterControl().stop(ServerType.GARBAGE_COLLECTOR);
+
+ Connector conn = getConnector();
+ String table1 = "table1";
+
+ // replication shouldn't be online when we begin
+ Assert.assertFalse(ReplicationTable.isOnline(conn));
+
+ // Create a table
+ conn.tableOperations().create(table1);
+
+ int attempts = 10;
+
+ // Might think the table doesn't yet exist, retry
+ while (attempts > 0) {
+ try {
+ // Enable replication on table1
+ conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION.getKey(), "true");
+ // Replicate table1 to cluster1 in the table with id of '4'
+ conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "4");
+ // Sleep for 100 seconds before saying something is replicated
+ conn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1",
+ ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, "100000"));
+ break;
+ } catch (Exception e) {
+ attempts--;
+ if (attempts <= 0) {
+ throw e;
+ }
+ UtilWaitThread.sleep(2000);
+ }
+ }
+
+ // Write some data to table1
+ writeSomeData(conn, table1, 2000, 50);
+
+ // Make sure the replication table is online at this point
+ boolean online = ReplicationTable.isOnline(conn);
+ attempts = 10;
+ do {
+ if (!online) {
+ UtilWaitThread.sleep(2000);
+ online = ReplicationTable.isOnline(conn);
+ attempts--;
+ }
+ } while (!online && attempts > 0);
+ Assert.assertTrue("Replication table was never created", online);
+
+ // ACCUMULO-2743 The Observer in the tserver has to be made aware of the change to get the combiner (made by the master)
+ for (int i = 0; i < 10 && !conn.tableOperations().listIterators(ReplicationTable.NAME).keySet().contains(ReplicationTable.COMBINER_NAME); i++) {
+ UtilWaitThread.sleep(2000);
+ }
+
+ Assert.assertTrue("Combiner was never set on replication table",
+ conn.tableOperations().listIterators(ReplicationTable.NAME).keySet().contains(ReplicationTable.COMBINER_NAME));
+
+ // Trigger the minor compaction, waiting for it to finish.
+ // This should write the entry to metadata that the file has data
+ conn.tableOperations().flush(table1, null, null, true);
+
+ // Make sure that we have one status element, should be a new file
+ Scanner s = ReplicationTable.getScanner(conn);
+ StatusSection.limit(s);
+ Entry<Key,Value> entry = null;
+ Status expectedStatus = StatusUtil.openWithUnknownLength();
+ attempts = 10;
+ // This record will move from new to new with infinite length because of the minc (flush)
+ while (null == entry && attempts > 0) {
+ try {
+ entry = Iterables.getOnlyElement(s);
+ Status actual = Status.parseFrom(entry.getValue().get());
+ if (actual.getInfiniteEnd() != expectedStatus.getInfiniteEnd()) {
+ entry = null;
+ // the master process didn't yet fire and write the new mutation, wait for it to do
+ // so and try to read it again
+ Thread.sleep(1000);
+ }
+ } catch (NoSuchElementException e) {
+ entry = null;
+ Thread.sleep(500);
+ } catch (IllegalArgumentException e) {
+ // saw this contain 2 elements once
+ s = ReplicationTable.getScanner(conn);
+ StatusSection.limit(s);
+ for (Entry<Key,Value> content : s) {
+ log.info(content.getKey().toStringNoTruncate() + " => " + content.getValue());
+ }
+ throw e;
+ } finally {
+ attempts--;
+ }
+ }
+
+ Assert.assertNotNull("Could not find expected entry in replication table", entry);
+ Status actual = Status.parseFrom(entry.getValue().get());
+ Assert.assertTrue("Expected to find a replication entry that is open with infinite length: " + ProtobufUtil.toString(actual),
+ !actual.getClosed() && actual.getInfiniteEnd());
+
+ // Try a couple of times to watch for the work record to be created
+ boolean notFound = true;
+ for (int i = 0; i < 10 && notFound; i++) {
+ s = ReplicationTable.getScanner(conn);
+ WorkSection.limit(s);
+ int elementsFound = Iterables.size(s);
+ if (0 < elementsFound) {
+ Assert.assertEquals(1, elementsFound);
+ notFound = false;
+ }
+ Thread.sleep(500);
+ }
+
+ // If we didn't find the work record, print the contents of the table
+ if (notFound) {
+ s = ReplicationTable.getScanner(conn);
+ for (Entry<Key,Value> content : s) {
+ log.info(content.getKey().toStringNoTruncate() + " => " + content.getValue());
+ }
+ Assert.assertFalse("Did not find the work entry for the status entry", notFound);
+ }
+
+ // Write some more data so that we over-run the single WAL
+ writeSomeData(conn, table1, 3000, 50);
+
+ log.info("Issued compaction for table");
+ conn.tableOperations().compact(table1, null, null, true, true);
+ log.info("Compaction completed");
+
+ // Master is creating entries in the replication table from the metadata table every second.
+ // Compaction should trigger the record to be written to metadata. Wait a bit to ensure
+ // that the master has time to work.
+ Thread.sleep(5000);
+
+ s = ReplicationTable.getScanner(conn);
+ StatusSection.limit(s);
+ int numRecords = 0;
+ for (Entry<Key,Value> e : s) {
+ numRecords++;
+ log.info("Found status record {}\t{}", e.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(e.getValue().get())));
+ }
+
+ Assert.assertEquals(2, numRecords);
+
+ // We should eventually get 2 work records recorded, need to account for a potential delay though
+ // might see: status1 -> work1 -> status2 -> (our scans) -> work2
+ notFound = true;
+ for (int i = 0; i < 10 && notFound; i++) {
+ s = ReplicationTable.getScanner(conn);
+ WorkSection.limit(s);
+ int elementsFound = Iterables.size(s);
+ if (2 == elementsFound) {
+ notFound = false;
+ }
+ Thread.sleep(500);
+ }
+
+ // If we didn't find the work record, print the contents of the table
+ if (notFound) {
+ s = ReplicationTable.getScanner(conn);
+ for (Entry<Key,Value> content : s) {
+ log.info(content.getKey().toStringNoTruncate() + " => " + content.getValue());
+ }
+ Assert.assertFalse("Did not find the work entries for the status entries", notFound);
+ }
+ }
+
+ @Test
+ public void correctClusterNameInWorkEntry() throws Exception {
+ Connector conn = getConnector();
+ String table1 = "table1";
+
+ // replication shouldn't be online when we begin
+ Assert.assertFalse(ReplicationTable.isOnline(conn));
+
+ // Create two tables
+ conn.tableOperations().create(table1);
+
+ int attempts = 5;
+ while (attempts > 0) {
+ try {
+ // Enable replication on table1
+ conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION.getKey(), "true");
+ // Replicate table1 to cluster1 in the table with id of '4'
+ conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "4");
+ attempts = 0;
+ } catch (Exception e) {
+ attempts--;
+ if (attempts <= 0) {
+ throw e;
+ }
+ UtilWaitThread.sleep(500);
+ }
+ }
+
+ // Write some data to table1
+ writeSomeData(conn, table1, 2000, 50);
+ conn.tableOperations().flush(table1, null, null, true);
+
+ String tableId = conn.tableOperations().tableIdMap().get(table1);
+ Assert.assertNotNull("Table ID was null", tableId);
+
+ // Make sure the replication table exists at this point
+ boolean online = ReplicationTable.isOnline(conn);
+ attempts = 5;
+ do {
+ if (!online) {
+ UtilWaitThread.sleep(500);
+ online = ReplicationTable.isOnline(conn);
+ attempts--;
+ }
+ } while (!online && attempts > 0);
+ Assert.assertTrue("Replication table did not exist", online);
+
+ for (int i = 0; i < 5 && !conn.securityOperations().hasTablePermission("root", ReplicationTable.NAME, TablePermission.READ); i++) {
+ Thread.sleep(1000);
+ }
+
+ Assert.assertTrue(conn.securityOperations().hasTablePermission("root", ReplicationTable.NAME, TablePermission.READ));
+
+ boolean notFound = true;
+ Scanner s;
+ for (int i = 0; i < 10 && notFound; i++) {
+ s = ReplicationTable.getScanner(conn);
+ WorkSection.limit(s);
+ try {
+ Entry<Key,Value> e = Iterables.getOnlyElement(s);
+ Text expectedColqual = new ReplicationTarget("cluster1", "4", tableId).toText();
+ Assert.assertEquals(expectedColqual, e.getKey().getColumnQualifier());
+ notFound = false;
+ } catch (NoSuchElementException e) {} catch (IllegalArgumentException e) {
+ s = ReplicationTable.getScanner(conn);
+ for (Entry<Key,Value> content : s) {
+ log.info(content.getKey().toStringNoTruncate() + " => " + content.getValue());
+ }
+ Assert.fail("Found more than one work section entry");
+ }
+
+ Thread.sleep(500);
+ }
+
+ if (notFound) {
+ s = ReplicationTable.getScanner(conn);
+ for (Entry<Key,Value> content : s) {
+ log.info(content.getKey().toStringNoTruncate() + " => " + content.getValue());
+ }
+ Assert.assertFalse("Did not find the work entry for the status entry", notFound);
+ }
+ }
+
+ @Test
+ public void replicationRecordsAreClosedAfterGarbageCollection() throws Exception {
+ getCluster().getClusterControl().stop(ServerType.GARBAGE_COLLECTOR);
+
+ final Connector conn = getConnector();
+
+ ReplicationTable.setOnline(conn);
+ conn.securityOperations().grantTablePermission("root", ReplicationTable.NAME, TablePermission.WRITE);
+ conn.tableOperations().deleteRows(ReplicationTable.NAME, null, null);
+
+ final AtomicBoolean keepRunning = new AtomicBoolean(true);
+ final Set<String> metadataWals = new HashSet<>();
+
+ Thread t = new Thread(new Runnable() {
+ @Override
+ public void run() {
+ // Should really be able to interrupt here, but the Scanner throws a fit to the logger
+ // when that happens
+ while (keepRunning.get()) {
+ try {
+ metadataWals.addAll(getLogs(conn).keySet());
+ } catch (Exception e) {
+ log.error("Metadata table doesn't exist");
+ }
+ }
+ }
+
+ });
+
+ t.start();
+
+ String table1 = "table1", table2 = "table2", table3 = "table3";
+
+ try {
+ conn.tableOperations().create(table1);
+ conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION.getKey(), "true");
+ conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
+ conn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1",
+ ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, null));
+
+ // Write some data to table1
+ writeSomeData(conn, table1, 200, 500);
+
+ conn.tableOperations().create(table2);
+ conn.tableOperations().setProperty(table2, Property.TABLE_REPLICATION.getKey(), "true");
+ conn.tableOperations().setProperty(table2, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
+
+ writeSomeData(conn, table2, 200, 500);
+
+ conn.tableOperations().create(table3);
+ conn.tableOperations().setProperty(table3, Property.TABLE_REPLICATION.getKey(), "true");
+ conn.tableOperations().setProperty(table3, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
+
+ writeSomeData(conn, table3, 200, 500);
+
+ // Compact everything (flushing first) to try to force creation of the replication records
+ for (String table : Arrays.asList(table1, table2, table3)) {
+ conn.tableOperations().compact(table, null, null, true, true);
+ }
+ } finally {
+ keepRunning.set(false);
+ t.join(5000);
+ Assert.assertFalse(t.isAlive());
+ }
+
+ // Kill the tserver(s) and restart them
+ // to ensure that the WALs we previously observed all move to closed.
+ cluster.getClusterControl().stop(ServerType.TABLET_SERVER);
+ cluster.getClusterControl().start(ServerType.TABLET_SERVER);
+
+ // Make sure we can read all the tables (recovery complete)
+ for (String table : Arrays.asList(table1, table2, table3)) {
+ Iterators.size(conn.createScanner(table, Authorizations.EMPTY).iterator());
+ }
+
+ // Starting the gc will run CloseWriteAheadLogReferences which will first close Statuses
+ // in the metadata table, and then in the replication table
+ Process gc = cluster.exec(SimpleGarbageCollector.class);
+
+ waitForGCLock(conn);
+
+ Thread.sleep(1000);
+
+ log.info("GC is up and should have had time to run at least once by now");
+
+ try {
+ boolean allClosed = true;
+
+ // We should either find all closed records or no records
+ // After they're closed, they are candidates for deletion
+ for (int i = 0; i < 10; i++) {
+ Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(Range.prefix(ReplicationSection.getRowPrefix()));
+ Iterator<Entry<Key,Value>> iter = s.iterator();
+
+ long recordsFound = 0L;
+ while (allClosed && iter.hasNext()) {
+ Entry<Key,Value> entry = iter.next();
+ String wal = entry.getKey().getRow().toString();
+ if (metadataWals.contains(wal)) {
+ Status status = Status.parseFrom(entry.getValue().get());
+ log.info("{}={}", entry.getKey().toStringNoTruncate(), ProtobufUtil.toString(status));
+ allClosed &= status.getClosed();
+ recordsFound++;
+ }
+ }
+
+ log.info("Found {} records from the metadata table", recordsFound);
+ if (allClosed) {
+ break;
+ }
+
+ UtilWaitThread.sleep(2000);
+ }
+
+ if (!allClosed) {
+ Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(Range.prefix(ReplicationSection.getRowPrefix()));
+ for (Entry<Key,Value> entry : s) {
+ log.info(entry.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
+ }
+ Assert.fail("Expected all replication records in the metadata table to be closed");
+ }
+
+ for (int i = 0; i < 10; i++) {
+ allClosed = true;
+
+ Scanner s = ReplicationTable.getScanner(conn);
+ Iterator<Entry<Key,Value>> iter = s.iterator();
+
+ long recordsFound = 0L;
+ while (allClosed && iter.hasNext()) {
+ Entry<Key,Value> entry = iter.next();
+ String wal = entry.getKey().getRow().toString();
+ if (metadataWals.contains(wal)) {
+ Status status = Status.parseFrom(entry.getValue().get());
+ log.info("{}={}", entry.getKey().toStringNoTruncate(), ProtobufUtil.toString(status));
+ allClosed &= status.getClosed();
+ recordsFound++;
+ }
+ }
+
+ log.info("Found {} records from the replication table", recordsFound);
+ if (allClosed) {
+ break;
+ }
+
+ UtilWaitThread.sleep(3000);
+ }
+
+ if (!allClosed) {
+ Scanner s = ReplicationTable.getScanner(conn);
+ StatusSection.limit(s);
+ for (Entry<Key,Value> entry : s) {
+ log.info(entry.getKey().toStringNoTruncate() + " " + TextFormat.shortDebugString(Status.parseFrom(entry.getValue().get())));
+ }
+ Assert.fail("Expected all replication records in the replication table to be closed");
+ }
+
+ } finally {
+ gc.destroy();
+ gc.waitFor();
+ }
+
+ }
+
+ @Test
+ public void replicatedStatusEntriesAreDeleted() throws Exception {
+ // Just stop it now, we'll restart it after we restart the tserver
+ getCluster().getClusterControl().stop(ServerType.GARBAGE_COLLECTOR);
+
+ final Connector conn = getConnector();
+ log.info("Got connector to MAC");
+ String table1 = "table1";
+
+ // replication shouldn't be online when we begin
+ Assert.assertFalse(ReplicationTable.isOnline(conn));
+
+ // Create the table
+ conn.tableOperations().create(table1);
+
+ int attempts = 5;
+ while (attempts > 0) {
+ try {
+ // Enable replication on table1
+ conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION.getKey(), "true");
+ // Replicate table1 to cluster1 in the table with id of '4'
+ conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "4");
+ // Use the MockReplicaSystem impl, configured to sleep for 1000ms
+ conn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1",
+ ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, "1000"));
+ attempts = 0;
+ } catch (Exception e) {
+ attempts--;
+ if (attempts <= 0) {
+ throw e;
+ }
+ UtilWaitThread.sleep(500);
+ }
+ }
+
+ String tableId = conn.tableOperations().tableIdMap().get(table1);
+ Assert.assertNotNull("Could not determine table id for " + table1, tableId);
+
+ // Write some data to table1
+ writeSomeData(conn, table1, 2000, 50);
+ conn.tableOperations().flush(table1, null, null, true);
+
+ // Make sure the replication table exists at this point
+ boolean online = ReplicationTable.isOnline(conn);
+ attempts = 10;
+ do {
+ if (!online) {
+ UtilWaitThread.sleep(1000);
+ online = ReplicationTable.isOnline(conn);
+ attempts--;
+ }
+ } while (!online && attempts > 0);
+ Assert.assertTrue("Replication table did not exist", online);
+
+ // Grant ourselves the write permission for later
+ conn.securityOperations().grantTablePermission("root", ReplicationTable.NAME, TablePermission.WRITE);
+
+ log.info("Checking for replication entries in replication");
+ // Then we need to get those records over to the replication table
+ Scanner s;
+ Set<String> entries = new HashSet<>();
+ for (int i = 0; i < 5; i++) {
+ s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(ReplicationSection.getRange());
+ entries.clear();
+ for (Entry<Key,Value> entry : s) {
+ entries.add(entry.getKey().getRow().toString());
+ log.info("{}={}", entry.getKey().toStringNoTruncate(), entry.getValue());
+ }
+ if (!entries.isEmpty()) {
+ log.info("Replication entries {}", entries);
+ break;
+ }
+ Thread.sleep(1000);
+ }
+
+ Assert.assertFalse("Did not find any replication entries in the replication table", entries.isEmpty());
+
+ // Find the WorkSection record that will be created for that data we ingested
+ boolean notFound = true;
+ for (int i = 0; i < 10 && notFound; i++) {
+ try {
+ s = ReplicationTable.getScanner(conn);
+ WorkSection.limit(s);
+ Entry<Key,Value> e = Iterables.getOnlyElement(s);
+ log.info("Found entry: " + e.getKey().toStringNoTruncate());
+ Text expectedColqual = new ReplicationTarget("cluster1", "4", tableId).toText();
+ Assert.assertEquals(expectedColqual, e.getKey().getColumnQualifier());
+ notFound = false;
+ } catch (NoSuchElementException e) {
+ // No work entry yet; fall through and retry after the sleep below
+ } catch (IllegalArgumentException e) {
+ // Somehow we got more than one element. Log what they were
+ s = ReplicationTable.getScanner(conn);
+ for (Entry<Key,Value> content : s) {
+ log.info(content.getKey().toStringNoTruncate() + " => " + content.getValue());
+ }
+ Assert.fail("Found more than one work section entry");
+ } catch (RuntimeException e) {
+ // Catch a propagation issue, fail if it's not what we expect
+ Throwable cause = e.getCause();
+ if (cause instanceof AccumuloSecurityException) {
+ AccumuloSecurityException sec = (AccumuloSecurityException) cause;
+ switch (sec.getSecurityErrorCode()) {
+ case PERMISSION_DENIED:
+ // retry -- the grant didn't happen yet
+ log.warn("Sleeping because permission was denied");
+ break;
+ default:
+ throw e;
+ }
+ } else {
+ throw e;
+ }
+ }
+
+ Thread.sleep(2000);
+ }
+
+ if (notFound) {
+ s = ReplicationTable.getScanner(conn);
+ for (Entry<Key,Value> content : s) {
+ log.info(content.getKey().toStringNoTruncate() + " => " + ProtobufUtil.toString(Status.parseFrom(content.getValue().get())));
+ }
+ Assert.assertFalse("Did not find the work entry for the status entry", notFound);
+ }
+
+ /**
+ * By this point, we should have data ingested into a table, with at least one WAL as a candidate for replication. Compacting the table should close all
+ * open WALs, which should ensure that all records we're going to replicate have entries in the replication table and that nothing remains in the metadata
+ * table.
+ */
+
+ log.info("Killing tserver");
+ // Kill the tserver(s) and restart them
+ // to ensure that the WALs we previously observed all move to closed.
+ cluster.getClusterControl().stop(ServerType.TABLET_SERVER);
+
+ log.info("Starting tserver");
+ cluster.getClusterControl().start(ServerType.TABLET_SERVER);
+
+ log.info("Waiting to read tables");
+ UtilWaitThread.sleep(2 * 3 * 1000);
+
+ // Make sure we can read all the tables (recovery complete)
+ for (String table : new String[] {MetadataTable.NAME, table1}) {
+ Iterators.size(conn.createScanner(table, Authorizations.EMPTY).iterator());
+ }
+
+ log.info("Recovered metadata:");
+ s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ for (Entry<Key,Value> entry : s) {
+ log.info("{}={}", entry.getKey().toStringNoTruncate(), entry.getValue());
+ }
+
+ cluster.getClusterControl().start(ServerType.GARBAGE_COLLECTOR);
+
+ // Wait for a bit since the GC has to run (should be running after a one second delay)
+ waitForGCLock(conn);
+
+ Thread.sleep(1000);
+
+ log.info("After GC");
+ s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ for (Entry<Key,Value> entry : s) {
+ log.info("{}={}", entry.getKey().toStringNoTruncate(), entry.getValue());
+ }
+
+ // We expect no records in the metadata table after compaction. We have to poll
+ // because we have to wait for the StatusMaker's next iteration which will clean
+ // up the dangling *closed* records after we create the record in the replication table.
+ // We need the GC to close the file (CloseWriteAheadLogReferences) before we can remove the record
+ log.info("Checking metadata table for replication entries");
+ Set<String> remaining = new HashSet<>();
+ for (int i = 0; i < 10; i++) {
+ s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(ReplicationSection.getRange());
+ remaining.clear();
+ for (Entry<Key,Value> e : s) {
+ remaining.add(e.getKey().getRow().toString());
+ }
+ remaining.retainAll(entries);
+ if (remaining.isEmpty()) {
+ break;
+ }
+ log.info("remaining {}", remaining);
+ Thread.sleep(2000);
+ log.info("");
+ }
+
+ Assert.assertTrue("Replication status messages were not cleaned up from metadata table", remaining.isEmpty());
+
+ /**
+ * After we close out and subsequently delete the metadata record, this will propagate to the replication table, which will cause those records to be
+ * deleted after replication occurs
+ */
+
+ int recordsFound = 0;
+ for (int i = 0; i < 30; i++) {
+ s = ReplicationTable.getScanner(conn);
+ recordsFound = 0;
+ for (Entry<Key,Value> entry : s) {
+ recordsFound++;
+ log.info("{} {}", entry.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
+ }
+
+ if (recordsFound <= 2) {
+ break;
+ } else {
+ Thread.sleep(1000);
+ log.info("");
+ }
+ }
+
+ Assert.assertTrue("Found unexpected replication records in the replication table", recordsFound <= 2);
+ }
+}
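The tests in this file repeatedly call a writeSomeData(conn, table, rows, cols) helper that is not part of this hunk. A minimal sketch of such a helper, assuming the two integer arguments are the row and column counts (the name and signature come from the call sites above; the mutation layout mirrors the BatchWriter ingest loops that appear later in this commit):

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.data.Mutation;

    // Hypothetical reconstruction, not the helper from this commit
    private void writeSomeData(Connector conn, String table, int rows, int cols) throws Exception {
      BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
      try {
        for (int row = 0; row < rows; row++) {
          Mutation m = new Mutation(Integer.toString(row));
          for (int col = 0; col < cols; col++) {
            String value = Integer.toString(col);
            // The value doubles as the column family, with an empty qualifier,
            // matching the ingest loops used elsewhere in these ITs
            m.put(value, "", value);
          }
          bw.addMutation(m);
        }
      } finally {
        bw.close();
      }
    }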
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/replication/ReplicationRandomWalkIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/ReplicationRandomWalkIT.java b/test/src/main/java/org/apache/accumulo/test/replication/ReplicationRandomWalkIT.java
new file mode 100644
index 0000000..80bc69d
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/replication/ReplicationRandomWalkIT.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.replication;
+
+import static org.apache.accumulo.core.conf.Property.TSERV_ARCHIVE_WALOGS;
+import static org.apache.accumulo.core.conf.Property.TSERV_WALOG_MAX_SIZE;
+
+import java.util.Properties;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.accumulo.test.randomwalk.Environment;
+import org.apache.accumulo.test.randomwalk.concurrent.Replication;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+public class ReplicationRandomWalkIT extends ConfigurableMacBase {
+
+ @Override
+ protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setProperty(TSERV_ARCHIVE_WALOGS, "false");
+ cfg.setProperty(TSERV_WALOG_MAX_SIZE, "1M");
+ cfg.setNumTservers(1);
+ }
+
+ @Test(timeout = 5 * 60 * 1000)
+ public void runReplicationRandomWalkStep() throws Exception {
+ Replication r = new Replication();
+
+ Environment env = new Environment(new Properties()) {
+ @Override
+ public String getUserName() {
+ return "root";
+ }
+
+ @Override
+ public String getPassword() {
+ return ROOT_PASSWORD;
+ }
+
+ @Override
+ public Connector getConnector() throws AccumuloException, AccumuloSecurityException {
+ return ReplicationRandomWalkIT.this.getConnector();
+ }
+
+ };
+ r.visit(null, env, null);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/replication/StatusCombinerMacIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/StatusCombinerMacIT.java b/test/src/main/java/org/apache/accumulo/test/replication/StatusCombinerMacIT.java
new file mode 100644
index 0000000..b072aa7
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/replication/StatusCombinerMacIT.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.replication;
+
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.protobuf.ProtobufUtil;
+import org.apache.accumulo.core.replication.ReplicationSchema.StatusSection;
+import org.apache.accumulo.core.replication.ReplicationTable;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
+import org.apache.accumulo.server.replication.StatusUtil;
+import org.apache.accumulo.server.replication.proto.Replication.Status;
+import org.apache.accumulo.server.util.ReplicationTableUtil;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.common.collect.Iterables;
+
+public class StatusCombinerMacIT extends SharedMiniClusterBase {
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Test
+ public void testCombinerSetOnMetadata() throws Exception {
+ TableOperations tops = getConnector().tableOperations();
+ Map<String,EnumSet<IteratorScope>> iterators = tops.listIterators(MetadataTable.NAME);
+
+ Assert.assertTrue(iterators.containsKey(ReplicationTableUtil.COMBINER_NAME));
+ EnumSet<IteratorScope> scopes = iterators.get(ReplicationTableUtil.COMBINER_NAME);
+ Assert.assertEquals(3, scopes.size());
+ Assert.assertTrue(scopes.contains(IteratorScope.scan));
+ Assert.assertTrue(scopes.contains(IteratorScope.minc));
+ Assert.assertTrue(scopes.contains(IteratorScope.majc));
+
+ Iterable<Entry<String,String>> propIter = tops.getProperties(MetadataTable.NAME);
+ HashMap<String,String> properties = new HashMap<String,String>();
+ for (Entry<String,String> entry : propIter) {
+ properties.put(entry.getKey(), entry.getValue());
+ }
+
+ for (IteratorScope scope : scopes) {
+ String key = Property.TABLE_ITERATOR_PREFIX.getKey() + scope.name() + "." + ReplicationTableUtil.COMBINER_NAME + ".opt.columns";
+ Assert.assertTrue("Properties did not contain key : " + key, properties.containsKey(key));
+ Assert.assertEquals(MetadataSchema.ReplicationSection.COLF.toString(), properties.get(key));
+ }
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector conn = getConnector();
+ ClusterUser user = getAdminUser();
+
+ ReplicationTable.setOnline(conn);
+ conn.securityOperations().grantTablePermission(user.getPrincipal(), ReplicationTable.NAME, TablePermission.WRITE);
+ BatchWriter bw = ReplicationTable.getBatchWriter(conn);
+ long createTime = System.currentTimeMillis();
+ try {
+ Mutation m = new Mutation("file:/accumulo/wal/HW10447.local+56808/93cdc17e-7521-44fa-87b5-37f45bcb92d3");
+ StatusSection.add(m, new Text("1"), StatusUtil.fileCreatedValue(createTime));
+ bw.addMutation(m);
+ } finally {
+ bw.close();
+ }
+
+ Scanner s = ReplicationTable.getScanner(conn);
+ Entry<Key,Value> entry = Iterables.getOnlyElement(s);
+ Assert.assertEquals(StatusUtil.fileCreatedValue(createTime), entry.getValue());
+
+ bw = ReplicationTable.getBatchWriter(conn);
+ try {
+ Mutation m = new Mutation("file:/accumulo/wal/HW10447.local+56808/93cdc17e-7521-44fa-87b5-37f45bcb92d3");
+ StatusSection.add(m, new Text("1"), ProtobufUtil.toValue(StatusUtil.replicated(Long.MAX_VALUE)));
+ bw.addMutation(m);
+ } finally {
+ bw.close();
+ }
+
+ s = ReplicationTable.getScanner(conn);
+ entry = Iterables.getOnlyElement(s);
+ Status stat = Status.parseFrom(entry.getValue().get());
+ Assert.assertEquals(Long.MAX_VALUE, stat.getBegin());
+ }
+
+}
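For context on the combiner test above: Status values are protobuf messages, and the combiner configured on the metadata and replication tables merges multiple Status entries for the same WAL into one. A rough sketch of the two values the test writes, assuming the Status fields begin/end/infiniteEnd/closed/createdTime (illustration only, not the combiner's actual merge code):

    import org.apache.accumulo.server.replication.proto.Replication.Status;

    // Roughly what StatusUtil.fileCreatedValue(createTime) encodes: a newly
    // created, still-open file with nothing replicated yet (an assumption
    // based on the field names)
    Status created = Status.newBuilder().setBegin(0).setEnd(0).setInfiniteEnd(false).setClosed(false).setCreatedTime(createTime).build();

    // Roughly what StatusUtil.replicated(Long.MAX_VALUE) encodes: data through
    // offset Long.MAX_VALUE has been replicated
    Status replicated = Status.newBuilder().setBegin(Long.MAX_VALUE).setInfiniteEnd(true).build();

The final assertEquals(Long.MAX_VALUE, stat.getBegin()) passes only if the combiner keeps the maximum begin offset across the two writes, which is the behavior being verified.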
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/replication/KerberosReplicationIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/KerberosReplicationIT.java b/test/src/main/java/org/apache/accumulo/test/replication/KerberosReplicationIT.java
new file mode 100644
index 0000000..48dfdbd
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/replication/KerberosReplicationIT.java
@@ -0,0 +1,233 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.replication;
+
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.security.tokens.KerberosToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.harness.AccumuloITBase;
+import org.apache.accumulo.harness.MiniClusterConfigurationCallback;
+import org.apache.accumulo.harness.MiniClusterHarness;
+import org.apache.accumulo.harness.TestingKdc;
+import org.apache.accumulo.master.replication.SequentialWorkAssigner;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.minicluster.impl.ProcessReference;
+import org.apache.accumulo.server.replication.ReplicaSystemFactory;
+import org.apache.accumulo.tserver.TabletServer;
+import org.apache.accumulo.tserver.replication.AccumuloReplicaSystem;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Iterators;
+
+/**
+ * Ensure that replication occurs using keytabs (and SASL) rather than passwords
+ */
+public class KerberosReplicationIT extends AccumuloITBase {
+ private static final Logger log = LoggerFactory.getLogger(KerberosReplicationIT.class);
+
+ private static TestingKdc kdc;
+ private static String krbEnabledForITs = null;
+ private static ClusterUser rootUser;
+
+ @BeforeClass
+ public static void startKdc() throws Exception {
+ kdc = new TestingKdc();
+ kdc.start();
+ krbEnabledForITs = System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION);
+ if (null == krbEnabledForITs || !Boolean.parseBoolean(krbEnabledForITs)) {
+ System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, "true");
+ }
+ rootUser = kdc.getRootUser();
+ }
+
+ @AfterClass
+ public static void stopKdc() throws Exception {
+ if (null != kdc) {
+ kdc.stop();
+ }
+ if (null != krbEnabledForITs) {
+ System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, krbEnabledForITs);
+ }
+ }
+
+ private MiniAccumuloClusterImpl primary, peer;
+ private static final String PRIMARY_NAME = "primary", PEER_NAME = "peer";
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60 * 3;
+ }
+
+ private MiniClusterConfigurationCallback getConfigCallback(final String name) {
+ return new MiniClusterConfigurationCallback() {
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
+ cfg.setNumTservers(1);
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "15s");
+ cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "2M");
+ cfg.setProperty(Property.GC_CYCLE_START, "1s");
+ cfg.setProperty(Property.GC_CYCLE_DELAY, "5s");
+ cfg.setProperty(Property.REPLICATION_WORK_ASSIGNMENT_SLEEP, "1s");
+ cfg.setProperty(Property.MASTER_REPLICATION_SCAN_INTERVAL, "1s");
+ cfg.setProperty(Property.REPLICATION_NAME, name);
+ cfg.setProperty(Property.REPLICATION_MAX_UNIT_SIZE, "8M");
+ cfg.setProperty(Property.REPLICATION_WORK_ASSIGNER, SequentialWorkAssigner.class.getName());
+ cfg.setProperty(Property.TSERV_TOTAL_MUTATION_QUEUE_MAX, "1M");
+ coreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+ };
+ }
+
+ @Before
+ public void setup() throws Exception {
+ MiniClusterHarness harness = new MiniClusterHarness();
+
+ // Create a primary and a peer instance, both with the same "root" user
+ primary = harness.create(getClass().getName(), testName.getMethodName(), new PasswordToken("unused"), getConfigCallback(PRIMARY_NAME), kdc);
+ primary.start();
+
+ peer = harness.create(getClass().getName(), testName.getMethodName() + "_peer", new PasswordToken("unused"), getConfigCallback(PEER_NAME), kdc);
+ peer.start();
+
+ // Enable kerberos auth
+ Configuration conf = new Configuration(false);
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+ UserGroupInformation.setConfiguration(conf);
+ }
+
+ @After
+ public void teardown() throws Exception {
+ if (null != peer) {
+ peer.stop();
+ }
+ if (null != primary) {
+ primary.stop();
+ }
+ }
+
+ @Test
+ public void dataReplicatedToCorrectTable() throws Exception {
+ // Login as the root user
+ UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+
+ final KerberosToken token = new KerberosToken();
+ final Connector primaryConn = primary.getConnector(rootUser.getPrincipal(), token);
+ final Connector peerConn = peer.getConnector(rootUser.getPrincipal(), token);
+
+ ClusterUser replicationUser = kdc.getClientPrincipal(0);
+
+ // Create user for replication to the peer
+ peerConn.securityOperations().createLocalUser(replicationUser.getPrincipal(), null);
+
+ primaryConn.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + PEER_NAME, replicationUser.getPrincipal());
+ primaryConn.instanceOperations().setProperty(Property.REPLICATION_PEER_KEYTAB.getKey() + PEER_NAME, replicationUser.getKeytab().getAbsolutePath());
+
+ // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
+ primaryConn.instanceOperations().setProperty(
+ Property.REPLICATION_PEERS.getKey() + PEER_NAME,
+ ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
+ AccumuloReplicaSystem.buildConfiguration(peerConn.getInstance().getInstanceName(), peerConn.getInstance().getZooKeepers())));
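+ // Illustrative only: with a peer instance named "peer" at localhost:2181, the
+ // property set above would be roughly
+ // replication.peer.peer=org.apache.accumulo.tserver.replication.AccumuloReplicaSystem,peer,localhost:2181
+ // (key prefix and value layout are assumptions based on the comment above)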
+
+ String primaryTable1 = "primary", peerTable1 = "peer";
+
+ // Create tables
+ primaryConn.tableOperations().create(primaryTable1);
+ String masterTableId1 = primaryConn.tableOperations().tableIdMap().get(primaryTable1);
+ Assert.assertNotNull(masterTableId1);
+
+ peerConn.tableOperations().create(peerTable1);
+ String peerTableId1 = peerConn.tableOperations().tableIdMap().get(peerTable1);
+ Assert.assertNotNull(peerTableId1);
+
+ // Grant write permission
+ peerConn.securityOperations().grantTablePermission(replicationUser.getPrincipal(), peerTable1, TablePermission.WRITE);
+
+ // Replicate this table to the peerClusterName in a table with the peerTableId table id
+ primaryConn.tableOperations().setProperty(primaryTable1, Property.TABLE_REPLICATION.getKey(), "true");
+ primaryConn.tableOperations().setProperty(primaryTable1, Property.TABLE_REPLICATION_TARGET.getKey() + PEER_NAME, peerTableId1);
+
+ // Write some data to table1
+ BatchWriter bw = primaryConn.createBatchWriter(primaryTable1, new BatchWriterConfig());
+ long masterTable1Records = 0L;
+ for (int rows = 0; rows < 2500; rows++) {
+ Mutation m = new Mutation(primaryTable1 + rows);
+ for (int cols = 0; cols < 100; cols++) {
+ String value = Integer.toString(cols);
+ m.put(value, "", value);
+ masterTable1Records++;
+ }
+ bw.addMutation(m);
+ }
+
+ bw.close();
+
+ log.info("Wrote all data to primary cluster");
+
+ Set<String> filesFor1 = primaryConn.replicationOperations().referencedFiles(primaryTable1);
+
+ // Restart the tserver to force a close on the WAL
+ for (ProcessReference proc : primary.getProcesses().get(ServerType.TABLET_SERVER)) {
+ primary.killProcess(ServerType.TABLET_SERVER, proc);
+ }
+ primary.exec(TabletServer.class);
+
+ log.info("Restarted the tserver");
+
+ // Read the data -- the tserver is back up and running and tablets are assigned
+ Iterators.size(primaryConn.createScanner(primaryTable1, Authorizations.EMPTY).iterator());
+
+ // Wait for both tables to be replicated
+ log.info("Waiting for {} for {}", filesFor1, primaryTable1);
+ primaryConn.replicationOperations().drain(primaryTable1, filesFor1);
+
+ long countTable = 0L;
+ for (Entry<Key,Value> entry : peerConn.createScanner(peerTable1, Authorizations.EMPTY)) {
+ countTable++;
+ Assert.assertTrue("Found unexpected key-value" + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString()
+ .startsWith(primaryTable1));
+ }
+
+ log.info("Found {} records in {}", countTable, peerTable1);
+ Assert.assertEquals(masterTable1Records, countTable);
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/replication/MultiInstanceReplicationIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/MultiInstanceReplicationIT.java b/test/src/main/java/org/apache/accumulo/test/replication/MultiInstanceReplicationIT.java
new file mode 100644
index 0000000..b6888db
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/replication/MultiInstanceReplicationIT.java
@@ -0,0 +1,731 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.replication;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.PartialKey;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.ReplicationSection;
+import org.apache.accumulo.core.protobuf.ProtobufUtil;
+import org.apache.accumulo.core.replication.ReplicationSchema.WorkSection;
+import org.apache.accumulo.core.replication.ReplicationTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.master.replication.SequentialWorkAssigner;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.minicluster.impl.ProcessReference;
+import org.apache.accumulo.server.replication.ReplicaSystemFactory;
+import org.apache.accumulo.server.replication.StatusUtil;
+import org.apache.accumulo.server.replication.proto.Replication.Status;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.accumulo.tserver.TabletServer;
+import org.apache.accumulo.tserver.replication.AccumuloReplicaSystem;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Iterators;
+
+/**
+ * Replication tests which start at least two MAC instances and replicate data between them
+ */
+public class MultiInstanceReplicationIT extends ConfigurableMacBase {
+ private static final Logger log = LoggerFactory.getLogger(MultiInstanceReplicationIT.class);
+
+ private ExecutorService executor;
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 10 * 60;
+ }
+
+ @Before
+ public void createExecutor() {
+ executor = Executors.newSingleThreadExecutor();
+ }
+
+ @After
+ public void stopExecutor() {
+ if (null != executor) {
+ executor.shutdownNow();
+ }
+ }
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setNumTservers(1);
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "3s");
+ cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "2M");
+ cfg.setProperty(Property.GC_CYCLE_START, "1s");
+ cfg.setProperty(Property.GC_CYCLE_DELAY, "5s");
+ cfg.setProperty(Property.REPLICATION_WORK_ASSIGNMENT_SLEEP, "1s");
+ cfg.setProperty(Property.MASTER_REPLICATION_SCAN_INTERVAL, "1s");
+ cfg.setProperty(Property.REPLICATION_MAX_UNIT_SIZE, "8M");
+ cfg.setProperty(Property.REPLICATION_NAME, "master");
+ cfg.setProperty(Property.REPLICATION_WORK_ASSIGNER, SequentialWorkAssigner.class.getName());
+ cfg.setProperty(Property.TSERV_TOTAL_MUTATION_QUEUE_MAX, "1M");
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ /**
+ * Use the same SSL and credential provider configuration that is set up by AbstractMacIT for the other MAC used for replication
+ */
+ private void updatePeerConfigFromPrimary(MiniAccumuloConfigImpl primaryCfg, MiniAccumuloConfigImpl peerCfg) {
+ // Set the same SSL information from the primary when present
+ Map<String,String> primarySiteConfig = primaryCfg.getSiteConfig();
+ if ("true".equals(primarySiteConfig.get(Property.INSTANCE_RPC_SSL_ENABLED.getKey()))) {
+ Map<String,String> peerSiteConfig = new HashMap<String,String>();
+ peerSiteConfig.put(Property.INSTANCE_RPC_SSL_ENABLED.getKey(), "true");
+ String keystorePath = primarySiteConfig.get(Property.RPC_SSL_KEYSTORE_PATH.getKey());
+ Assert.assertNotNull("Keystore Path was null", keystorePath);
+ peerSiteConfig.put(Property.RPC_SSL_KEYSTORE_PATH.getKey(), keystorePath);
+ String truststorePath = primarySiteConfig.get(Property.RPC_SSL_TRUSTSTORE_PATH.getKey());
+ Assert.assertNotNull("Truststore Path was null", truststorePath);
+ peerSiteConfig.put(Property.RPC_SSL_TRUSTSTORE_PATH.getKey(), truststorePath);
+
+ // Passwords might be stored in CredentialProvider
+ String keystorePassword = primarySiteConfig.get(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey());
+ if (null != keystorePassword) {
+ peerSiteConfig.put(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey(), keystorePassword);
+ }
+ String truststorePassword = primarySiteConfig.get(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey());
+ if (null != truststorePassword) {
+ peerSiteConfig.put(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey(), truststorePassword);
+ }
+
+ System.out.println("Setting site configuration for peer " + peerSiteConfig);
+ peerCfg.setSiteConfig(peerSiteConfig);
+ }
+
+ // Use the CredentialProvider if the primary also uses one
+ String credProvider = primarySiteConfig.get(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey());
+ if (null != credProvider) {
+ Map<String,String> peerSiteConfig = peerCfg.getSiteConfig();
+ peerSiteConfig.put(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey(), credProvider);
+ peerCfg.setSiteConfig(peerSiteConfig);
+ }
+ }
+
+ @Test(timeout = 10 * 60 * 1000)
+ public void dataWasReplicatedToThePeer() throws Exception {
+ MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
+ ROOT_PASSWORD);
+ peerCfg.setNumTservers(1);
+ peerCfg.setInstanceName("peer");
+ peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
+
+ updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
+
+ MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg);
+
+ peerCluster.start();
+
+ try {
+ final Connector connMaster = getConnector();
+ final Connector connPeer = peerCluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
+
+ ReplicationTable.setOnline(connMaster);
+
+ String peerUserName = "peer", peerPassword = "foo";
+
+ String peerClusterName = "peer";
+
+ connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
+
+ connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
+ connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
+
+ // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
+ connMaster.instanceOperations().setProperty(
+ Property.REPLICATION_PEERS.getKey() + peerClusterName,
+ ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
+ AccumuloReplicaSystem.buildConfiguration(peerCluster.getInstanceName(), peerCluster.getZooKeepers())));
+
+ final String masterTable = "master", peerTable = "peer";
+
+ connMaster.tableOperations().create(masterTable);
+ String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable);
+ Assert.assertNotNull(masterTableId);
+
+ connPeer.tableOperations().create(peerTable);
+ String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable);
+ Assert.assertNotNull(peerTableId);
+
+ connPeer.securityOperations().grantTablePermission(peerUserName, peerTable, TablePermission.WRITE);
+
+ // Replicate this table to the peerClusterName in a table with the peerTableId table id
+ connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION.getKey(), "true");
+ connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId);
+
+ // Write some data to table1
+ BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig());
+ for (int rows = 0; rows < 5000; rows++) {
+ Mutation m = new Mutation(Integer.toString(rows));
+ for (int cols = 0; cols < 100; cols++) {
+ String value = Integer.toString(cols);
+ m.put(value, "", value);
+ }
+ bw.addMutation(m);
+ }
+
+ bw.close();
+
+ log.info("Wrote all data to master cluster");
+
+ final Set<String> filesNeedingReplication = connMaster.replicationOperations().referencedFiles(masterTable);
+
+ log.info("Files to replicate: " + filesNeedingReplication);
+
+ for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
+ cluster.killProcess(ServerType.TABLET_SERVER, proc);
+ }
+ cluster.exec(TabletServer.class);
+
+ log.info("TabletServer restarted");
+ Iterators.size(ReplicationTable.getScanner(connMaster).iterator());
+ log.info("TabletServer is online");
+
+ while (!ReplicationTable.isOnline(connMaster)) {
+ log.info("Replication table still offline, waiting");
+ Thread.sleep(5000);
+ }
+
+ log.info("");
+ log.info("Fetching metadata records:");
+ for (Entry<Key,Value> kv : connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
+ if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
+ log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
+ } else {
+ log.info(kv.getKey().toStringNoTruncate() + " " + kv.getValue());
+ }
+ }
+
+ log.info("");
+ log.info("Fetching replication records:");
+ for (Entry<Key,Value> kv : ReplicationTable.getScanner(connMaster)) {
+ log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
+ }
+
+ Future<Boolean> future = executor.submit(new Callable<Boolean>() {
+
+ @Override
+ public Boolean call() throws Exception {
+ connMaster.replicationOperations().drain(masterTable, filesNeedingReplication);
+ log.info("Drain completed");
+ return true;
+ }
+
+ });
+
+ try {
+ future.get(60, TimeUnit.SECONDS);
+ } catch (TimeoutException e) {
+ future.cancel(true);
+ Assert.fail("Drain did not finish within 60 seconds");
+ } finally {
+ executor.shutdownNow();
+ }
+
+ log.info("drain completed");
+
+ log.info("");
+ log.info("Fetching metadata records:");
+ for (Entry<Key,Value> kv : connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
+ if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
+ log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
+ } else {
+ log.info(kv.getKey().toStringNoTruncate() + " " + kv.getValue());
+ }
+ }
+
+ log.info("");
+ log.info("Fetching replication records:");
+ for (Entry<Key,Value> kv : ReplicationTable.getScanner(connMaster)) {
+ log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
+ }
+
+ Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY), peer = connPeer.createScanner(peerTable, Authorizations.EMPTY);
+ Iterator<Entry<Key,Value>> masterIter = master.iterator(), peerIter = peer.iterator();
+ Entry<Key,Value> masterEntry = null, peerEntry = null;
+ while (masterIter.hasNext() && peerIter.hasNext()) {
+ masterEntry = masterIter.next();
+ peerEntry = peerIter.next();
+ Assert.assertEquals(masterEntry.getKey() + " was not equal to " + peerEntry.getKey(), 0,
+ masterEntry.getKey().compareTo(peerEntry.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS));
+ Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue());
+ }
+
+ log.info("Last master entry: " + masterEntry);
+ log.info("Last peer entry: " + peerEntry);
+
+ Assert.assertFalse("Had more data to read from the master", masterIter.hasNext());
+ Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext());
+ } finally {
+ peerCluster.stop();
+ }
+ }
+
+ @Test
+ public void dataReplicatedToCorrectTable() throws Exception {
+ MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
+ ROOT_PASSWORD);
+ peerCfg.setNumTservers(1);
+ peerCfg.setInstanceName("peer");
+ peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
+
+ updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
+
+ MiniAccumuloClusterImpl peer1Cluster = new MiniAccumuloClusterImpl(peerCfg);
+
+ peer1Cluster.start();
+
+ try {
+ Connector connMaster = getConnector();
+ Connector connPeer = peer1Cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
+
+ String peerClusterName = "peer";
+ String peerUserName = "peer", peerPassword = "foo";
+
+ // Create local user
+ connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
+
+ connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
+ connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
+
+ // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
+ connMaster.instanceOperations().setProperty(
+ Property.REPLICATION_PEERS.getKey() + peerClusterName,
+ ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
+ AccumuloReplicaSystem.buildConfiguration(peer1Cluster.getInstanceName(), peer1Cluster.getZooKeepers())));
+
+ String masterTable1 = "master1", peerTable1 = "peer1", masterTable2 = "master2", peerTable2 = "peer2";
+
+ // Create tables
+ connMaster.tableOperations().create(masterTable1);
+ String masterTableId1 = connMaster.tableOperations().tableIdMap().get(masterTable1);
+ Assert.assertNotNull(masterTableId1);
+
+ connMaster.tableOperations().create(masterTable2);
+ String masterTableId2 = connMaster.tableOperations().tableIdMap().get(masterTable2);
+ Assert.assertNotNull(masterTableId2);
+
+ connPeer.tableOperations().create(peerTable1);
+ String peerTableId1 = connPeer.tableOperations().tableIdMap().get(peerTable1);
+ Assert.assertNotNull(peerTableId1);
+
+ connPeer.tableOperations().create(peerTable2);
+ String peerTableId2 = connPeer.tableOperations().tableIdMap().get(peerTable2);
+ Assert.assertNotNull(peerTableId2);
+
+ // Grant write permission
+ connPeer.securityOperations().grantTablePermission(peerUserName, peerTable1, TablePermission.WRITE);
+ connPeer.securityOperations().grantTablePermission(peerUserName, peerTable2, TablePermission.WRITE);
+
+ // Replicate this table to the peerClusterName in a table with the peerTableId table id
+ connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION.getKey(), "true");
+ connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId1);
+
+ connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION.getKey(), "true");
+ connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId2);
+
+ // Write some data to table1
+ BatchWriter bw = connMaster.createBatchWriter(masterTable1, new BatchWriterConfig());
+ long masterTable1Records = 0L;
+ for (int rows = 0; rows < 2500; rows++) {
+ Mutation m = new Mutation(masterTable1 + rows);
+ for (int cols = 0; cols < 100; cols++) {
+ String value = Integer.toString(cols);
+ m.put(value, "", value);
+ masterTable1Records++;
+ }
+ bw.addMutation(m);
+ }
+
+ bw.close();
+
+ // Write some data to table2
+ bw = connMaster.createBatchWriter(masterTable2, new BatchWriterConfig());
+ long masterTable2Records = 0L;
+ for (int rows = 0; rows < 2500; rows++) {
+ Mutation m = new Mutation(masterTable2 + rows);
+ for (int cols = 0; cols < 100; cols++) {
+ String value = Integer.toString(cols);
+ m.put(value, "", value);
+ masterTable2Records++;
+ }
+ bw.addMutation(m);
+ }
+
+ bw.close();
+
+ log.info("Wrote all data to master cluster");
+
+ Set<String> filesFor1 = connMaster.replicationOperations().referencedFiles(masterTable1), filesFor2 = connMaster.replicationOperations().referencedFiles(
+ masterTable2);
+
+ log.info("Files to replicate for table1: " + filesFor1);
+ log.info("Files to replicate for table2: " + filesFor2);
+
+ // Restart the tserver to force a close on the WAL
+ for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
+ cluster.killProcess(ServerType.TABLET_SERVER, proc);
+ }
+ cluster.exec(TabletServer.class);
+
+ log.info("Restarted the tserver");
+
+ // Read the data -- the tserver is back up and running
+ Iterators.size(connMaster.createScanner(masterTable1, Authorizations.EMPTY).iterator());
+
+ while (!ReplicationTable.isOnline(connMaster)) {
+ log.info("Replication table still offline, waiting");
+ Thread.sleep(5000);
+ }
+
+ // Wait for both tables to be replicated
+ log.info("Waiting for {} for {}", filesFor1, masterTable1);
+ connMaster.replicationOperations().drain(masterTable1, filesFor1);
+
+ log.info("Waiting for {} for {}", filesFor2, masterTable2);
+ connMaster.replicationOperations().drain(masterTable2, filesFor2);
+
+ long countTable = 0L;
+ for (Entry<Key,Value> entry : connPeer.createScanner(peerTable1, Authorizations.EMPTY)) {
+ countTable++;
+ Assert.assertTrue("Found unexpected key-value" + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString()
+ .startsWith(masterTable1));
+ }
+
+ log.info("Found {} records in {}", countTable, peerTable1);
+ Assert.assertEquals(masterTable1Records, countTable);
+
+ countTable = 0L;
+ for (Entry<Key,Value> entry : connPeer.createScanner(peerTable2, Authorizations.EMPTY)) {
+ countTable++;
+ Assert.assertTrue("Found unexpected key-value" + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString()
+ .startsWith(masterTable2));
+ }
+
+ log.info("Found {} records in {}", countTable, peerTable2);
+ Assert.assertEquals(masterTable2Records, countTable);
+
+ } finally {
+ peer1Cluster.stop();
+ }
+ }
+
+ @Test
+ public void dataWasReplicatedToThePeerWithoutDrain() throws Exception {
+ MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
+ ROOT_PASSWORD);
+ peerCfg.setNumTservers(1);
+ peerCfg.setInstanceName("peer");
+ peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
+
+ updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
+
+ MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg);
+
+ peerCluster.start();
+
+ Connector connMaster = getConnector();
+ Connector connPeer = peerCluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
+
+ String peerUserName = "repl";
+ String peerPassword = "passwd";
+
+ // Create a user on the peer for replication to use
+ connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
+
+ String peerClusterName = "peer";
+
+ // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
+ connMaster.instanceOperations().setProperty(
+ Property.REPLICATION_PEERS.getKey() + peerClusterName,
+ ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
+ AccumuloReplicaSystem.buildConfiguration(peerCluster.getInstanceName(), peerCluster.getZooKeepers())));
+
+ // Configure the credentials we should use to authenticate ourselves to the peer for replication
+ connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
+ connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
+
+ String masterTable = "master", peerTable = "peer";
+
+ connMaster.tableOperations().create(masterTable);
+ String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable);
+ Assert.assertNotNull(masterTableId);
+
+ connPeer.tableOperations().create(peerTable);
+ String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable);
+ Assert.assertNotNull(peerTableId);
+
+ // Give our replication user the ability to write to the table
+ connPeer.securityOperations().grantTablePermission(peerUserName, peerTable, TablePermission.WRITE);
+
+ // Replicate this table to the peerClusterName in a table with the peerTableId table id
+ connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION.getKey(), "true");
+ connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId);
+
+ // Write some data to table1
+ BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig());
+ for (int rows = 0; rows < 5000; rows++) {
+ Mutation m = new Mutation(Integer.toString(rows));
+ for (int cols = 0; cols < 100; cols++) {
+ String value = Integer.toString(cols);
+ m.put(value, "", value);
+ }
+ bw.addMutation(m);
+ }
+
+ bw.close();
+
+ log.info("Wrote all data to master cluster");
+
+ Set<String> files = connMaster.replicationOperations().referencedFiles(masterTable);
+
+ log.info("Files to replicate:" + files);
+
+ for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
+ cluster.killProcess(ServerType.TABLET_SERVER, proc);
+ }
+
+ cluster.exec(TabletServer.class);
+
+ while (!ReplicationTable.isOnline(connMaster)) {
+ log.info("Replication table still offline, waiting");
+ Thread.sleep(5000);
+ }
+
+ Iterators.size(connMaster.createScanner(masterTable, Authorizations.EMPTY).iterator());
+
+ for (Entry<Key,Value> kv : ReplicationTable.getScanner(connMaster)) {
+ log.debug(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
+ }
+
+ connMaster.replicationOperations().drain(masterTable, files);
+
+ Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY), peer = connPeer.createScanner(peerTable, Authorizations.EMPTY);
+ Iterator<Entry<Key,Value>> masterIter = master.iterator(), peerIter = peer.iterator();
+ while (masterIter.hasNext() && peerIter.hasNext()) {
+ Entry<Key,Value> masterEntry = masterIter.next(), peerEntry = peerIter.next();
+ Assert.assertEquals(masterEntry.getKey() + " was not equal to " + peerEntry.getKey(), 0,
+ masterEntry.getKey().compareTo(peerEntry.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS));
+ Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue());
+ }
+
+ Assert.assertFalse("Had more data to read from the master", masterIter.hasNext());
+ Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext());
+
+ peerCluster.stop();
+ }
+
+ @Test
+ public void dataReplicatedToCorrectTableWithoutDrain() throws Exception {
+ MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
+ ROOT_PASSWORD);
+ peerCfg.setNumTservers(1);
+ peerCfg.setInstanceName("peer");
+ peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
+
+ updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
+
+ MiniAccumuloClusterImpl peer1Cluster = new MiniAccumuloClusterImpl(peerCfg);
+
+ peer1Cluster.start();
+
+ try {
+ Connector connMaster = getConnector();
+ Connector connPeer = peer1Cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
+
+ String peerClusterName = "peer";
+
+ String peerUserName = "repl";
+ String peerPassword = "passwd";
+
+ // Create a user on the peer for replication to use
+ connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
+
+ // Configure the credentials we should use to authenticate ourselves to the peer for replication
+ connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
+ connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
+
+ // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
+ connMaster.instanceOperations().setProperty(
+ Property.REPLICATION_PEERS.getKey() + peerClusterName,
+ ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
+ AccumuloReplicaSystem.buildConfiguration(peer1Cluster.getInstanceName(), peer1Cluster.getZooKeepers())));
+
+ String masterTable1 = "master1", peerTable1 = "peer1", masterTable2 = "master2", peerTable2 = "peer2";
+
+ connMaster.tableOperations().create(masterTable1);
+ String masterTableId1 = connMaster.tableOperations().tableIdMap().get(masterTable1);
+ Assert.assertNotNull(masterTableId1);
+
+ connMaster.tableOperations().create(masterTable2);
+ String masterTableId2 = connMaster.tableOperations().tableIdMap().get(masterTable2);
+ Assert.assertNotNull(masterTableId2);
+
+ connPeer.tableOperations().create(peerTable1);
+ String peerTableId1 = connPeer.tableOperations().tableIdMap().get(peerTable1);
+ Assert.assertNotNull(peerTableId1);
+
+ connPeer.tableOperations().create(peerTable2);
+ String peerTableId2 = connPeer.tableOperations().tableIdMap().get(peerTable2);
+ Assert.assertNotNull(peerTableId2);
+
+ // Give our replication user the ability to write to the tables
+ connPeer.securityOperations().grantTablePermission(peerUserName, peerTable1, TablePermission.WRITE);
+ connPeer.securityOperations().grantTablePermission(peerUserName, peerTable2, TablePermission.WRITE);
+
+ // Replicate each master table to peerClusterName, mapping it to the corresponding peer table id
+ connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION.getKey(), "true");
+ connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId1);
+
+ connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION.getKey(), "true");
+ connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId2);
+
+ // Write some data to table1
+ BatchWriter bw = connMaster.createBatchWriter(masterTable1, new BatchWriterConfig());
+ for (int rows = 0; rows < 2500; rows++) {
+ Mutation m = new Mutation(masterTable1 + rows);
+ for (int cols = 0; cols < 100; cols++) {
+ String value = Integer.toString(cols);
+ m.put(value, "", value);
+ }
+ bw.addMutation(m);
+ }
+
+ bw.close();
+
+ // Write some data to table2
+ bw = connMaster.createBatchWriter(masterTable2, new BatchWriterConfig());
+ for (int rows = 0; rows < 2500; rows++) {
+ Mutation m = new Mutation(masterTable2 + rows);
+ for (int cols = 0; cols < 100; cols++) {
+ String value = Integer.toString(cols);
+ m.put(value, "", value);
+ }
+ bw.addMutation(m);
+ }
+
+ bw.close();
+
+ log.info("Wrote all data to master cluster");
+
+ for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
+ cluster.killProcess(ServerType.TABLET_SERVER, proc);
+ }
+
+ cluster.exec(TabletServer.class);
+
+ while (!ReplicationTable.isOnline(connMaster)) {
+ log.info("Replication table still offline, waiting");
+ Thread.sleep(5000);
+ }
+
+ // Wait until we fully replicated something
+ boolean fullyReplicated = false;
+ for (int i = 0; i < 10 && !fullyReplicated; i++) {
+ UtilWaitThread.sleep(2000);
+
+ Scanner s = ReplicationTable.getScanner(connMaster);
+ WorkSection.limit(s);
+ for (Entry<Key,Value> entry : s) {
+ Status status = Status.parseFrom(entry.getValue().get());
+ if (StatusUtil.isFullyReplicated(status)) {
+ fullyReplicated = true;
+ }
+ }
+ }
+
+ Assert.assertTrue("Did not find any fully replicated status entries", fullyReplicated);
+
+ // We have to wait for the master to assign the replication work, a local tserver to process it, and then the remote tserver to replay it
+ // Be cautious in how quickly we assert that the data is present on the peer
+ long countTable = 0L;
+ for (int i = 0; i < 10; i++) {
+ for (Entry<Key,Value> entry : connPeer.createScanner(peerTable1, Authorizations.EMPTY)) {
+ countTable++;
+ Assert.assertTrue("Found unexpected key-value" + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString()
+ .startsWith(masterTable1));
+ }
+
+ log.info("Found {} records in {}", countTable, peerTable1);
+
+ if (0L == countTable) {
+ Thread.sleep(5000);
+ } else {
+ break;
+ }
+ }
+
+ Assert.assertTrue("Found no records in " + peerTable1 + " in the peer cluster", countTable > 0);
+
+ // We have to wait for the master to assign the replication work, a local tserver to process it, and then the remote tserver to replay it
+ // Be cautious in how quickly we assert that the data is present on the peer
+ for (int i = 0; i < 10; i++) {
+ countTable = 0L;
+ for (Entry<Key,Value> entry : connPeer.createScanner(peerTable2, Authorizations.EMPTY)) {
+ countTable++;
+ Assert.assertTrue("Found unexpected key-value" + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString()
+ .startsWith(masterTable2));
+ }
+
+ log.info("Found {} records in {}", countTable, peerTable2);
+
+ if (0L == countTable) {
+ Thread.sleep(5000);
+ } else {
+ break;
+ }
+ }
+
+ Assert.assertTrue("Found no records in " + peerTable2 + " in the peer cluster", countTable > 0);
+
+ } finally {
+ peer1Cluster.stop();
+ }
+ }
+}
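
These two tests confirm replication progress in the two ways the client API allows: the first blocks on ReplicationOperations.drain(), while the second deliberately skips drain() and polls the peer table until data shows up. A minimal sketch of the blocking form, assuming an existing Connector named conn and a replication-enabled table name (both placeholders, not taken from the tests):

    // Snapshot the files the table currently references, then block until
    // every one of them has been fully replicated to all configured peers.
    Set<String> files = conn.replicationOperations().referencedFiles("mytable");
    conn.replicationOperations().drain("mytable", files);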
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/replication/MultiTserverReplicationIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/MultiTserverReplicationIT.java b/test/src/main/java/org/apache/accumulo/test/replication/MultiTserverReplicationIT.java
new file mode 100644
index 0000000..72cb569
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/replication/MultiTserverReplicationIT.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.replication;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.replication.ReplicationConstants;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.fate.zookeeper.ZooReader;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Iterables;
+import com.google.common.net.HostAndPort;
+
+/**
+ * Verifies that tservers and the master advertise their replication service ports in ZooKeeper.
+ */
+public class MultiTserverReplicationIT extends ConfigurableMacBase {
+ private static final Logger log = LoggerFactory.getLogger(MultiTserverReplicationIT.class);
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setNumTservers(2);
+ }
+
+ @Test
+ public void tserverReplicationServicePortsAreAdvertised() throws Exception {
+ // Wait for the cluster to be up
+ Connector conn = getConnector();
+ Instance inst = conn.getInstance();
+
+ // Wait for a tserver to come up to fulfill this request
+ conn.tableOperations().create("foo");
+ Scanner s = conn.createScanner("foo", Authorizations.EMPTY);
+ Assert.assertEquals(0, Iterables.size(s));
+
+ ZooReader zreader = new ZooReader(inst.getZooKeepers(), inst.getZooKeepersSessionTimeOut());
+ Set<String> tserverHost = new HashSet<>();
+ tserverHost.addAll(zreader.getChildren(ZooUtil.getRoot(inst) + Constants.ZTSERVERS));
+
+ Set<HostAndPort> replicationServices = new HashSet<>();
+
+ for (String tserver : tserverHost) {
+ try {
+ byte[] portData = zreader.getData(ZooUtil.getRoot(inst) + ReplicationConstants.ZOO_TSERVERS + "/" + tserver, null);
+ HostAndPort replAddress = HostAndPort.fromString(new String(portData, UTF_8));
+ replicationServices.add(replAddress);
+ } catch (Exception e) {
+ log.error("Could not find port for {}", tserver, e);
+ Assert.fail("Did not find replication port advertisement for " + tserver);
+ }
+ }
+
+ // There should be one advertised replication service per tserver
+ Assert.assertEquals("Expected an equal number of replication servicers and tservers", tserverHost.size(), replicationServices.size());
+ }
+
+ @Test
+ public void masterReplicationServicePortsAreAdvertised() throws Exception {
+ // Wait for the cluster to be up
+ Connector conn = getConnector();
+ Instance inst = conn.getInstance();
+
+ // Wait for a tserver to come up to fulfill this request
+ conn.tableOperations().create("foo");
+ Scanner s = conn.createScanner("foo", Authorizations.EMPTY);
+ Assert.assertEquals(0, Iterables.size(s));
+
+ ZooReader zreader = new ZooReader(inst.getZooKeepers(), inst.getZooKeepersSessionTimeOut());
+
+ // Should have one master instance
+ Assert.assertEquals(1, inst.getMasterLocations().size());
+
+ // Get the master thrift service addr
+ String masterAddr = Iterables.getOnlyElement(inst.getMasterLocations());
+
+ // Get the master replication coordinator addr
+ String replCoordAddr = new String(zreader.getData(ZooUtil.getRoot(inst) + Constants.ZMASTER_REPLICATION_COORDINATOR_ADDR, null), UTF_8);
+
+ // They shouldn't be the same
+ Assert.assertNotEquals(masterAddr, replCoordAddr);
+
+ // Neither should be zero as the port
+ Assert.assertNotEquals(0, HostAndPort.fromString(masterAddr).getPort());
+ Assert.assertNotEquals(0, HostAndPort.fromString(replCoordAddr).getPort());
+ }
+}
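
Both assertions above rest on the same ZooKeeper layout: each tserver advertises its replication service address under ReplicationConstants.ZOO_TSERVERS, and the master advertises its coordinator address under Constants.ZMASTER_REPLICATION_COORDINATOR_ADDR. A condensed sketch of reading a single advertisement, assuming inst is an Instance and tserver is a child znode name as in the test:

    // The znode payload is a UTF-8 "host:port" string.
    ZooReader zreader = new ZooReader(inst.getZooKeepers(), inst.getZooKeepersSessionTimeOut());
    byte[] data = zreader.getData(ZooUtil.getRoot(inst) + ReplicationConstants.ZOO_TSERVERS + "/" + tserver, null);
    HostAndPort addr = HostAndPort.fromString(new String(data, UTF_8));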
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/GarbageCollectWALIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/GarbageCollectWALIT.java b/test/src/test/java/org/apache/accumulo/test/GarbageCollectWALIT.java
deleted file mode 100644
index 141ee27..0000000
--- a/test/src/test/java/org/apache/accumulo/test/GarbageCollectWALIT.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertEquals;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.fate.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocatedFileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class GarbageCollectWALIT extends ConfigurableMacBase {
-
- @Override
- protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setProperty(Property.INSTANCE_ZK_HOST, "5s");
- cfg.setProperty(Property.GC_CYCLE_START, "1s");
- cfg.setProperty(Property.GC_CYCLE_DELAY, "1s");
- cfg.setNumTservers(1);
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- @Test(timeout = 2 * 60 * 1000)
- public void test() throws Exception {
- // not yet, please
- String tableName = getUniqueNames(1)[0];
- cluster.getClusterControl().stop(ServerType.GARBAGE_COLLECTOR);
- Connector c = getConnector();
- c.tableOperations().create(tableName);
- // count the number of WALs in the filesystem
- assertEquals(2, countWALsInFS(cluster));
- cluster.getClusterControl().stop(ServerType.TABLET_SERVER);
- cluster.getClusterControl().start(ServerType.GARBAGE_COLLECTOR);
- cluster.getClusterControl().start(ServerType.TABLET_SERVER);
- Iterators.size(c.createScanner(MetadataTable.NAME, Authorizations.EMPTY).iterator());
- // let GC run
- UtilWaitThread.sleep(3 * 5 * 1000);
- assertEquals(2, countWALsInFS(cluster));
- }
-
- private int countWALsInFS(MiniAccumuloClusterImpl cluster) throws Exception {
- FileSystem fs = cluster.getFileSystem();
- RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(new Path(cluster.getConfig().getAccumuloDir() + "/wal"), true);
- int result = 0;
- while (iterator.hasNext()) {
- LocatedFileStatus next = iterator.next();
- if (!next.isDirectory()) {
- result++;
- }
- }
- return result;
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/ImportExportIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/ImportExportIT.java b/test/src/test/java/org/apache/accumulo/test/ImportExportIT.java
deleted file mode 100644
index 55d83f5..0000000
--- a/test/src/test/java/org/apache/accumulo/test/ImportExportIT.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.BufferedReader;
-import java.io.InputStreamReader;
-import java.util.Arrays;
-import java.util.Iterator;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.fs.Path;
-import org.junit.Assert;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * ImportTable didn't correctly place absolute paths in metadata. This resulted in the imported table only being usable when the actual HDFS directory for
- * Accumulo was the same as Property.INSTANCE_DFS_DIR. If any other HDFS directory was used, any interactions with the table would fail because the relative
- * path in the metadata table (created by the ImportTable process) would be converted to a non-existent absolute path.
- * <p>
- * ACCUMULO-3215
- *
- */
-public class ImportExportIT extends AccumuloClusterHarness {
-
- private static final Logger log = LoggerFactory.getLogger(ImportExportIT.class);
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Test
- public void testExportImportThenScan() throws Exception {
- Connector conn = getConnector();
-
- String[] tableNames = getUniqueNames(2);
- String srcTable = tableNames[0], destTable = tableNames[1];
- conn.tableOperations().create(srcTable);
-
- BatchWriter bw = conn.createBatchWriter(srcTable, new BatchWriterConfig());
- for (int row = 0; row < 1000; row++) {
- Mutation m = new Mutation(Integer.toString(row));
- for (int col = 0; col < 100; col++) {
- m.put(Integer.toString(col), "", Integer.toString(col * 2));
- }
- bw.addMutation(m);
- }
-
- bw.close();
-
- conn.tableOperations().compact(srcTable, null, null, true, true);
-
- // Make a directory we can use to throw the export and import directories
- // Must exist on the filesystem the cluster is running.
- FileSystem fs = cluster.getFileSystem();
- Path tmp = cluster.getTemporaryPath();
- log.info("Using FileSystem: " + fs);
- Path baseDir = new Path(tmp, getClass().getName());
- if (fs.exists(baseDir)) {
- log.info("{} exists on filesystem, deleting", baseDir);
- assertTrue("Failed to deleted " + baseDir, fs.delete(baseDir, true));
- }
- log.info("Creating {}", baseDir);
- assertTrue("Failed to create " + baseDir, fs.mkdirs(baseDir));
- Path exportDir = new Path(baseDir, "export");
- Path importDir = new Path(baseDir, "import");
- for (Path p : new Path[] {exportDir, importDir}) {
- assertTrue("Failed to create " + baseDir, fs.mkdirs(p));
- }
-
- FsShell fsShell = new FsShell(fs.getConf());
- assertEquals("Failed to chmod " + baseDir, 0, fsShell.run(new String[] {"-chmod", "-R", "777", baseDir.toString()}));
-
- log.info("Exporting table to {}", exportDir);
- log.info("Importing table from {}", importDir);
-
- // Offline the table
- conn.tableOperations().offline(srcTable, true);
- // Then export it
- conn.tableOperations().exportTable(srcTable, exportDir.toString());
-
- // Make sure the distcp.txt file that exporttable creates is available
- Path distcp = new Path(exportDir, "distcp.txt");
- Assert.assertTrue("Distcp file doesn't exist", fs.exists(distcp));
- FSDataInputStream is = fs.open(distcp);
- BufferedReader reader = new BufferedReader(new InputStreamReader(is));
-
- // Copy each file that was exported to the import directory
- String line;
- while (null != (line = reader.readLine())) {
- Path p = new Path(line.substring(5));
- Assert.assertTrue("File doesn't exist: " + p, fs.exists(p));
-
- Path dest = new Path(importDir, p.getName());
- Assert.assertFalse("Did not expect " + dest + " to exist", fs.exists(dest));
- FileUtil.copy(fs, p, fs, dest, false, fs.getConf());
- }
-
- reader.close();
-
- log.info("Import dir: {}", Arrays.toString(fs.listStatus(importDir)));
-
- // Import the exported data into a new table
- conn.tableOperations().importTable(destTable, importDir.toString());
-
- // Get the table ID for the table that the importtable command created
- final String tableId = conn.tableOperations().tableIdMap().get(destTable);
- Assert.assertNotNull(tableId);
-
- // Get all `file` colfams from the metadata table for the new table
- log.info("Imported into table with ID: {}", tableId);
- Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
- s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
- MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(s);
-
- // Should find a single entry
- for (Entry<Key,Value> fileEntry : s) {
- Key k = fileEntry.getKey();
- String value = fileEntry.getValue().toString();
- if (k.getColumnFamily().equals(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME)) {
- // The file should be an absolute URI (file:///...), not a relative path (/b-000.../I000001.rf)
- String fileUri = k.getColumnQualifier().toString();
- Assert.assertFalse("Imported files should have absolute URIs, not relative: " + fileUri, looksLikeRelativePath(fileUri));
- } else if (k.getColumnFamily().equals(MetadataSchema.TabletsSection.ServerColumnFamily.NAME)) {
- Assert.assertFalse("Server directory should have absolute URI, not relative: " + value, looksLikeRelativePath(value));
- } else {
- Assert.fail("Got expected pair: " + k + "=" + fileEntry.getValue());
- }
- }
-
- // Online the original table before we verify equivalence
- conn.tableOperations().online(srcTable, true);
-
- verifyTableEquality(conn, srcTable, destTable);
- }
-
- private void verifyTableEquality(Connector conn, String srcTable, String destTable) throws Exception {
- Iterator<Entry<Key,Value>> src = conn.createScanner(srcTable, Authorizations.EMPTY).iterator(), dest = conn.createScanner(destTable, Authorizations.EMPTY)
- .iterator();
- Assert.assertTrue("Could not read any data from source table", src.hasNext());
- Assert.assertTrue("Could not read any data from destination table", dest.hasNext());
- while (src.hasNext() && dest.hasNext()) {
- Entry<Key,Value> orig = src.next(), copy = dest.next();
- Assert.assertEquals(orig.getKey(), copy.getKey());
- Assert.assertEquals(orig.getValue(), copy.getValue());
- }
- Assert.assertFalse("Source table had more data to read", src.hasNext());
- Assert.assertFalse("Dest table had more data to read", dest.hasNext());
- }
-
- private boolean looksLikeRelativePath(String uri) {
- if (uri.startsWith("/" + Constants.BULK_PREFIX)) {
- if ('/' == uri.charAt(10)) {
- return true;
- }
- } else if (uri.startsWith("/" + Constants.CLONE_PREFIX)) {
- return true;
- }
-
- return false;
- }
-}
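
The Javadoc above names the failure mode this test guards against: a relative path written to the metadata table only resolves when Accumulo's HDFS directory matches Property.INSTANCE_DFS_DIR. The round trip the test exercises, condensed into a sketch with placeholder table names and directories:

    // Offline the source table so its file set stops changing, then export it.
    conn.tableOperations().offline("src", true);
    conn.tableOperations().exportTable("src", "/tmp/export");
    // The export writes distcp.txt listing every file to copy into the import
    // directory; the test copies them with FileUtil.copy.
    conn.tableOperations().importTable("dest", "/tmp/import");

After the import, the test scans the new table's metadata and asserts that every file and server directory entry is an absolute URI, not a relative path.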
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/IntegrationTestMapReduce.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/IntegrationTestMapReduce.java b/test/src/test/java/org/apache/accumulo/test/IntegrationTestMapReduce.java
deleted file mode 100644
index e33f3a9..0000000
--- a/test/src/test/java/org/apache/accumulo/test/IntegrationTestMapReduce.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.Reducer;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
-import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.junit.runner.Description;
-import org.junit.runner.JUnitCore;
-import org.junit.runner.Result;
-import org.junit.runner.notification.Failure;
-import org.junit.runner.notification.RunListener;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class IntegrationTestMapReduce extends Configured implements Tool {
-
- private static final Logger log = LoggerFactory.getLogger(IntegrationTestMapReduce.class);
-
- public static class TestMapper extends Mapper<LongWritable,Text,IntWritable,Text> {
-
- @Override
- protected void map(LongWritable key, Text value, final Mapper<LongWritable,Text,IntWritable,Text>.Context context) throws IOException, InterruptedException {
- String className = value.toString();
- if (className.trim().isEmpty()) {
- return;
- }
- Class<? extends Object> test = null;
- try {
- test = Class.forName(className);
- } catch (ClassNotFoundException e) {
- log.debug("Error finding class {}", className, e);
- context.write(new IntWritable(-1), new Text(e.toString()));
- return;
- }
- JUnitCore core = new JUnitCore();
- core.addListener(new RunListener() {
-
- @Override
- public void testStarted(Description description) throws Exception {
- log.info("Starting {}", description);
- context.progress();
- }
-
- @Override
- public void testFinished(Description description) throws Exception {
- log.info("Finished {}", description);
- context.progress();
- }
-
- @Override
- public void testFailure(Failure failure) throws Exception {
- log.info("Test failed: {}", failure.getDescription(), failure.getException());
- context.progress();
- }
-
- });
- log.info("Running test {}", className);
- try {
- Result result = core.run(test);
- if (result.wasSuccessful()) {
- log.info("{} was successful", className);
- context.write(new IntWritable(0), value);
- } else {
- log.info("{} failed", className);
- context.write(new IntWritable(1), value);
- }
- } catch (Exception e) {
- // most likely JUnit issues, like no tests to run
- log.info("Test failed: {}", className, e);
- }
- }
- }
-
- public static class TestReducer extends Reducer<IntWritable,Text,IntWritable,Text> {
-
- @Override
- protected void reduce(IntWritable code, Iterable<Text> tests, Reducer<IntWritable,Text,IntWritable,Text>.Context context) throws IOException,
- InterruptedException {
- StringBuffer result = new StringBuffer();
- for (Text test : tests) {
- result.append(test);
- result.append("\n");
- }
- context.write(code, new Text(result.toString()));
- }
- }
-
- @Override
- public int run(String[] args) throws Exception {
- // read a list of tests from the input, and print out the results
- if (args.length != 2) {
- System.err.println("Wrong number of args: <input> <output>");
- return 1;
- }
- Configuration conf = getConf();
- Job job = Job.getInstance(conf, "accumulo integration test runner");
- // read one line at a time
- job.setInputFormatClass(NLineInputFormat.class);
- conf.setInt(NLineInputFormat.LINES_PER_MAP, 1);
-
- // run the test
- job.setJarByClass(IntegrationTestMapReduce.class);
- job.setMapperClass(TestMapper.class);
-
- // group test by result code
- job.setReducerClass(TestReducer.class);
- job.setOutputKeyClass(IntWritable.class);
- job.setOutputValueClass(Text.class);
-
- FileInputFormat.addInputPath(job, new Path(args[0]));
- FileOutputFormat.setOutputPath(job, new Path(args[1]));
- return job.waitForCompletion(true) ? 0 : 1;
- }
-
- public static void main(String[] args) throws Exception {
- System.exit(ToolRunner.run(new IntegrationTestMapReduce(), args));
- }
-
-}
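
IntegrationTestMapReduce turns a list of IT class names into a MapReduce job: NLineInputFormat hands each mapper one class name, the mapper runs it through JUnitCore, and the reducer groups the class names under their result code (0 for success, 1 for failure, -1 for class-not-found). A hypothetical driver invocation, with both paths as placeholders:

    // tests.txt holds one fully qualified IT class name per line; the
    // per-class results land under /results, keyed by result code.
    int rc = ToolRunner.run(new IntegrationTestMapReduce(),
        new String[] {"/tests.txt", "/results"});
    System.exit(rc);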
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/InterruptibleScannersIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/InterruptibleScannersIT.java b/test/src/test/java/org/apache/accumulo/test/InterruptibleScannersIT.java
deleted file mode 100644
index a272bc2..0000000
--- a/test/src/test/java/org/apache/accumulo/test/InterruptibleScannersIT.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.ActiveScan;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.functional.SlowIterator;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Assert;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-// ACCUMULO-3030
-public class InterruptibleScannersIT extends AccumuloClusterHarness {
-
- @Override
- public int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setNumTservers(1);
- }
-
- @Test
- public void test() throws Exception {
- // make a table
- final String tableName = getUniqueNames(1)[0];
- final Connector conn = getConnector();
- conn.tableOperations().create(tableName);
- // make the world's slowest scanner
- final Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
- final IteratorSetting cfg = new IteratorSetting(100, SlowIterator.class);
- // Wait long enough to be sure we can catch it, but not indefinitely.
- SlowIterator.setSeekSleepTime(cfg, 60 * 1000);
- scanner.addScanIterator(cfg);
- // create a thread to interrupt the slow scan
- final Thread scanThread = Thread.currentThread();
- Thread thread = new Thread() {
- @Override
- public void run() {
- try {
- // ensure the scan is running: not perfect, the metadata tables could be scanned, too.
- String tserver = conn.instanceOperations().getTabletServers().iterator().next();
- do {
- ArrayList<ActiveScan> scans = new ArrayList<ActiveScan>(conn.instanceOperations().getActiveScans(tserver));
- Iterator<ActiveScan> iter = scans.iterator();
- while (iter.hasNext()) {
- ActiveScan scan = iter.next();
- // Remove scans not against our table and not owned by us
- if (!getAdminPrincipal().equals(scan.getUser()) || !tableName.equals(scan.getTable())) {
- iter.remove();
- }
- }
-
- if (!scans.isEmpty()) {
- // We found our scan
- break;
- }
- } while (true);
- } catch (Exception e) {
- e.printStackTrace();
- }
- // BAM!
- scanThread.interrupt();
- }
- };
- thread.start();
- try {
- // Use the scanner, expect problems
- Iterators.size(scanner.iterator());
- Assert.fail("Scan should not succeed");
- } catch (Exception ex) {} finally {
- thread.join();
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/KeyValueEqualityIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/KeyValueEqualityIT.java b/test/src/test/java/org/apache/accumulo/test/KeyValueEqualityIT.java
deleted file mode 100644
index b0734b4..0000000
--- a/test/src/test/java/org/apache/accumulo/test/KeyValueEqualityIT.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import java.util.Iterator;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.junit.Assert;
-import org.junit.Test;
-
-public class KeyValueEqualityIT extends AccumuloClusterHarness {
-
- @Override
- public int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Test
- public void testEquality() throws Exception {
- Connector conn = this.getConnector();
- final BatchWriterConfig config = new BatchWriterConfig();
-
- final String[] tables = getUniqueNames(2);
- final String table1 = tables[0], table2 = tables[1];
- final TableOperations tops = conn.tableOperations();
- tops.create(table1);
- tops.create(table2);
-
- final BatchWriter bw1 = conn.createBatchWriter(table1, config), bw2 = conn.createBatchWriter(table2, config);
-
- for (int row = 0; row < 100; row++) {
- Mutation m = new Mutation(Integer.toString(row));
- for (int col = 0; col < 10; col++) {
- m.put(Integer.toString(col), "", System.currentTimeMillis(), Integer.toString(col * 2));
- }
- bw1.addMutation(m);
- bw2.addMutation(m);
- }
-
- bw1.close();
- bw2.close();
-
- Iterator<Entry<Key,Value>> t1 = conn.createScanner(table1, Authorizations.EMPTY).iterator(), t2 = conn.createScanner(table2, Authorizations.EMPTY)
- .iterator();
- while (t1.hasNext() && t2.hasNext()) {
- // KeyValue, the implementation of Entry<Key,Value>, should support equality and hashCode properly
- Entry<Key,Value> e1 = t1.next(), e2 = t2.next();
- Assert.assertEquals(e1, e2);
- Assert.assertEquals(e1.hashCode(), e2.hashCode());
- }
- Assert.assertFalse("table1 had more data to read", t1.hasNext());
- Assert.assertFalse("table2 had more data to read", t2.hasNext());
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/LargeSplitRowIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/LargeSplitRowIT.java b/test/src/test/java/org/apache/accumulo/test/LargeSplitRowIT.java
deleted file mode 100644
index 479bb0e..0000000
--- a/test/src/test/java/org/apache/accumulo/test/LargeSplitRowIT.java
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.impl.AccumuloServerException;
-import org.apache.accumulo.core.client.impl.Namespaces;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.conf.TableConfiguration;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class LargeSplitRowIT extends ConfigurableMacBase {
- static private final Logger log = LoggerFactory.getLogger(LargeSplitRowIT.class);
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setNumTservers(1);
-
- Map<String,String> siteConfig = new HashMap<String,String>();
- siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "50ms");
- cfg.setSiteConfig(siteConfig);
- }
-
- // User added split
- @Test(timeout = 60 * 1000)
- public void userAddedSplit() throws Exception {
-
- log.info("User added split");
-
- // make a table and lower the TABLE_END_ROW_MAX_SIZE property
- final String tableName = getUniqueNames(1)[0];
- final Connector conn = getConnector();
- conn.tableOperations().create(tableName);
- conn.tableOperations().setProperty(tableName, Property.TABLE_MAX_END_ROW_SIZE.getKey(), "1000");
-
- // Create a BatchWriter and add a mutation to the table
- BatchWriter batchWriter = conn.createBatchWriter(tableName, new BatchWriterConfig());
- Mutation m = new Mutation("Row");
- m.put("cf", "cq", "value");
- batchWriter.addMutation(m);
- batchWriter.close();
-
- // Create a split point that is too large to be an end row and fill it with all 'm'
- SortedSet<Text> partitionKeys = new TreeSet<Text>();
- byte data[] = new byte[(int) (TableConfiguration.getMemoryInBytes(Property.TABLE_MAX_END_ROW_SIZE.getDefaultValue()) + 2)];
- for (int i = 0; i < data.length; i++) {
- data[i] = 'm';
- }
- partitionKeys.add(new Text(data));
-
- // try to add the split point that is too large, if the split point is created the test fails.
- try {
- conn.tableOperations().addSplits(tableName, partitionKeys);
- Assert.fail();
- } catch (AccumuloServerException e) {}
-
- // Make sure that the information that was written to the table before we tried to add the split point is still correct
- int counter = 0;
- final Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
- for (Entry<Key,Value> entry : scanner) {
- counter++;
- Key k = entry.getKey();
- Assert.assertEquals("Row", k.getRow().toString());
- Assert.assertEquals("cf", k.getColumnFamily().toString());
- Assert.assertEquals("cq", k.getColumnQualifier().toString());
- Assert.assertEquals("value", entry.getValue().toString());
-
- }
- // Make sure there is only one line in the table
- Assert.assertEquals(1, counter);
- }
-
- // Test tablet server split with 250 entries with all the same prefix
- @Test(timeout = 60 * 1000)
- public void automaticSplitWith250Same() throws Exception {
- log.info("Automatic with 250 with same prefix");
-
- // make a table and lower the configure properties
- final String tableName = getUniqueNames(1)[0];
- final Connector conn = getConnector();
- conn.tableOperations().create(tableName);
- conn.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
- conn.tableOperations().setProperty(tableName, Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none");
- conn.tableOperations().setProperty(tableName, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "64");
- conn.tableOperations().setProperty(tableName, Property.TABLE_MAX_END_ROW_SIZE.getKey(), "1000");
-
- // Create a BatchWriter and key for a table entry that is longer than the allowed size for an end row
- // Fill this key with all m's except the last spot
- BatchWriter batchWriter = conn.createBatchWriter(tableName, new BatchWriterConfig());
- byte data[] = new byte[(int) (TableConfiguration.getMemoryInBytes(Property.TABLE_MAX_END_ROW_SIZE.getDefaultValue()) + 2)];
- for (int i = 0; i < data.length - 1; i++) {
- data[i] = (byte) 'm';
- }
-
- // Make the last place in the key different for every entry added to the table
- for (int i = 0; i < 250; i++) {
- data[data.length - 1] = (byte) i;
- Mutation m = new Mutation(data);
- m.put("cf", "cq", "value");
- batchWriter.addMutation(m);
- }
- // Flush the BatchWriter and table and sleep for a bit to make sure that there is enough time for the table to split if need be.
- batchWriter.close();
- conn.tableOperations().flush(tableName, new Text(), new Text("z"), true);
- Thread.sleep(500);
-
- // Make sure all the data that was put in the table is still correct
- int count = 0;
- final Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
- for (Entry<Key,Value> entry : scanner) {
- Key k = entry.getKey();
- data[data.length - 1] = (byte) count;
- String expected = new String(data, UTF_8);
- Assert.assertEquals(expected, k.getRow().toString());
- Assert.assertEquals("cf", k.getColumnFamily().toString());
- Assert.assertEquals("cq", k.getColumnQualifier().toString());
- Assert.assertEquals("value", entry.getValue().toString());
- count++;
- }
- Assert.assertEquals(250, count);
-
- // Make sure no splits occurred in the table
- Assert.assertEquals(0, conn.tableOperations().listSplits(tableName).size());
- }
-
- // 10 0's; 10 2's; 10 4's... 10 30's etc
- @Test(timeout = 60 * 1000)
- public void automaticSplitWithGaps() throws Exception {
- log.info("Automatic Split With Gaps");
-
- automaticSplit(30, 2);
- }
-
- // 10 0's; 10 1's; 10 2's... 10 15's etc
- @Test(timeout = 60 * 1000)
- public void automaticSplitWithoutGaps() throws Exception {
- log.info("Automatic Split Without Gaps");
-
- automaticSplit(15, 1);
- }
-
- @Test(timeout = 60 * 1000)
- public void automaticSplitLater() throws Exception {
- log.info("Split later");
- automaticSplit(15, 1);
-
- final Connector conn = getConnector();
-
- String tableName = "";
- java.util.Iterator<String> iterator = conn.tableOperations().list().iterator();
-
- while (iterator.hasNext()) {
- String curr = iterator.next();
- if (!curr.startsWith(Namespaces.ACCUMULO_NAMESPACE + ".")) {
- tableName = curr;
- }
- }
-
- // Create a BatchWriter and key for a table entry that is longer than the allowed size for an end row
- BatchWriter batchWriter = conn.createBatchWriter(tableName, new BatchWriterConfig());
- byte data[] = new byte[10];
-
- // Fill the key with all j's except for the last byte, which cycles through 0-24 for every j value
- for (int j = 15; j < 150; j += 1) {
- for (int i = 0; i < data.length - 1; i++) {
- data[i] = (byte) j;
- }
-
- for (int i = 0; i < 25; i++) {
- data[data.length - 1] = (byte) i;
- Mutation m = new Mutation(data);
- m.put("cf", "cq", "value");
- batchWriter.addMutation(m);
- }
- }
- // Flush the BatchWriter and table and sleep for a bit to make sure that there is enough time for the table to split if need be.
- batchWriter.close();
- conn.tableOperations().flush(tableName, new Text(), new Text("z"), true);
-
- // Make sure a split occurs
- while (conn.tableOperations().listSplits(tableName).size() == 0) {
- Thread.sleep(250);
- }
-
- Assert.assertTrue(0 < conn.tableOperations().listSplits(tableName).size());
- }
-
- private void automaticSplit(int max, int spacing) throws Exception {
- // make a table and lower the configure properties
- final String tableName = getUniqueNames(1)[0];
- final Connector conn = getConnector();
- conn.tableOperations().create(tableName);
- conn.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
- conn.tableOperations().setProperty(tableName, Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none");
- conn.tableOperations().setProperty(tableName, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "64");
- conn.tableOperations().setProperty(tableName, Property.TABLE_MAX_END_ROW_SIZE.getKey(), "1000");
-
- // Create a BatchWriter and key for a table entry that is longer than the allowed size for an end row
- BatchWriter batchWriter = conn.createBatchWriter(tableName, new BatchWriterConfig());
- byte data[] = new byte[(int) (TableConfiguration.getMemoryInBytes(Property.TABLE_MAX_END_ROW_SIZE.getDefaultValue()) + 2)];
-
- // Fill the key with all j's except for the last byte, which cycles through 0-9 for every j value
- for (int j = 0; j < max; j += spacing) {
- for (int i = 0; i < data.length - 1; i++) {
- data[i] = (byte) j;
- }
-
- for (int i = 0; i < 10; i++) {
- data[data.length - 1] = (byte) i;
- Mutation m = new Mutation(data);
- m.put("cf", "cq", "value");
- batchWriter.addMutation(m);
- }
- }
- // Flush the BatchWriter and table and sleep for a bit to make sure that there is enough time for the table to split if need be.
- batchWriter.close();
- conn.tableOperations().flush(tableName, new Text(), new Text("z"), true);
- Thread.sleep(500);
-
- // Make sure all the data that was put in the table is still correct
- int count = 0;
- int extra = 10;
- final Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
- for (Entry<Key,Value> entry : scanner) {
- if (extra == 10) {
- extra = 0;
- for (int i = 0; i < data.length - 1; i++) {
- data[i] = (byte) count;
- }
- count += spacing;
-
- }
- Key k = entry.getKey();
- data[data.length - 1] = (byte) extra;
- String expected = new String(data, UTF_8);
- Assert.assertEquals(expected, k.getRow().toString());
- Assert.assertEquals("cf", k.getColumnFamily().toString());
- Assert.assertEquals("cq", k.getColumnQualifier().toString());
- Assert.assertEquals("value", entry.getValue().toString());
- extra++;
- }
- Assert.assertEquals(10, extra);
- Assert.assertEquals(max, count);
-
- // Make sure no splits occurred in the table
- Assert.assertEquals(0, conn.tableOperations().listSplits(tableName).size());
-
- }
-
-}
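
Every case in LargeSplitRowIT hinges on Property.TABLE_MAX_END_ROW_SIZE: a row longer than that limit cannot serve as a tablet's end row, so oversized user-supplied splits are rejected and automatic splits wait until a short enough row exists. A sketch of the rejected-split case, assuming conn and table are already set up:

    conn.tableOperations().setProperty(table, Property.TABLE_MAX_END_ROW_SIZE.getKey(), "1000");
    SortedSet<Text> splits = new TreeSet<>();
    splits.add(new Text(new byte[2000])); // a 2000-byte row, over the 1000-byte limit
    conn.tableOperations().addSplits(table, splits); // expected to fail server-side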
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/MasterRepairsDualAssignmentIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/MasterRepairsDualAssignmentIT.java b/test/src/test/java/org/apache/accumulo/test/MasterRepairsDualAssignmentIT.java
deleted file mode 100644
index 9babeba..0000000
--- a/test/src/test/java/org/apache/accumulo/test/MasterRepairsDualAssignmentIT.java
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-
-import java.util.HashSet;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.impl.ClientContext;
-import org.apache.accumulo.core.client.impl.Credentials;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.replication.ReplicationTable;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.fate.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.master.state.MetaDataStateStore;
-import org.apache.accumulo.server.master.state.RootTabletStateStore;
-import org.apache.accumulo.server.master.state.TServerInstance;
-import org.apache.accumulo.server.master.state.TabletLocationState;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class MasterRepairsDualAssignmentIT extends ConfigurableMacBase {
-
- @Override
- public int defaultTimeoutSeconds() {
- return 5 * 60;
- }
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
- cfg.setProperty(Property.MASTER_RECOVERY_DELAY, "5s");
- // use raw local file system so walogs sync and flush will work
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- @Test
- public void test() throws Exception {
- // make some tablets, spread 'em around
- Connector c = getConnector();
- ClientContext context = new ClientContext(c.getInstance(), new Credentials("root", new PasswordToken(ROOT_PASSWORD)), getClientConfig());
- String table = this.getUniqueNames(1)[0];
- c.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
- c.securityOperations().grantTablePermission("root", RootTable.NAME, TablePermission.WRITE);
- c.tableOperations().create(table);
- SortedSet<Text> partitions = new TreeSet<Text>();
- for (String part : "a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")) {
- partitions.add(new Text(part));
- }
- c.tableOperations().addSplits(table, partitions);
- // scan the metadata table and get the two table location states
- Set<TServerInstance> states = new HashSet<TServerInstance>();
- Set<TabletLocationState> oldLocations = new HashSet<TabletLocationState>();
- MetaDataStateStore store = new MetaDataStateStore(context, null);
- while (states.size() < 2) {
- UtilWaitThread.sleep(250);
- oldLocations.clear();
- for (TabletLocationState tls : store) {
- if (tls.current != null) {
- states.add(tls.current);
- oldLocations.add(tls);
- }
- }
- }
- assertEquals(2, states.size());
- // Kill a tablet server... we don't care which one... wait for everything to be reassigned
- cluster.killProcess(ServerType.TABLET_SERVER, cluster.getProcesses().get(ServerType.TABLET_SERVER).iterator().next());
- Set<TServerInstance> replStates = new HashSet<>();
- // Find out which tablet server remains
- while (true) {
- UtilWaitThread.sleep(1000);
- states.clear();
- replStates.clear();
- boolean allAssigned = true;
- for (TabletLocationState tls : store) {
- if (tls != null && tls.current != null) {
- states.add(tls.current);
- } else if (tls != null && tls.extent.equals(new KeyExtent(new Text(ReplicationTable.ID), null, null))) {
- replStates.add(tls.current);
- } else {
- allAssigned = false;
- }
- }
- System.out.println(states + " size " + states.size() + " allAssigned " + allAssigned);
- if (states.size() != 2 && allAssigned == true)
- break;
- }
- assertEquals(1, replStates.size());
- assertEquals(1, states.size());
- // pick an assigned tablet and assign it to the old tablet
- TabletLocationState moved = null;
- for (TabletLocationState old : oldLocations) {
- if (!states.contains(old.current)) {
- moved = old;
- }
- }
- assertNotEquals(null, moved);
- // throw a mutation in as if we were the dying tablet
- BatchWriter bw = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
- Mutation assignment = new Mutation(moved.extent.getMetadataEntry());
- moved.current.putLocation(assignment);
- bw.addMutation(assignment);
- bw.close();
- // wait for the master to fix the problem
- waitForCleanStore(store);
- // now jam up the metadata table
- bw = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
- assignment = new Mutation(new KeyExtent(new Text(MetadataTable.ID), null, null).getMetadataEntry());
- moved.current.putLocation(assignment);
- bw.addMutation(assignment);
- bw.close();
- waitForCleanStore(new RootTabletStateStore(context, null));
- }
-
- private void waitForCleanStore(MetaDataStateStore store) {
- while (true) {
- try {
- Iterators.size(store.iterator());
- } catch (Exception ex) {
- System.out.println(ex);
- UtilWaitThread.sleep(250);
- continue;
- }
- break;
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/MetaConstraintRetryIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/MetaConstraintRetryIT.java b/test/src/test/java/org/apache/accumulo/test/MetaConstraintRetryIT.java
deleted file mode 100644
index 727859f..0000000
--- a/test/src/test/java/org/apache/accumulo/test/MetaConstraintRetryIT.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.accumulo.test;
-
-import org.apache.accumulo.core.client.impl.ClientContext;
-import org.apache.accumulo.core.client.impl.Credentials;
-import org.apache.accumulo.core.client.impl.Writer;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.tabletserver.thrift.ConstraintViolationException;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.server.util.MetadataTableUtil;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class MetaConstraintRetryIT extends AccumuloClusterHarness {
-
- @Override
- public int defaultTimeoutSeconds() {
- return 30;
- }
-
- // a test for ACCUMULO-3096
- @Test(expected = ConstraintViolationException.class)
- public void test() throws Exception {
-
- getConnector().securityOperations().grantTablePermission(getAdminPrincipal(), MetadataTable.NAME, TablePermission.WRITE);
-
- Credentials credentials = new Credentials(getAdminPrincipal(), getAdminToken());
- ClientContext context = new ClientContext(getConnector().getInstance(), credentials, cluster.getClientConfig());
- Writer w = new Writer(context, MetadataTable.ID);
- KeyExtent extent = new KeyExtent(new Text("5"), null, null);
-
- Mutation m = new Mutation(extent.getMetadataEntry());
- // unknown columns should cause a constraint violation
- m.put("badcolfam", "badcolqual", "3");
-
- try {
- MetadataTableUtil.update(w, null, m);
- } catch (RuntimeException e) {
- if (e.getCause().getClass().equals(ConstraintViolationException.class)) {
- throw (ConstraintViolationException) e.getCause();
- }
- }
- }
-}
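The try/catch at the end of this test exists only to unwrap the ConstraintViolationException that MetadataTableUtil.update wraps in a RuntimeException, so the @Test(expected = ...) clause can match it. That unwrap step generalizes to a small helper along these lines; the Exceptions class is a hypothetical name, not an Accumulo utility.

// Unwrap-and-rethrow: if a RuntimeException carries the expected checked
// cause, surface the cause itself; otherwise propagate the original.
final class Exceptions {
  static <T extends Throwable> void rethrowCause(RuntimeException e, Class<T> expected) throws T {
    Throwable cause = e.getCause();
    if (expected.isInstance(cause)) {
      throw expected.cast(cause); // lets @Test(expected = T.class) see the real failure
    }
    throw e; // not the cause we were looking for
  }
}

The catch block above would then collapse to Exceptions.rethrowCause(e, ConstraintViolationException.class).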
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/MetaGetsReadersIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/MetaGetsReadersIT.java b/test/src/test/java/org/apache/accumulo/test/MetaGetsReadersIT.java
deleted file mode 100644
index 84a5996..0000000
--- a/test/src/test/java/org/apache/accumulo/test/MetaGetsReadersIT.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.Random;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.accumulo.test.functional.SlowIterator;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class MetaGetsReadersIT extends ConfigurableMacBase {
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setNumTservers(1);
- cfg.setProperty(Property.TSERV_SCAN_MAX_OPENFILES, "2");
- cfg.setProperty(Property.TABLE_BLOCKCACHE_ENABLED, "false");
- }
-
- private static Thread slowScan(final Connector c, final String tableName, final AtomicBoolean stop) {
- Thread thread = new Thread() {
- @Override
- public void run() {
- try {
- while (!stop.get()) {
- Scanner s = c.createScanner(tableName, Authorizations.EMPTY);
- IteratorSetting is = new IteratorSetting(50, SlowIterator.class);
- SlowIterator.setSleepTime(is, 10);
- s.addScanIterator(is);
- Iterator<Entry<Key,Value>> iterator = s.iterator();
- while (iterator.hasNext() && !stop.get()) {
- iterator.next();
- }
- }
- } catch (Exception ex) {
- log.trace("{}", ex.getMessage(), ex);
- stop.set(true);
- }
- }
- };
- return thread;
- }
-
- @Test(timeout = 2 * 60 * 1000)
- public void test() throws Exception {
- final String tableName = getUniqueNames(1)[0];
- final Connector c = getConnector();
- c.tableOperations().create(tableName);
- Random random = new Random();
- BatchWriter bw = c.createBatchWriter(tableName, null);
- for (int i = 0; i < 50000; i++) {
- byte[] row = new byte[100];
- random.nextBytes(row);
- Mutation m = new Mutation(row);
- m.put("", "", "");
- bw.addMutation(m);
- }
- bw.close();
- c.tableOperations().flush(tableName, null, null, true);
- final AtomicBoolean stop = new AtomicBoolean(false);
- Thread t1 = slowScan(c, tableName, stop);
- t1.start();
- Thread t2 = slowScan(c, tableName, stop);
- t2.start();
- UtilWaitThread.sleep(500);
- long now = System.currentTimeMillis();
- Scanner m = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- Iterators.size(m.iterator());
- long delay = System.currentTimeMillis() - now;
- System.out.println("Delay = " + delay);
- assertTrue("metadata table scan was slow", delay < 1000);
- assertFalse(stop.get());
- stop.set(true);
- t1.interrupt();
- t2.interrupt();
- t1.join();
- t2.join();
- }
-
-}
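The heart of this test is a latency budget: while two deliberately slow scans tie up the single tserver's two allowed open scan files, a full metadata table scan must still complete in under a second, demonstrating that metadata reads are not starved by user scans. The measurement itself is a small reusable pattern; the Timed name below is illustrative, not part of the harness.

// Wall-clock an operation so it can be checked against a latency budget.
final class Timed {
  static long millis(Runnable op) {
    long start = System.currentTimeMillis();
    op.run();
    return System.currentTimeMillis() - start;
  }
}

The assertion then reads assertTrue("metadata table scan was slow", Timed.millis(() -> Iterators.size(m.iterator())) < 1000).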
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/MetaSplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/MetaSplitIT.java b/test/src/test/java/org/apache/accumulo/test/MetaSplitIT.java
deleted file mode 100644
index 0bc78fb..0000000
--- a/test/src/test/java/org/apache/accumulo/test/MetaSplitIT.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class MetaSplitIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(MetaSplitIT.class);
-
- private Collection<Text> metadataSplits = null;
-
- @Override
- public int defaultTimeoutSeconds() {
- return 3 * 60;
- }
-
- @Before
- public void saveMetadataSplits() throws Exception {
- if (ClusterType.STANDALONE == getClusterType()) {
- Connector conn = getConnector();
- Collection<Text> splits = conn.tableOperations().listSplits(MetadataTable.NAME);
- // We expect a single split
- if (!splits.equals(Arrays.asList(new Text("~")))) {
- log.info("Existing splits on metadata table. Saving them, and applying single original split of '~'");
- metadataSplits = splits;
- conn.tableOperations().merge(MetadataTable.NAME, null, null);
- conn.tableOperations().addSplits(MetadataTable.NAME, new TreeSet<Text>(Collections.singleton(new Text("~"))));
- }
- }
- }
-
- @After
- public void restoreMetadataSplits() throws Exception {
- if (null != metadataSplits) {
- log.info("Restoring split on metadata table");
- Connector conn = getConnector();
- conn.tableOperations().merge(MetadataTable.NAME, null, null);
- conn.tableOperations().addSplits(MetadataTable.NAME, new TreeSet<Text>(metadataSplits));
- }
- }
-
- @Test(expected = AccumuloException.class)
- public void testRootTableSplit() throws Exception {
- TableOperations opts = getConnector().tableOperations();
- SortedSet<Text> splits = new TreeSet<Text>();
- splits.add(new Text("5"));
- opts.addSplits(RootTable.NAME, splits);
- }
-
- @Test
- public void testRootTableMerge() throws Exception {
- TableOperations opts = getConnector().tableOperations();
- opts.merge(RootTable.NAME, null, null);
- }
-
- private void addSplits(TableOperations opts, String... points) throws Exception {
- SortedSet<Text> splits = new TreeSet<Text>();
- for (String point : points) {
- splits.add(new Text(point));
- }
- opts.addSplits(MetadataTable.NAME, splits);
- }
-
- @Test
- public void testMetadataTableSplit() throws Exception {
- TableOperations opts = getConnector().tableOperations();
- for (int i = 1; i <= 10; i++) {
- opts.create(Integer.toString(i));
- }
- try {
- opts.merge(MetadataTable.NAME, new Text("01"), new Text("02"));
- checkMetadataSplits(1, opts);
- addSplits(opts, "4 5 6 7 8".split(" "));
- checkMetadataSplits(6, opts);
- opts.merge(MetadataTable.NAME, new Text("6"), new Text("9"));
- checkMetadataSplits(4, opts);
- addSplits(opts, "44 55 66 77 88".split(" "));
- checkMetadataSplits(9, opts);
- opts.merge(MetadataTable.NAME, new Text("5"), new Text("7"));
- checkMetadataSplits(6, opts);
- opts.merge(MetadataTable.NAME, null, null);
- checkMetadataSplits(0, opts);
- } finally {
- for (int i = 1; i <= 10; i++) {
- opts.delete(Integer.toString(i));
- }
- }
- }
-
- private static void checkMetadataSplits(int numSplits, TableOperations opts) throws AccumuloSecurityException, TableNotFoundException, AccumuloException,
- InterruptedException {
- for (int i = 0; i < 10; i++) {
- if (opts.listSplits(MetadataTable.NAME).size() == numSplits) {
- break;
- }
- Thread.sleep(2000);
- }
- Collection<Text> splits = opts.listSplits(MetadataTable.NAME);
- assertEquals("Actual metadata table splits: " + splits, numSplits, splits.size());
- }
-
-}
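checkMetadataSplits above gives each merge or split an implicit 20-second budget: ten polls, two seconds apart, then a final assertion on whatever count was last seen. The same wait with an explicit deadline could look like this sketch; Await is a hypothetical helper, not part of the test harness.

import java.util.concurrent.Callable;

// Poll a counter until it reaches the expected value or the deadline
// passes, returning the last observed value for the caller to assert on.
final class Await {
  static int count(Callable<Integer> actual, int expected, long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    int seen = actual.call();
    while (seen != expected && System.currentTimeMillis() < deadline) {
      Thread.sleep(2000);
      seen = actual.call();
    }
    return seen;
  }
}

Usage would mirror the original: assertEquals(numSplits, Await.count(() -> opts.listSplits(MetadataTable.NAME).size(), numSplits, 20000)).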
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/MissingWalHeaderCompletesRecoveryIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/MissingWalHeaderCompletesRecoveryIT.java b/test/src/test/java/org/apache/accumulo/test/MissingWalHeaderCompletesRecoveryIT.java
deleted file mode 100644
index b3bf196..0000000
--- a/test/src/test/java/org/apache/accumulo/test/MissingWalHeaderCompletesRecoveryIT.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.io.File;
-import java.util.UUID;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.tabletserver.log.LogEntry;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.accumulo.tserver.log.DfsLogger;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.io.Text;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterables;
-
-/**
- * Verify that WAL recovery completes when a referenced write-ahead log is empty or contains only a partial header.
- */
-public class MissingWalHeaderCompletesRecoveryIT extends ConfigurableMacBase {
- private static final Logger log = LoggerFactory.getLogger(MissingWalHeaderCompletesRecoveryIT.class);
-
- private boolean rootHasWritePermission;
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration conf) {
- cfg.setNumTservers(1);
- cfg.setProperty(Property.MASTER_RECOVERY_DELAY, "1s");
- // Make sure the GC doesn't delete the file before the metadata reference is added
- cfg.setProperty(Property.GC_CYCLE_START, "999999s");
- conf.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- @Before
- public void setupMetadataPermission() throws Exception {
- Connector conn = getConnector();
- rootHasWritePermission = conn.securityOperations().hasTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
- if (!rootHasWritePermission) {
- conn.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
- // Make sure it propagates through ZK
- Thread.sleep(5000);
- }
- }
-
- @After
- public void resetMetadataPermission() throws Exception {
- Connector conn = getConnector();
- // Final state doesn't match the original
- if (rootHasWritePermission != conn.securityOperations().hasTablePermission("root", MetadataTable.NAME, TablePermission.WRITE)) {
- if (rootHasWritePermission) {
- // root had write permission when starting, ensure root still does
- conn.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
- } else {
- // root did not have write permission when starting, ensure that it does not
- conn.securityOperations().revokeTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
- }
- }
- }
-
- @Test
- public void testEmptyWalRecoveryCompletes() throws Exception {
- Connector conn = getConnector();
- MiniAccumuloClusterImpl cluster = getCluster();
- FileSystem fs = cluster.getFileSystem();
-
- // Fake out something that looks like host:port; the value is irrelevant
- String fakeServer = "127.0.0.1:12345";
-
- File walogs = new File(cluster.getConfig().getAccumuloDir(), ServerConstants.WAL_DIR);
- File walogServerDir = new File(walogs, fakeServer.replace(':', '+'));
- File emptyWalog = new File(walogServerDir, UUID.randomUUID().toString());
-
- log.info("Created empty WAL at " + emptyWalog.toURI());
-
- fs.create(new Path(emptyWalog.toURI())).close();
-
- Assert.assertTrue("root user did not have write permission to metadata table",
- conn.securityOperations().hasTablePermission("root", MetadataTable.NAME, TablePermission.WRITE));
-
- String tableName = getUniqueNames(1)[0];
- conn.tableOperations().create(tableName);
-
- String tableId = conn.tableOperations().tableIdMap().get(tableName);
- Assert.assertNotNull("Table ID was null", tableId);
-
- LogEntry logEntry = new LogEntry(new KeyExtent(new Text(tableId), null, null), 0, "127.0.0.1:12345", emptyWalog.toURI().toString());
-
- log.info("Taking {} offline", tableName);
- conn.tableOperations().offline(tableName, true);
-
- log.info("{} is offline", tableName);
-
- Text row = MetadataSchema.TabletsSection.getRow(new Text(tableId), null);
- Mutation m = new Mutation(row);
- m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
-
- BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
- bw.addMutation(m);
- bw.close();
-
- log.info("Bringing {} online", tableName);
- conn.tableOperations().online(tableName, true);
-
- log.info("{} is online", tableName);
-
- // Reading the table implies that recovery completed successfully (the empty file was ignored);
- // otherwise the tablet would never come online and we would not be able to read it.
- Scanner s = conn.createScanner(tableName, Authorizations.EMPTY);
- Assert.assertEquals(0, Iterables.size(s));
- }
-
- @Test
- public void testPartialHeaderWalRecoveryCompletes() throws Exception {
- Connector conn = getConnector();
- MiniAccumuloClusterImpl cluster = getCluster();
- FileSystem fs = getCluster().getFileSystem();
-
- // Fake out something that looks like host:port; the value is irrelevant
- String fakeServer = "127.0.0.1:12345";
-
- File walogs = new File(cluster.getConfig().getAccumuloDir(), ServerConstants.WAL_DIR);
- File walogServerDir = new File(walogs, fakeServer.replace(':', '+'));
- File partialHeaderWalog = new File(walogServerDir, UUID.randomUUID().toString());
-
- log.info("Created WAL with malformed header at " + partialHeaderWalog.toURI());
-
- // Write half of the header
- FSDataOutputStream wal = fs.create(new Path(partialHeaderWalog.toURI()));
- wal.write(DfsLogger.LOG_FILE_HEADER_V3.getBytes(UTF_8), 0, DfsLogger.LOG_FILE_HEADER_V3.length() / 2);
- wal.close();
-
- Assert.assertTrue("root user did not have write permission to metadata table",
- conn.securityOperations().hasTablePermission("root", MetadataTable.NAME, TablePermission.WRITE));
-
- String tableName = getUniqueNames(1)[0];
- conn.tableOperations().create(tableName);
-
- String tableId = conn.tableOperations().tableIdMap().get(tableName);
- Assert.assertNotNull("Table ID was null", tableId);
-
- LogEntry logEntry = new LogEntry(null, 0, "127.0.0.1:12345", partialHeaderWalog.toURI().toString());
-
- log.info("Taking {} offline", tableName);
- conn.tableOperations().offline(tableName, true);
-
- log.info("{} is offline", tableName);
-
- Text row = MetadataSchema.TabletsSection.getRow(new Text(tableId), null);
- Mutation m = new Mutation(row);
- m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
-
- BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
- bw.addMutation(m);
- bw.close();
-
- log.info("Bringing {} online", tableName);
- conn.tableOperations().online(tableName, true);
-
- log.info("{} is online", tableName);
-
- // Reading the table implies that recovery completed successfully (the partial-header file was ignored);
- // otherwise the tablet would never come online and we would not be able to read it.
- Scanner s = conn.createScanner(tableName, Authorizations.EMPTY);
- Assert.assertEquals(0, Iterables.size(s));
- }
-
-}
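Both tests hinge on manufacturing a WAL that recovery must tolerate rather than hang on: one file entirely empty, one holding only the first half of the DfsLogger header. The corruption step in isolation looks like the sketch below; the header string is a stand-in for DfsLogger.LOG_FILE_HEADER_V3 (which lives in the tserver module), and the path is an arbitrary example.

import static java.nio.charset.StandardCharsets.UTF_8;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;

// Write only the first half of a log header so the file looks like a WAL
// but cannot be parsed; recovery should skip it instead of blocking.
final class PartialHeaderWal {
  static void write(Path walFile, String header) throws IOException {
    byte[] bytes = header.getBytes(UTF_8);
    Files.write(walFile, Arrays.copyOfRange(bytes, 0, bytes.length / 2));
  }

  public static void main(String[] args) throws IOException {
    // "FAKE-WAL-HEADER" is a placeholder, not the real DfsLogger constant.
    write(Paths.get("/tmp/fake-wal"), "FAKE-WAL-HEADER");
  }
}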
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/MultiTableBatchWriterIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/MultiTableBatchWriterIT.java b/test/src/test/java/org/apache/accumulo/test/MultiTableBatchWriterIT.java
deleted file mode 100644
index 2b03780..0000000
--- a/test/src/test/java/org/apache/accumulo/test/MultiTableBatchWriterIT.java
+++ /dev/null
@@ -1,518 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MultiTableBatchWriter;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.TableOfflineException;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.client.impl.ClientContext;
-import org.apache.accumulo.core.client.impl.Credentials;
-import org.apache.accumulo.core.client.impl.MultiTableBatchWriterImpl;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.google.common.collect.Maps;
-
-public class MultiTableBatchWriterIT extends AccumuloClusterHarness {
-
- private Connector connector;
- private MultiTableBatchWriter mtbw;
-
- @Override
- public int defaultTimeoutSeconds() {
- return 5 * 60;
- }
-
- @Before
- public void setUpArgs() throws AccumuloException, AccumuloSecurityException {
- connector = getConnector();
- mtbw = getMultiTableBatchWriter(60);
- }
-
- public MultiTableBatchWriter getMultiTableBatchWriter(long cacheTimeoutInSeconds) {
- ClientContext context = new ClientContext(connector.getInstance(), new Credentials(getAdminPrincipal(), getAdminToken()), getCluster().getClientConfig());
- return new MultiTableBatchWriterImpl(context, new BatchWriterConfig(), cacheTimeoutInSeconds, TimeUnit.SECONDS);
- }
-
- @Test
- public void testTableRenameDataValidation() throws Exception {
-
- try {
- final String[] names = getUniqueNames(2);
- final String table1 = names[0], table2 = names[1];
-
- TableOperations tops = connector.tableOperations();
- tops.create(table1);
-
- BatchWriter bw1 = mtbw.getBatchWriter(table1);
-
- Mutation m1 = new Mutation("foo");
- m1.put("col1", "", "val1");
-
- bw1.addMutation(m1);
-
- tops.rename(table1, table2);
- tops.create(table1);
-
- BatchWriter bw2 = mtbw.getBatchWriter(table1);
-
- Mutation m2 = new Mutation("bar");
- m2.put("col1", "", "val1");
-
- bw1.addMutation(m2);
- bw2.addMutation(m2);
-
- mtbw.close();
-
- Map<Entry<String,String>,String> table1Expectations = new HashMap<Entry<String,String>,String>();
- table1Expectations.put(Maps.immutableEntry("bar", "col1"), "val1");
-
- Map<Entry<String,String>,String> table2Expectations = new HashMap<Entry<String,String>,String>();
- table2Expectations.put(Maps.immutableEntry("foo", "col1"), "val1");
- table2Expectations.put(Maps.immutableEntry("bar", "col1"), "val1");
-
- Scanner s = connector.createScanner(table1, new Authorizations());
- s.setRange(new Range());
- Map<Entry<String,String>,String> actual = new HashMap<Entry<String,String>,String>();
- for (Entry<Key,Value> entry : s) {
- actual.put(Maps.immutableEntry(entry.getKey().getRow().toString(), entry.getKey().getColumnFamily().toString()), entry.getValue().toString());
- }
-
- Assert.assertEquals("Differing results for " + table1, table1Expectations, actual);
-
- s = connector.createScanner(table2, new Authorizations());
- s.setRange(new Range());
- actual = new HashMap<Entry<String,String>,String>();
- for (Entry<Key,Value> entry : s) {
- actual.put(Maps.immutableEntry(entry.getKey().getRow().toString(), entry.getKey().getColumnFamily().toString()), entry.getValue().toString());
- }
-
- Assert.assertEquals("Differing results for " + table2, table2Expectations, actual);
-
- } finally {
- if (null != mtbw) {
- mtbw.close();
- }
- }
- }
-
- @Test
- public void testTableRenameSameWriters() throws Exception {
-
- try {
- final String[] names = getUniqueNames(4);
- final String table1 = names[0], table2 = names[1];
- final String newTable1 = names[2], newTable2 = names[3];
-
- TableOperations tops = connector.tableOperations();
- tops.create(table1);
- tops.create(table2);
-
- BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
-
- Mutation m1 = new Mutation("foo");
- m1.put("col1", "", "val1");
- m1.put("col2", "", "val2");
-
- bw1.addMutation(m1);
- bw2.addMutation(m1);
-
- tops.rename(table1, newTable1);
- tops.rename(table2, newTable2);
-
- Mutation m2 = new Mutation("bar");
- m2.put("col1", "", "val1");
- m2.put("col2", "", "val2");
-
- bw1.addMutation(m2);
- bw2.addMutation(m2);
-
- mtbw.close();
-
- Map<Entry<String,String>,String> expectations = new HashMap<Entry<String,String>,String>();
- expectations.put(Maps.immutableEntry("foo", "col1"), "val1");
- expectations.put(Maps.immutableEntry("foo", "col2"), "val2");
- expectations.put(Maps.immutableEntry("bar", "col1"), "val1");
- expectations.put(Maps.immutableEntry("bar", "col2"), "val2");
-
- for (String table : Arrays.asList(newTable1, newTable2)) {
- Scanner s = connector.createScanner(table, new Authorizations());
- s.setRange(new Range());
- Map<Entry<String,String>,String> actual = new HashMap<Entry<String,String>,String>();
- for (Entry<Key,Value> entry : s) {
- actual.put(Maps.immutableEntry(entry.getKey().getRow().toString(), entry.getKey().getColumnFamily().toString()), entry.getValue().toString());
- }
-
- Assert.assertEquals("Differing results for " + table, expectations, actual);
- }
- } finally {
- if (null != mtbw) {
- mtbw.close();
- }
- }
- }
-
- @Test
- public void testTableRenameNewWriters() throws Exception {
-
- try {
- final String[] names = getUniqueNames(4);
- final String table1 = names[0], table2 = names[1];
- final String newTable1 = names[2], newTable2 = names[3];
-
- TableOperations tops = connector.tableOperations();
- tops.create(table1);
- tops.create(table2);
-
- BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
-
- Mutation m1 = new Mutation("foo");
- m1.put("col1", "", "val1");
- m1.put("col2", "", "val2");
-
- bw1.addMutation(m1);
- bw2.addMutation(m1);
-
- tops.rename(table1, newTable1);
-
- // MTBW is still caching this name to the correct table, but we should invalidate its cache
- // after seeing the rename
- try {
- bw1 = mtbw.getBatchWriter(table1);
- Assert.fail("Should not be able to find this table");
- } catch (TableNotFoundException e) {
- // pass
- }
-
- tops.rename(table2, newTable2);
-
- try {
- bw2 = mtbw.getBatchWriter(table2);
- Assert.fail("Should not be able to find this table");
- } catch (TableNotFoundException e) {
- // pass
- }
-
- bw1 = mtbw.getBatchWriter(newTable1);
- bw2 = mtbw.getBatchWriter(newTable2);
-
- Mutation m2 = new Mutation("bar");
- m2.put("col1", "", "val1");
- m2.put("col2", "", "val2");
-
- bw1.addMutation(m2);
- bw2.addMutation(m2);
-
- mtbw.close();
-
- Map<Entry<String,String>,String> expectations = new HashMap<Entry<String,String>,String>();
- expectations.put(Maps.immutableEntry("foo", "col1"), "val1");
- expectations.put(Maps.immutableEntry("foo", "col2"), "val2");
- expectations.put(Maps.immutableEntry("bar", "col1"), "val1");
- expectations.put(Maps.immutableEntry("bar", "col2"), "val2");
-
- for (String table : Arrays.asList(newTable1, newTable2)) {
- Scanner s = connector.createScanner(table, new Authorizations());
- s.setRange(new Range());
- Map<Entry<String,String>,String> actual = new HashMap<Entry<String,String>,String>();
- for (Entry<Key,Value> entry : s) {
- actual.put(Maps.immutableEntry(entry.getKey().getRow().toString(), entry.getKey().getColumnFamily().toString()), entry.getValue().toString());
- }
-
- Assert.assertEquals("Differing results for " + table, expectations, actual);
- }
- } finally {
- if (null != mtbw) {
- mtbw.close();
- }
- }
- }
-
- @Test
- public void testTableRenameNewWritersNoCaching() throws Exception {
- mtbw = getMultiTableBatchWriter(0);
-
- try {
- final String[] names = getUniqueNames(4);
- final String table1 = names[0], table2 = names[1];
- final String newTable1 = names[2], newTable2 = names[3];
-
- TableOperations tops = connector.tableOperations();
- tops.create(table1);
- tops.create(table2);
-
- BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
-
- Mutation m1 = new Mutation("foo");
- m1.put("col1", "", "val1");
- m1.put("col2", "", "val2");
-
- bw1.addMutation(m1);
- bw2.addMutation(m1);
-
- tops.rename(table1, newTable1);
- tops.rename(table2, newTable2);
-
- try {
- bw1 = mtbw.getBatchWriter(table1);
- Assert.fail("Should not have gotten batchwriter for " + table1);
- } catch (TableNotFoundException e) {
- // Pass
- }
-
- try {
- bw2 = mtbw.getBatchWriter(table2);
- Assert.fail("Should not have gotten batchwriter for " + table2);
- } catch (TableNotFoundException e) {
- // Pass
- }
- } finally {
- if (null != mtbw) {
- mtbw.close();
- }
- }
- }
-
- @Test
- public void testTableDelete() throws Exception {
- boolean mutationsRejected = false;
-
- try {
- final String[] names = getUniqueNames(2);
- final String table1 = names[0], table2 = names[1];
-
- TableOperations tops = connector.tableOperations();
- tops.create(table1);
- tops.create(table2);
-
- BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
-
- Mutation m1 = new Mutation("foo");
- m1.put("col1", "", "val1");
- m1.put("col2", "", "val2");
-
- bw1.addMutation(m1);
- bw2.addMutation(m1);
-
- tops.delete(table1);
- tops.delete(table2);
-
- Mutation m2 = new Mutation("bar");
- m2.put("col1", "", "val1");
- m2.put("col2", "", "val2");
-
- try {
- bw1.addMutation(m2);
- bw2.addMutation(m2);
- } catch (MutationsRejectedException e) {
- // Pass - Mutations might flush immediately
- mutationsRejected = true;
- }
-
- } finally {
- if (null != mtbw) {
- try {
- // Mutations might have flushed before the tables were deleted
- mtbw.close();
- } catch (MutationsRejectedException e) {
- // Pass
- mutationsRejected = true;
- }
- }
- }
-
- Assert.assertTrue("Expected mutations to be rejected.", mutationsRejected);
- }
-
- @Test
- public void testOfflineTable() throws Exception {
- boolean mutationsRejected = false;
-
- try {
- final String[] names = getUniqueNames(2);
- final String table1 = names[0], table2 = names[1];
-
- TableOperations tops = connector.tableOperations();
- tops.create(table1);
- tops.create(table2);
-
- BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
-
- Mutation m1 = new Mutation("foo");
- m1.put("col1", "", "val1");
- m1.put("col2", "", "val2");
-
- bw1.addMutation(m1);
- bw2.addMutation(m1);
-
- tops.offline(table1, true);
- tops.offline(table2, true);
-
- Mutation m2 = new Mutation("bar");
- m2.put("col1", "", "val1");
- m2.put("col2", "", "val2");
-
- try {
- bw1.addMutation(m2);
- bw2.addMutation(m2);
- } catch (MutationsRejectedException e) {
- // Pass -- Mutations might flush immediately and fail because the table is offline
- mutationsRejected = true;
- }
- } finally {
- if (null != mtbw) {
- try {
- mtbw.close();
- } catch (MutationsRejectedException e) {
- // Pass
- mutationsRejected = true;
- }
- }
- }
-
- Assert.assertTrue("Expected mutations to be rejected.", mutationsRejected);
- }
-
- @Test
- public void testOfflineTableWithCache() throws Exception {
- boolean mutationsRejected = false;
-
- try {
- final String[] names = getUniqueNames(2);
- final String table1 = names[0], table2 = names[1];
-
- TableOperations tops = connector.tableOperations();
- tops.create(table1);
- tops.create(table2);
-
- BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
-
- Mutation m1 = new Mutation("foo");
- m1.put("col1", "", "val1");
- m1.put("col2", "", "val2");
-
- bw1.addMutation(m1);
- bw2.addMutation(m1);
-
- tops.offline(table1);
-
- try {
- bw1 = mtbw.getBatchWriter(table1);
- } catch (TableOfflineException e) {
- // pass
- mutationsRejected = true;
- }
-
- tops.offline(table2);
-
- try {
- bw2 = mtbw.getBatchWriter(table2);
- } catch (TableOfflineException e) {
- // pass
- mutationsRejected = true;
- }
- } finally {
- if (null != mtbw) {
- try {
- // Mutations might have flushed before the tables went offline
- mtbw.close();
- } catch (MutationsRejectedException e) {
- // Pass
- mutationsRejected = true;
- }
- }
- }
-
- Assert.assertTrue("Expected mutations to be rejected.", mutationsRejected);
- }
-
- @Test
- public void testOfflineTableWithoutCache() throws Exception {
- mtbw = getMultiTableBatchWriter(0);
- boolean mutationsRejected = false;
-
- try {
- final String[] names = getUniqueNames(2);
- final String table1 = names[0], table2 = names[1];
-
- TableOperations tops = connector.tableOperations();
- tops.create(table1);
- tops.create(table2);
-
- BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
-
- Mutation m1 = new Mutation("foo");
- m1.put("col1", "", "val1");
- m1.put("col2", "", "val2");
-
- bw1.addMutation(m1);
- bw2.addMutation(m1);
-
- // Mutations might or might not flush before the tables go offline
- tops.offline(table1);
- tops.offline(table2);
-
- try {
- bw1 = mtbw.getBatchWriter(table1);
- Assert.fail(table1 + " should be offline");
- } catch (TableOfflineException e) {
- // pass
- mutationsRejected = true;
- }
-
- try {
- bw2 = mtbw.getBatchWriter(table2);
- Assert.fail(table2 + " should be offline");
- } catch (TableOfflineException e) {
- // pass
- mutationsRejected = true;
- }
- } finally {
- if (null != mtbw) {
- try {
- // Mutations might have flushed before the tables went offline
- mtbw.close();
- } catch (MutationsRejectedException e) {
- // Pass
- mutationsRejected = true;
- }
- }
- }
-
- Assert.assertTrue("Expected mutations to be rejected.", mutationsRejected);
- }
-}
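Every case above exercises the same object: a MultiTableBatchWriter hands out per-table BatchWriters that share one buffer and one close, which is why a delete or offline of a single table can surface as a MutationsRejectedException from the shared close, and why a rename invalidates the writer's name-to-id cache. A minimal happy-path sketch, assuming an existing Connector and two existing tables:

import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.BatchWriterConfig;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.MultiTableBatchWriter;
import org.apache.accumulo.core.data.Mutation;

final class MtbwExample {
  // Write one mutation to each of two tables through a shared writer.
  static void writeBoth(Connector conn, String table1, String table2) throws Exception {
    MultiTableBatchWriter mtbw = conn.createMultiTableBatchWriter(new BatchWriterConfig());
    try {
      BatchWriter bw1 = mtbw.getBatchWriter(table1);
      BatchWriter bw2 = mtbw.getBatchWriter(table2);
      Mutation m = new Mutation("row");
      m.put("cf", "cq", "value");
      bw1.addMutation(m);
      bw2.addMutation(m);
    } finally {
      mtbw.close(); // flushes both tables; rejected mutations surface here
    }
  }
}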
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/VolumeIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/VolumeIT.java b/test/src/test/java/org/apache/accumulo/test/VolumeIT.java
deleted file mode 100644
index c25370d..0000000
--- a/test/src/test/java/org/apache/accumulo/test/VolumeIT.java
+++ /dev/null
@@ -1,568 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.BufferedOutputStream;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.admin.DiskUsage;
-import org.apache.accumulo.core.client.admin.NewTableConfiguration;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.ZooReader;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.init.Initialize;
-import org.apache.accumulo.server.log.WalStateManager;
-import org.apache.accumulo.server.log.WalStateManager.WalState;
-import org.apache.accumulo.server.util.Admin;
-import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Test;
-
-public class VolumeIT extends ConfigurableMacBase {
-
- private static final Text EMPTY = new Text();
- private static final Value EMPTY_VALUE = new Value(new byte[] {});
- private File volDirBase;
- private Path v1, v2;
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 5 * 60;
- }
-
- @SuppressWarnings("deprecation")
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- File baseDir = cfg.getDir();
- volDirBase = new File(baseDir, "volumes");
- File v1f = new File(volDirBase, "v1");
- File v2f = new File(volDirBase, "v2");
- v1 = new Path("file://" + v1f.getAbsolutePath());
- v2 = new Path("file://" + v2f.getAbsolutePath());
-
- // Run MAC on two locations in the local file system
- URI v1Uri = v1.toUri();
- cfg.setProperty(Property.INSTANCE_DFS_DIR, v1Uri.getPath());
- cfg.setProperty(Property.INSTANCE_DFS_URI, v1Uri.getScheme() + v1Uri.getHost());
- cfg.setProperty(Property.INSTANCE_VOLUMES, v1.toString() + "," + v2.toString());
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
-
- // use raw local file system so walogs sync and flush will work
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
-
- super.configure(cfg, hadoopCoreSite);
- }
-
- @Test
- public void test() throws Exception {
- // create a table
- Connector connector = getConnector();
- String tableName = getUniqueNames(1)[0];
- connector.tableOperations().create(tableName);
- SortedSet<Text> partitions = new TreeSet<Text>();
- // with some splits
- for (String s : "d,m,t".split(","))
- partitions.add(new Text(s));
- connector.tableOperations().addSplits(tableName, partitions);
- // scribble over the splits
- BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
- String[] rows = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",");
- for (String s : rows) {
- Mutation m = new Mutation(new Text(s));
- m.put(EMPTY, EMPTY, EMPTY_VALUE);
- bw.addMutation(m);
- }
- bw.close();
- // write the data to disk, read it back
- connector.tableOperations().flush(tableName, null, null, true);
- Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
- int i = 0;
- for (Entry<Key,Value> entry : scanner) {
- assertEquals(rows[i++], entry.getKey().getRow().toString());
- }
- // verify the new files are written to the different volumes
- scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- scanner.setRange(new Range("1", "1<"));
- scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
- int fileCount = 0;
-
- for (Entry<Key,Value> entry : scanner) {
- boolean inV1 = entry.getKey().getColumnQualifier().toString().contains(v1.toString());
- boolean inV2 = entry.getKey().getColumnQualifier().toString().contains(v2.toString());
- assertTrue(inV1 || inV2);
- fileCount++;
- }
- assertEquals(4, fileCount);
- List<DiskUsage> diskUsage = connector.tableOperations().getDiskUsage(Collections.singleton(tableName));
- assertEquals(1, diskUsage.size());
- long usage = diskUsage.get(0).getUsage().longValue();
- System.out.println("usage " + usage);
- assertTrue(usage > 700 && usage < 800);
- }
-
- private void verifyData(List<String> expected, Scanner createScanner) {
-
- List<String> actual = new ArrayList<String>();
-
- for (Entry<Key,Value> entry : createScanner) {
- Key k = entry.getKey();
- actual.add(k.getRow() + ":" + k.getColumnFamily() + ":" + k.getColumnQualifier() + ":" + entry.getValue());
- }
-
- Collections.sort(expected);
- Collections.sort(actual);
-
- Assert.assertEquals(expected, actual);
- }
-
- @Test
- public void testRelativePaths() throws Exception {
-
- List<String> expected = new ArrayList<String>();
-
- Connector connector = getConnector();
- String tableName = getUniqueNames(1)[0];
- connector.tableOperations().create(tableName, new NewTableConfiguration().withoutDefaultIterators());
-
- String tableId = connector.tableOperations().tableIdMap().get(tableName);
-
- SortedSet<Text> partitions = new TreeSet<Text>();
- // with some splits
- for (String s : "c,g,k,p,s,v".split(","))
- partitions.add(new Text(s));
-
- connector.tableOperations().addSplits(tableName, partitions);
-
- BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
-
- // create two files in each tablet
-
- String[] rows = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",");
- for (String s : rows) {
- Mutation m = new Mutation(s);
- m.put("cf1", "cq1", "1");
- bw.addMutation(m);
- expected.add(s + ":cf1:cq1:1");
- }
-
- bw.flush();
- connector.tableOperations().flush(tableName, null, null, true);
-
- for (String s : rows) {
- Mutation m = new Mutation(s);
- m.put("cf1", "cq1", "2");
- bw.addMutation(m);
- expected.add(s + ":cf1:cq1:2");
- }
-
- bw.close();
- connector.tableOperations().flush(tableName, null, null, true);
-
- verifyData(expected, connector.createScanner(tableName, Authorizations.EMPTY));
-
- connector.tableOperations().offline(tableName, true);
-
- connector.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
-
- Scanner metaScanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- metaScanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
- metaScanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
-
- BatchWriter mbw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-
- for (Entry<Key,Value> entry : metaScanner) {
- String cq = entry.getKey().getColumnQualifier().toString();
- if (cq.startsWith(v1.toString())) {
- Path path = new Path(cq);
- String relPath = "/" + path.getParent().getName() + "/" + path.getName();
- Mutation fileMut = new Mutation(entry.getKey().getRow());
- fileMut.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
- fileMut.put(entry.getKey().getColumnFamily().toString(), relPath, entry.getValue().toString());
- mbw.addMutation(fileMut);
- }
- }
-
- mbw.close();
-
- connector.tableOperations().online(tableName, true);
-
- verifyData(expected, connector.createScanner(tableName, Authorizations.EMPTY));
-
- connector.tableOperations().compact(tableName, null, null, true, true);
-
- verifyData(expected, connector.createScanner(tableName, Authorizations.EMPTY));
-
- for (Entry<Key,Value> entry : metaScanner) {
- String cq = entry.getKey().getColumnQualifier().toString();
- Path path = new Path(cq);
- Assert.assertTrue("relative path not deleted " + path.toString(), path.depth() > 2);
- }
-
- }
-
- @Test
- public void testAddVolumes() throws Exception {
-
- String[] tableNames = getUniqueNames(2);
-
- // grab this before shutting down cluster
- String uuid = new ZooKeeperInstance(cluster.getClientConfig()).getInstanceID();
-
- verifyVolumesUsed(tableNames[0], false, v1, v2);
-
- Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
- cluster.stop();
-
- Configuration conf = new Configuration(false);
- conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
-
- File v3f = new File(volDirBase, "v3");
- assertTrue(v3f.mkdir() || v3f.isDirectory());
- Path v3 = new Path("file://" + v3f.getAbsolutePath());
-
- conf.set(Property.INSTANCE_VOLUMES.getKey(), v1.toString() + "," + v2.toString() + "," + v3.toString());
- BufferedOutputStream fos = new BufferedOutputStream(new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
- conf.writeXml(fos);
- fos.close();
-
- // initialize volume
- Assert.assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").waitFor());
-
- // check that all volumes are initialized
- for (Path volumePath : Arrays.asList(v1, v2, v3)) {
- FileSystem fs = volumePath.getFileSystem(CachedConfiguration.getInstance());
- Path vp = new Path(volumePath, ServerConstants.INSTANCE_ID_DIR);
- FileStatus[] iids = fs.listStatus(vp);
- Assert.assertEquals(1, iids.length);
- Assert.assertEquals(uuid, iids[0].getPath().getName());
- }
-
- // start cluster and verify that new volume is used
- cluster.start();
-
- verifyVolumesUsed(tableNames[1], false, v1, v2, v3);
- }
-
- @Test
- public void testNonConfiguredVolumes() throws Exception {
-
- String[] tableNames = getUniqueNames(2);
-
- // grab this before shutting down cluster
- String uuid = new ZooKeeperInstance(cluster.getClientConfig()).getInstanceID();
-
- verifyVolumesUsed(tableNames[0], false, v1, v2);
-
- Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
- cluster.stop();
-
- Configuration conf = new Configuration(false);
- conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
-
- File v3f = new File(volDirBase, "v3");
- assertTrue(v3f.mkdir() || v3f.isDirectory());
- Path v3 = new Path("file://" + v3f.getAbsolutePath());
-
- conf.set(Property.INSTANCE_VOLUMES.getKey(), v2.toString() + "," + v3.toString());
- BufferedOutputStream fos = new BufferedOutputStream(new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
- conf.writeXml(fos);
- fos.close();
-
- // initialize volume
- Assert.assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").waitFor());
-
- // check that all volumes are initialized
- for (Path volumePath : Arrays.asList(v1, v2, v3)) {
- FileSystem fs = volumePath.getFileSystem(CachedConfiguration.getInstance());
- Path vp = new Path(volumePath, ServerConstants.INSTANCE_ID_DIR);
- FileStatus[] iids = fs.listStatus(vp);
- Assert.assertEquals(1, iids.length);
- Assert.assertEquals(uuid, iids[0].getPath().getName());
- }
-
- // start cluster and verify that new volume is used
- cluster.start();
-
- // Make sure we can still read the tables (tableNames[0] is very likely to have a file still on v1)
- List<String> expected = new ArrayList<String>();
- for (int i = 0; i < 100; i++) {
- String row = String.format("%06d", i * 100 + 3);
- expected.add(row + ":cf1:cq1:1");
- }
-
- verifyData(expected, getConnector().createScanner(tableNames[0], Authorizations.EMPTY));
-
- // v1 should not have any data for tableNames[1]
- verifyVolumesUsed(tableNames[1], false, v2, v3);
- }
-
- private void writeData(String tableName, Connector conn) throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException,
- MutationsRejectedException {
- TreeSet<Text> splits = new TreeSet<Text>();
- for (int i = 1; i < 100; i++) {
- splits.add(new Text(String.format("%06d", i * 100)));
- }
-
- conn.tableOperations().create(tableName);
- conn.tableOperations().addSplits(tableName, splits);
-
- BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
- for (int i = 0; i < 100; i++) {
- String row = String.format("%06d", i * 100 + 3);
- Mutation m = new Mutation(row);
- m.put("cf1", "cq1", "1");
- bw.addMutation(m);
- }
-
- bw.close();
- }
-
- private void verifyVolumesUsed(String tableName, boolean shouldExist, Path... paths) throws Exception {
-
- Connector conn = getConnector();
-
- List<String> expected = new ArrayList<String>();
- for (int i = 0; i < 100; i++) {
- String row = String.format("%06d", i * 100 + 3);
- expected.add(row + ":cf1:cq1:1");
- }
-
- if (!conn.tableOperations().exists(tableName)) {
- Assert.assertFalse(shouldExist);
-
- writeData(tableName, conn);
-
- verifyData(expected, conn.createScanner(tableName, Authorizations.EMPTY));
-
- conn.tableOperations().flush(tableName, null, null, true);
- }
-
- verifyData(expected, conn.createScanner(tableName, Authorizations.EMPTY));
-
- String tableId = conn.tableOperations().tableIdMap().get(tableName);
- Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(metaScanner);
- metaScanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
- metaScanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
-
- int[] counts = new int[paths.length];
-
- outer: for (Entry<Key,Value> entry : metaScanner) {
- String cf = entry.getKey().getColumnFamily().toString();
- String cq = entry.getKey().getColumnQualifier().toString();
-
- String path;
- if (cf.equals(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME.toString()))
- path = cq;
- else
- path = entry.getValue().toString();
-
- for (int i = 0; i < paths.length; i++) {
- if (path.startsWith(paths[i].toString())) {
- counts[i]++;
- continue outer;
- }
- }
-
- Assert.fail("Unexpected volume " + path);
- }
-
- Instance i = conn.getInstance();
- ZooReaderWriter zk = new ZooReaderWriter(i.getZooKeepers(), i.getZooKeepersSessionTimeOut(), "");
- WalStateManager wals = new WalStateManager(i, zk);
- outer: for (Entry<Path,WalState> entry : wals.getAllState().entrySet()) {
- for (Path path : paths) {
- if (entry.getKey().toString().startsWith(path.toString())) {
- continue outer;
- }
- }
- Assert.fail("Unexpected volume " + entry.getKey());
- }
-
- // if a volume is chosen uniformly at random for each tablet, then the probability that a given volume is never chosen for any tablet is
- // ((num_volumes - 1)/num_volumes)^num_tablets. For 100 tablets and 3 volumes, that is (2/3)^100, about 2.46e-18.
-
- int sum = 0;
- for (int count : counts) {
- Assert.assertTrue(count > 0);
- sum += count;
- }
-
- Assert.assertEquals(200, sum);
-
- }
-
- @Test
- public void testRemoveVolumes() throws Exception {
- String[] tableNames = getUniqueNames(2);
-
- verifyVolumesUsed(tableNames[0], false, v1, v2);
-
- Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
- cluster.stop();
-
- Configuration conf = new Configuration(false);
- conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
-
- conf.set(Property.INSTANCE_VOLUMES.getKey(), v2.toString());
- BufferedOutputStream fos = new BufferedOutputStream(new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
- conf.writeXml(fos);
- fos.close();
-
- // start cluster and verify that volume was decommissioned
- cluster.start();
-
- Connector conn = cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
- conn.tableOperations().compact(tableNames[0], null, null, true, true);
-
- verifyVolumesUsed(tableNames[0], true, v2);
-
- // check that root tablet is not on volume 1
- ZooReader zreader = new ZooReader(cluster.getZooKeepers(), 30000);
- String zpath = ZooUtil.getRoot(new ZooKeeperInstance(cluster.getClientConfig())) + RootTable.ZROOT_TABLET_PATH;
- String rootTabletDir = new String(zreader.getData(zpath, false, null), UTF_8);
- Assert.assertTrue(rootTabletDir.startsWith(v2.toString()));
-
- conn.tableOperations().clone(tableNames[0], tableNames[1], true, new HashMap<String,String>(), new HashSet<String>());
-
- conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
- conn.tableOperations().flush(RootTable.NAME, null, null, true);
-
- verifyVolumesUsed(tableNames[0], true, v2);
- verifyVolumesUsed(tableNames[1], true, v2);
-
- }
-
- private void testReplaceVolume(boolean cleanShutdown) throws Exception {
- String[] tableNames = getUniqueNames(3);
-
- verifyVolumesUsed(tableNames[0], false, v1, v2);
-
- // write to 2nd table, but do not flush data to disk before shutdown
- writeData(tableNames[1], cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD)));
-
- if (cleanShutdown)
- Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
-
- cluster.stop();
-
- File v1f = new File(v1.toUri());
- File v8f = new File(new File(v1.getParent().toUri()), "v8");
- Assert.assertTrue("Failed to rename " + v1f + " to " + v8f, v1f.renameTo(v8f));
- Path v8 = new Path(v8f.toURI());
-
- File v2f = new File(v2.toUri());
- File v9f = new File(new File(v2.getParent().toUri()), "v9");
- Assert.assertTrue("Failed to rename " + v2f + " to " + v9f, v2f.renameTo(v9f));
- Path v9 = new Path(v9f.toURI());
-
- Configuration conf = new Configuration(false);
- conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
-
- conf.set(Property.INSTANCE_VOLUMES.getKey(), v8 + "," + v9);
- conf.set(Property.INSTANCE_VOLUMES_REPLACEMENTS.getKey(), v1 + " " + v8 + "," + v2 + " " + v9);
- BufferedOutputStream fos = new BufferedOutputStream(new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
- conf.writeXml(fos);
- fos.close();
-
- // start cluster and verify that volumes were replaced
- cluster.start();
-
- verifyVolumesUsed(tableNames[0], true, v8, v9);
- verifyVolumesUsed(tableNames[1], true, v8, v9);
-
- // verify writes to new dir
- getConnector().tableOperations().compact(tableNames[0], null, null, true, true);
- getConnector().tableOperations().compact(tableNames[1], null, null, true, true);
-
- verifyVolumesUsed(tableNames[0], true, v8, v9);
- verifyVolumesUsed(tableNames[1], true, v8, v9);
-
- // check that root tablet is not on volume 1 or 2
- ZooReader zreader = new ZooReader(cluster.getZooKeepers(), 30000);
- String zpath = ZooUtil.getRoot(new ZooKeeperInstance(cluster.getClientConfig())) + RootTable.ZROOT_TABLET_PATH;
- String rootTabletDir = new String(zreader.getData(zpath, false, null), UTF_8);
- Assert.assertTrue(rootTabletDir.startsWith(v8.toString()) || rootTabletDir.startsWith(v9.toString()));
-
- getConnector().tableOperations().clone(tableNames[1], tableNames[2], true, new HashMap<String,String>(), new HashSet<String>());
-
- getConnector().tableOperations().flush(MetadataTable.NAME, null, null, true);
- getConnector().tableOperations().flush(RootTable.NAME, null, null, true);
-
- verifyVolumesUsed(tableNames[0], true, v8, v9);
- verifyVolumesUsed(tableNames[1], true, v8, v9);
- verifyVolumesUsed(tableNames[2], true, v8, v9);
- }
-
- @Test
- public void testCleanReplaceVolumes() throws Exception {
- testReplaceVolume(true);
- }
-
- @Test
- public void testDirtyReplaceVolumes() throws Exception {
- testReplaceVolume(false);
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/WaitForBalanceIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/WaitForBalanceIT.java b/test/src/test/java/org/apache/accumulo/test/WaitForBalanceIT.java
deleted file mode 100644
index 249bf14..0000000
--- a/test/src/test/java/org/apache/accumulo/test/WaitForBalanceIT.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class WaitForBalanceIT extends ConfigurableMacBase {
-
- @Override
- public int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Test
- public void test() throws Exception {
- final Connector c = getConnector();
- // ensure the metadata table is online
- Iterators.size(c.createScanner(MetadataTable.NAME, Authorizations.EMPTY).iterator());
- c.instanceOperations().waitForBalance();
- assertTrue(isBalanced());
- final String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- c.instanceOperations().waitForBalance();
- final SortedSet<Text> partitionKeys = new TreeSet<Text>();
- for (int i = 0; i < 1000; i++) {
- partitionKeys.add(new Text("" + i));
- }
- c.tableOperations().addSplits(tableName, partitionKeys);
- assertFalse(isBalanced());
- c.instanceOperations().waitForBalance();
- assertTrue(isBalanced());
- }
-
- private boolean isBalanced() throws Exception {
- final Map<String,Integer> counts = new HashMap<String,Integer>();
- int offline = 0;
- final Connector c = getConnector();
- for (String tableName : new String[] {MetadataTable.NAME, RootTable.NAME}) {
- final Scanner s = c.createScanner(tableName, Authorizations.EMPTY);
- s.setRange(MetadataSchema.TabletsSection.getRange());
- s.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
- MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(s);
- String location = null;
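-      // entries for a tablet arrive with the location (if assigned) before the prev-row column, so the prev-row entry marks the end of each tablet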
- for (Entry<Key,Value> entry : s) {
- Key key = entry.getKey();
- if (key.getColumnFamily().equals(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME)) {
- location = key.getColumnQualifier().toString();
- } else if (MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
- if (location == null) {
- offline++;
- } else {
-            Integer count = counts.get(location);
-            if (count == null)
-              count = 0;
-            counts.put(location, count + 1);
- }
- location = null;
- }
- }
- }
- // the replication table is expected to be offline for this test, so ignore it
- if (offline > 1) {
- System.out.println("Offline tablets " + offline);
- return false;
- }
- int average = 0;
- for (Integer i : counts.values()) {
- average += i;
- }
- average /= counts.size();
- System.out.println(counts);
- int tablesCount = c.tableOperations().list().size();
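-    // consider the system balanced when no server deviates from the average by more than one tablet per table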
- for (Entry<String,Integer> hostCount : counts.entrySet()) {
- if (Math.abs(average - hostCount.getValue()) > tablesCount) {
- System.out.println("Average " + average + " count " + hostCount.getKey() + ": " + hostCount.getValue());
- return false;
- }
- }
- return true;
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/AccumuloInputFormatIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/AccumuloInputFormatIT.java b/test/src/test/java/org/apache/accumulo/test/functional/AccumuloInputFormatIT.java
deleted file mode 100644
index 118f053..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/AccumuloInputFormatIT.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.lang.System.currentTimeMillis;
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
-import org.apache.accumulo.core.client.mapreduce.impl.BatchInputSplit;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.ConfigurationCopy;
-import org.apache.accumulo.core.conf.DefaultConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.InputSplit;
-import org.apache.hadoop.mapreduce.Job;
-import org.junit.Before;
-import org.junit.Test;
-
-public class AccumuloInputFormatIT extends AccumuloClusterHarness {
-
- AccumuloInputFormat inputFormat;
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 4 * 60;
- }
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setNumTservers(1);
- }
-
- @Before
- public void before() {
- inputFormat = new AccumuloInputFormat();
- }
-
- /**
- * Tests several different paths through the getSplits() method by setting different properties and verifying the results.
- */
- @Test
- public void testGetSplits() throws Exception {
- Connector conn = getConnector();
- String table = getUniqueNames(1)[0];
- conn.tableOperations().create(table);
- insertData(table, currentTimeMillis());
-
- ClientConfiguration clientConf = cluster.getClientConfig();
- AccumuloConfiguration clusterClientConf = new ConfigurationCopy(new DefaultConfiguration());
-
- // Pass SSL and CredentialProvider options into the ClientConfiguration given to AccumuloInputFormat
- boolean sslEnabled = Boolean.valueOf(clusterClientConf.get(Property.INSTANCE_RPC_SSL_ENABLED));
- if (sslEnabled) {
- ClientProperty[] sslProperties = new ClientProperty[] {ClientProperty.INSTANCE_RPC_SSL_ENABLED, ClientProperty.INSTANCE_RPC_SSL_CLIENT_AUTH,
- ClientProperty.RPC_SSL_KEYSTORE_PATH, ClientProperty.RPC_SSL_KEYSTORE_TYPE, ClientProperty.RPC_SSL_KEYSTORE_PASSWORD,
- ClientProperty.RPC_SSL_TRUSTSTORE_PATH, ClientProperty.RPC_SSL_TRUSTSTORE_TYPE, ClientProperty.RPC_SSL_TRUSTSTORE_PASSWORD,
- ClientProperty.RPC_USE_JSSE, ClientProperty.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS};
-
- for (ClientProperty prop : sslProperties) {
-        // The default value is returned when the property isn't set in the ClientConfiguration, so we don't have to check whether it's actually defined
- clientConf.setProperty(prop, clusterClientConf.get(prop.getKey()));
- }
- }
-
- Job job = Job.getInstance();
- AccumuloInputFormat.setInputTableName(job, table);
- AccumuloInputFormat.setZooKeeperInstance(job, clientConf);
- AccumuloInputFormat.setConnectorInfo(job, getAdminPrincipal(), getAdminToken());
-
- // split table
- TreeSet<Text> splitsToAdd = new TreeSet<Text>();
- for (int i = 0; i < 10000; i += 1000)
- splitsToAdd.add(new Text(String.format("%09d", i)));
- conn.tableOperations().addSplits(table, splitsToAdd);
- UtilWaitThread.sleep(500); // wait for splits to be propagated
-
- // get splits without setting any range
- Collection<Text> actualSplits = conn.tableOperations().listSplits(table);
- List<InputSplit> splits = inputFormat.getSplits(job);
- assertEquals(actualSplits.size() + 1, splits.size()); // No ranges set on the job so it'll start with -inf
-
- // set ranges and get splits
- List<Range> ranges = new ArrayList<Range>();
- for (Text text : actualSplits)
- ranges.add(new Range(text));
- AccumuloInputFormat.setRanges(job, ranges);
- splits = inputFormat.getSplits(job);
- assertEquals(actualSplits.size(), splits.size());
-
-    // offline mode: getSplits should fail while the table is still online
- AccumuloInputFormat.setOfflineTableScan(job, true);
- try {
- inputFormat.getSplits(job);
- fail("An exception should have been thrown");
- } catch (IOException e) {}
-
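-    // once the table is offline, an offline scan can compute splits directly from the metadata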
- conn.tableOperations().offline(table, true);
- splits = inputFormat.getSplits(job);
- assertEquals(actualSplits.size(), splits.size());
-
- // auto adjust ranges
- ranges = new ArrayList<Range>();
- for (int i = 0; i < 5; i++)
- // overlapping ranges
- ranges.add(new Range(String.format("%09d", i), String.format("%09d", i + 2)));
- AccumuloInputFormat.setRanges(job, ranges);
- splits = inputFormat.getSplits(job);
- assertEquals(2, splits.size());
-
- AccumuloInputFormat.setAutoAdjustRanges(job, false);
- splits = inputFormat.getSplits(job);
- assertEquals(ranges.size(), splits.size());
-
- // BatchScan not available for offline scans
- AccumuloInputFormat.setBatchScan(job, true);
- // Reset auto-adjust ranges too
- AccumuloInputFormat.setAutoAdjustRanges(job, true);
-
- AccumuloInputFormat.setOfflineTableScan(job, true);
- try {
- inputFormat.getSplits(job);
- fail("An exception should have been thrown");
- } catch (IllegalArgumentException e) {}
-
- conn.tableOperations().online(table, true);
- AccumuloInputFormat.setOfflineTableScan(job, false);
-
- // test for resumption of success
- splits = inputFormat.getSplits(job);
- assertEquals(2, splits.size());
-
- // BatchScan not available with isolated iterators
- AccumuloInputFormat.setScanIsolation(job, true);
- try {
- inputFormat.getSplits(job);
- fail("An exception should have been thrown");
- } catch (IllegalArgumentException e) {}
- AccumuloInputFormat.setScanIsolation(job, false);
-
- // test for resumption of success
- splits = inputFormat.getSplits(job);
- assertEquals(2, splits.size());
-
- // BatchScan not available with local iterators
- AccumuloInputFormat.setLocalIterators(job, true);
- try {
- inputFormat.getSplits(job);
- fail("An exception should have been thrown");
- } catch (IllegalArgumentException e) {}
- AccumuloInputFormat.setLocalIterators(job, false);
-
-    // check that we get back the correct type of split
- conn.tableOperations().online(table);
- splits = inputFormat.getSplits(job);
- for (InputSplit split : splits)
- assert (split instanceof BatchInputSplit);
-
- // We should divide along the tablet lines similar to when using `setAutoAdjustRanges(job, true)`
- assertEquals(2, splits.size());
- }
-
- private void insertData(String tableName, long ts) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
- BatchWriter bw = getConnector().createBatchWriter(tableName, null);
-
- for (int i = 0; i < 10000; i++) {
- String row = String.format("%09d", i);
-
- Mutation m = new Mutation(new Text(row));
- m.put(new Text("cf1"), new Text("cq1"), ts, new Value(("" + i).getBytes()));
- bw.addMutation(m);
- }
- bw.close();
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/AddSplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/AddSplitIT.java b/test/src/test/java/org/apache/accumulo/test/functional/AddSplitIT.java
deleted file mode 100644
index 4b4aeac..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/AddSplitIT.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class AddSplitIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Test
- public void addSplitTest() throws Exception {
-
- String tableName = getUniqueNames(1)[0];
- Connector c = getConnector();
- c.tableOperations().create(tableName);
-
-    insertData(tableName, 1L);
-
- TreeSet<Text> splits = new TreeSet<Text>();
- splits.add(new Text(String.format("%09d", 333)));
- splits.add(new Text(String.format("%09d", 666)));
-
- c.tableOperations().addSplits(tableName, splits);
-
- UtilWaitThread.sleep(100);
-
- Collection<Text> actualSplits = c.tableOperations().listSplits(tableName);
-
- if (!splits.equals(new TreeSet<Text>(actualSplits))) {
- throw new Exception(splits + " != " + actualSplits);
- }
-
-    verifyData(tableName, 1L);
-    insertData(tableName, 2L);
-
-    // the splits set was deliberately not cleared; addSplits should ignore the
-    // existing split points and still create the three additional ones
-
- splits.add(new Text(String.format("%09d", 200)));
- splits.add(new Text(String.format("%09d", 500)));
- splits.add(new Text(String.format("%09d", 800)));
-
- c.tableOperations().addSplits(tableName, splits);
-
- UtilWaitThread.sleep(100);
-
- actualSplits = c.tableOperations().listSplits(tableName);
-
- if (!splits.equals(new TreeSet<Text>(actualSplits))) {
- throw new Exception(splits + " != " + actualSplits);
- }
-
-    verifyData(tableName, 2L);
- }
-
- private void verifyData(String tableName, long ts) throws Exception {
- Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
-
- Iterator<Entry<Key,Value>> iter = scanner.iterator();
-
- for (int i = 0; i < 10000; i++) {
- if (!iter.hasNext()) {
- throw new Exception("row " + i + " not found");
- }
-
- Entry<Key,Value> entry = iter.next();
-
- String row = String.format("%09d", i);
-
- if (!entry.getKey().getRow().equals(new Text(row))) {
- throw new Exception("unexpected row " + entry.getKey() + " " + i);
- }
-
- if (entry.getKey().getTimestamp() != ts) {
- throw new Exception("unexpected ts " + entry.getKey() + " " + ts);
- }
-
- if (Integer.parseInt(entry.getValue().toString()) != i) {
- throw new Exception("unexpected value " + entry + " " + i);
- }
- }
-
- if (iter.hasNext()) {
- throw new Exception("found more than expected " + iter.next());
- }
-
- }
-
- private void insertData(String tableName, long ts) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
- BatchWriter bw = getConnector().createBatchWriter(tableName, null);
-
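-    // write 10,000 rows with a fixed timestamp so verifyData can tell the two ingest passes apart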
- for (int i = 0; i < 10000; i++) {
- String row = String.format("%09d", i);
-
- Mutation m = new Mutation(new Text(row));
- m.put(new Text("cf1"), new Text("cq1"), ts, new Value(Integer.toString(i).getBytes(UTF_8)));
- bw.addMutation(m);
- }
-
- bw.close();
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/BackupMasterIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BackupMasterIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BackupMasterIT.java
deleted file mode 100644
index d8979db..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/BackupMasterIT.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Collections;
-import java.util.List;
-
-import org.apache.accumulo.fate.util.UtilWaitThread;
-import org.apache.accumulo.fate.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
-import org.apache.accumulo.master.Master;
-import org.junit.Test;
-
-public class BackupMasterIT extends ConfigurableMacBase {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 120;
- }
-
- @Test
- public void test() throws Exception {
- // wait for master
- UtilWaitThread.sleep(1000);
- // create a backup
- Process backup = exec(Master.class);
- try {
- ZooReaderWriter writer = new ZooReaderWriter(cluster.getZooKeepers(), 30 * 1000, "digest", "accumulo:DONTTELL".getBytes());
- String root = "/accumulo/" + getConnector().getInstance().getInstanceID();
- List<String> children = Collections.emptyList();
- // wait for 2 lock entries
- do {
- UtilWaitThread.sleep(100);
- children = writer.getChildren(root + "/masters/lock");
- } while (children.size() != 2);
- Collections.sort(children);
- // wait for the backup master to learn to be the backup
- UtilWaitThread.sleep(1000);
-      // generate a spurious zookeeper event by rewriting the lock data in place
- String lockPath = root + "/masters/lock/" + children.get(0);
- byte data[] = writer.getData(lockPath, null);
- writer.getZooKeeper().setData(lockPath, data, -1);
- // let it propagate
- UtilWaitThread.sleep(500);
- // kill the master by removing its lock
- writer.recursiveDelete(lockPath, NodeMissingPolicy.FAIL);
- // ensure the backup becomes the master
- getConnector().tableOperations().create(getUniqueNames(1)[0]);
- } finally {
- backup.destroy();
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/BadIteratorMincIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BadIteratorMincIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BadIteratorMincIT.java
deleted file mode 100644
index 4c6fc00..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/BadIteratorMincIT.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-
-import java.util.EnumSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class BadIteratorMincIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Test
- public void test() throws Exception {
- Connector c = getConnector();
-
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- IteratorSetting is = new IteratorSetting(30, BadIterator.class);
- c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.minc));
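-    // BadIterator fails during minor compaction, so flushes of this table cannot complete until it is removed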
- BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
-
- Mutation m = new Mutation(new Text("r1"));
- m.put(new Text("acf"), new Text(tableName), new Value("1".getBytes(UTF_8)));
-
- bw.addMutation(m);
- bw.close();
-
- c.tableOperations().flush(tableName, null, null, false);
- UtilWaitThread.sleep(1000);
-
- // minc should fail, so there should be no files
- FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 0, 0);
-
- // try to scan table
- Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
- int count = Iterators.size(scanner.iterator());
- assertEquals("Did not see expected # entries " + count, 1, count);
-
- // remove the bad iterator
- c.tableOperations().removeIterator(tableName, BadIterator.class.getSimpleName(), EnumSet.of(IteratorScope.minc));
-
- UtilWaitThread.sleep(5000);
-
- // minc should complete
- FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 1, 1);
-
- count = Iterators.size(scanner.iterator());
-
-    assertEquals("Did not see expected # entries " + count, 1, count);
-
- // now try putting bad iterator back and deleting the table
- c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.minc));
- bw = c.createBatchWriter(tableName, new BatchWriterConfig());
- m = new Mutation(new Text("r2"));
- m.put(new Text("acf"), new Text(tableName), new Value("1".getBytes(UTF_8)));
- bw.addMutation(m);
- bw.close();
-
- // make sure property is given time to propagate
- UtilWaitThread.sleep(500);
-
- c.tableOperations().flush(tableName, null, null, false);
-
- // make sure the flush has time to start
- UtilWaitThread.sleep(1000);
-
- // this should not hang
- c.tableOperations().delete(tableName);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/BalanceAfterCommsFailureIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BalanceAfterCommsFailureIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BalanceAfterCommsFailureIT.java
deleted file mode 100644
index ae470f6..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/BalanceAfterCommsFailureIT.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.lang.reflect.Field;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.impl.ClientContext;
-import org.apache.accumulo.core.client.impl.Credentials;
-import org.apache.accumulo.core.client.impl.MasterClient;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.master.thrift.MasterClientService;
-import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
-import org.apache.accumulo.core.master.thrift.TableInfo;
-import org.apache.accumulo.core.master.thrift.TabletServerStatus;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.trace.Tracer;
-import org.apache.accumulo.fate.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.minicluster.impl.ProcessReference;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-import com.google.common.collect.Iterables;
-
-public class BalanceAfterCommsFailureIT extends ConfigurableMacBase {
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setProperty(Property.GENERAL_RPC_TIMEOUT, "2s");
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Test
- public void test() throws Exception {
- Connector c = this.getConnector();
- c.tableOperations().create("test");
- Collection<ProcessReference> tservers = getCluster().getProcesses().get(ServerType.TABLET_SERVER);
- ArrayList<Integer> tserverPids = new ArrayList<Integer>(tservers.size());
- for (ProcessReference tserver : tservers) {
- Process p = tserver.getProcess();
- if (!p.getClass().getName().equals("java.lang.UNIXProcess")) {
- log.info("Found process that was not UNIXProcess, exiting test");
- return;
- }
-
- Field f = p.getClass().getDeclaredField("pid");
- f.setAccessible(true);
- tserverPids.add(f.getInt(p));
- }
-
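-    // suspend every tserver to simulate a communications failure, then resume them after the RPC timeout has expired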
- for (int pid : tserverPids) {
- assertEquals(0, Runtime.getRuntime().exec(new String[] {"kill", "-SIGSTOP", Integer.toString(pid)}).waitFor());
- }
- UtilWaitThread.sleep(20 * 1000);
- for (int pid : tserverPids) {
- assertEquals(0, Runtime.getRuntime().exec(new String[] {"kill", "-SIGCONT", Integer.toString(pid)}).waitFor());
- }
- SortedSet<Text> splits = new TreeSet<Text>();
- for (String split : "a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")) {
- splits.add(new Text(split));
- }
- c.tableOperations().addSplits("test", splits);
- // Ensure all of the tablets are actually assigned
- assertEquals(0, Iterables.size(c.createScanner("test", Authorizations.EMPTY)));
- UtilWaitThread.sleep(30 * 1000);
- checkBalance(c);
- }
-
- private void checkBalance(Connector c) throws Exception {
- Credentials creds = new Credentials("root", new PasswordToken(ROOT_PASSWORD));
- ClientContext context = new ClientContext(c.getInstance(), creds, getClientConfig());
-
- MasterMonitorInfo stats = null;
- int unassignedTablets = 1;
- for (int i = 0; unassignedTablets > 0 && i < 10; i++) {
- MasterClientService.Iface client = null;
- try {
- client = MasterClient.getConnectionWithRetry(context);
- stats = client.getMasterStats(Tracer.traceInfo(), context.rpcCreds());
- } finally {
- if (client != null)
- MasterClient.close(client);
- }
- unassignedTablets = stats.getUnassignedTablets();
- if (unassignedTablets > 0) {
- log.info("Found " + unassignedTablets + " unassigned tablets, sleeping 3 seconds for tablet assignment");
- Thread.sleep(3000);
- }
- }
-
- assertEquals("Unassigned tablets were not assigned within 30 seconds", 0, unassignedTablets);
-
- List<Integer> counts = new ArrayList<Integer>();
- for (TabletServerStatus server : stats.tServerInfo) {
- int count = 0;
- for (TableInfo table : server.tableMap.values()) {
- count += table.onlineTablets;
- }
- counts.add(count);
- }
- assertTrue("Expected to have at least two TabletServers", counts.size() > 1);
- for (int i = 1; i < counts.size(); i++) {
- int diff = Math.abs(counts.get(0) - counts.get(i));
- assertTrue("Expected difference in tablets to be less than or equal to " + counts.size() + " but was " + diff + ". Counts " + counts,
- diff <= counts.size());
- }
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/BalanceInPresenceOfOfflineTableIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BalanceInPresenceOfOfflineTableIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BalanceInPresenceOfOfflineTableIT.java
deleted file mode 100644
index 623d79b..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/BalanceInPresenceOfOfflineTableIT.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Arrays;
-import java.util.Map;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.impl.ClientContext;
-import org.apache.accumulo.core.client.impl.Credentials;
-import org.apache.accumulo.core.client.impl.MasterClient;
-import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.master.thrift.MasterClientService;
-import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
-import org.apache.accumulo.core.master.thrift.TableInfo;
-import org.apache.accumulo.core.trace.Tracer;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.commons.lang.math.NumberUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.apache.thrift.TException;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Create a table with many splits and take it offline before the tablets can rebalance, then verify that a different table can still balance.
- */
-public class BalanceInPresenceOfOfflineTableIT extends AccumuloClusterHarness {
-
- private static Logger log = LoggerFactory.getLogger(BalanceInPresenceOfOfflineTableIT.class);
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- Map<String,String> siteConfig = cfg.getSiteConfig();
- siteConfig.put(Property.TSERV_MAXMEM.getKey(), "10K");
- siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "0");
- cfg.setSiteConfig(siteConfig);
- // ensure we have two tservers
- if (cfg.getNumTservers() < 2) {
- cfg.setNumTservers(2);
- }
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 10 * 60;
- }
-
- private static final int NUM_SPLITS = 200;
-
- private String UNUSED_TABLE, TEST_TABLE;
-
- private Connector connector;
-
- @Before
- public void setupTables() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException {
- Connector conn = getConnector();
- // Need at least two tservers
- Assume.assumeTrue("Not enough tservers to run test", conn.instanceOperations().getTabletServers().size() >= 2);
-
- // set up splits
- final SortedSet<Text> splits = new TreeSet<Text>();
- for (int i = 0; i < NUM_SPLITS; i++) {
- splits.add(new Text(String.format("%08x", i * 1000)));
- }
-
- String[] names = getUniqueNames(2);
- UNUSED_TABLE = names[0];
- TEST_TABLE = names[1];
-
- // load into a table we won't use
- connector = getConnector();
- connector.tableOperations().create(UNUSED_TABLE);
- connector.tableOperations().addSplits(UNUSED_TABLE, splits);
- // mark the table offline before it can rebalance.
- connector.tableOperations().offline(UNUSED_TABLE);
-
- // actual test table
- connector.tableOperations().create(TEST_TABLE);
- connector.tableOperations().setProperty(TEST_TABLE, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
- }
-
- @Test
- public void test() throws Exception {
- log.info("Test that balancing is not stopped by an offline table with outstanding migrations.");
-
- log.debug("starting test ingestion");
-
- TestIngest.Opts opts = new TestIngest.Opts();
- VerifyIngest.Opts vopts = new VerifyIngest.Opts();
- ClientConfiguration conf = cluster.getClientConfig();
- if (conf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- opts.updateKerberosCredentials(cluster.getClientConfig());
- vopts.updateKerberosCredentials(cluster.getClientConfig());
- } else {
- opts.setPrincipal("root");
- vopts.setPrincipal("root");
- }
- vopts.rows = opts.rows = 200000;
- opts.setTableName(TEST_TABLE);
- TestIngest.ingest(connector, opts, new BatchWriterOpts());
- connector.tableOperations().flush(TEST_TABLE, null, null, true);
- vopts.setTableName(TEST_TABLE);
- VerifyIngest.verifyIngest(connector, vopts, new ScannerOpts());
-
- log.debug("waiting for balancing, up to ~5 minutes to allow for migration cleanup.");
- final long startTime = System.currentTimeMillis();
- long currentWait = 10 * 1000;
- boolean balancingWorked = false;
-
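-    // poll the master with exponential backoff until the tablet counts even out or ~5 minutes pass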
- Credentials creds = new Credentials(getAdminPrincipal(), getAdminToken());
- while (!balancingWorked && (System.currentTimeMillis() - startTime) < ((5 * 60 + 15) * 1000)) {
- Thread.sleep(currentWait);
- currentWait *= 2;
-
- log.debug("fetch the list of tablets assigned to each tserver.");
-
- MasterClientService.Iface client = null;
- MasterMonitorInfo stats = null;
- try {
- Instance instance = new ZooKeeperInstance(cluster.getClientConfig());
- client = MasterClient.getConnectionWithRetry(new ClientContext(instance, creds, cluster.getClientConfig()));
- stats = client.getMasterStats(Tracer.traceInfo(), creds.toThrift(instance));
- } catch (ThriftSecurityException exception) {
- throw new AccumuloSecurityException(exception);
- } catch (TException exception) {
- throw new AccumuloException(exception);
- } finally {
- if (client != null) {
- MasterClient.close(client);
- }
- }
-
- if (stats.getTServerInfoSize() < 2) {
- log.debug("we need >= 2 servers. sleeping for " + currentWait + "ms");
- continue;
- }
- if (stats.getUnassignedTablets() != 0) {
- log.debug("We shouldn't have unassigned tablets. sleeping for " + currentWait + "ms");
- continue;
- }
-
- long[] tabletsPerServer = new long[stats.getTServerInfoSize()];
-      Arrays.fill(tabletsPerServer, 0L);
- for (int i = 0; i < stats.getTServerInfoSize(); i++) {
- for (Map.Entry<String,TableInfo> entry : stats.getTServerInfo().get(i).getTableMap().entrySet()) {
- tabletsPerServer[i] += entry.getValue().getTablets();
- }
- }
-
- if (tabletsPerServer[0] <= 10) {
- log.debug("We should have > 10 tablets. sleeping for " + currentWait + "ms");
- continue;
- }
- long min = NumberUtils.min(tabletsPerServer), max = NumberUtils.max(tabletsPerServer);
- log.debug("Min=" + min + ", Max=" + max);
- if ((min / ((double) max)) < 0.5) {
- log.debug("ratio of min to max tablets per server should be roughly even. sleeping for " + currentWait + "ms");
- continue;
- }
- balancingWorked = true;
- }
-
- Assert.assertTrue("did not properly balance", balancingWorked);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/BatchScanSplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BatchScanSplitIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BatchScanSplitIT.java
deleted file mode 100644
index 14295c4..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/BatchScanSplitIT.java
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map.Entry;
-import java.util.Random;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class BatchScanSplitIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(BatchScanSplitIT.class);
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setProperty(Property.TSERV_MAJC_DELAY, "0");
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Test
- public void test() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
-
- int numRows = 1 << 18;
-
- BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
-
- for (int i = 0; i < numRows; i++) {
- Mutation m = new Mutation(new Text(String.format("%09x", i)));
- m.put(new Text("cf1"), new Text("cq1"), new Value(String.format("%016x", numRows - i).getBytes(UTF_8)));
- bw.addMutation(m);
- }
-
- bw.close();
-
- getConnector().tableOperations().flush(tableName, null, null, true);
-
- getConnector().tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "4K");
-
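-    // lowering the split threshold makes the tserver split the flushed table; wait until at least two splits appear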
- Collection<Text> splits = getConnector().tableOperations().listSplits(tableName);
- while (splits.size() < 2) {
- UtilWaitThread.sleep(1);
- splits = getConnector().tableOperations().listSplits(tableName);
- }
-
- System.out.println("splits : " + splits);
-
- Random random = new Random(19011230);
- HashMap<Text,Value> expected = new HashMap<Text,Value>();
- ArrayList<Range> ranges = new ArrayList<Range>();
- for (int i = 0; i < 100; i++) {
- int r = random.nextInt(numRows);
- Text row = new Text(String.format("%09x", r));
- expected.put(row, new Value(String.format("%016x", numRows - r).getBytes(UTF_8)));
- ranges.add(new Range(row));
- }
-
- // logger.setLevel(Level.TRACE);
-
- HashMap<Text,Value> found = new HashMap<Text,Value>();
-
- for (int i = 0; i < 20; i++) {
- BatchScanner bs = getConnector().createBatchScanner(tableName, Authorizations.EMPTY, 4);
-
- found.clear();
-
- long t1 = System.currentTimeMillis();
-
- bs.setRanges(ranges);
-
- for (Entry<Key,Value> entry : bs) {
- found.put(entry.getKey().getRow(), entry.getValue());
- }
- bs.close();
-
- long t2 = System.currentTimeMillis();
-
- log.info(String.format("rate : %06.2f%n", ranges.size() / ((t2 - t1) / 1000.0)));
-
- if (!found.equals(expected))
- throw new Exception("Found and expected differ " + found + " " + expected);
- }
-
- splits = getConnector().tableOperations().listSplits(tableName);
- log.info("splits : " + splits);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/BatchWriterFlushIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BatchWriterFlushIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BatchWriterFlushIT.java
deleted file mode 100644
index 7c05a0f..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/BatchWriterFlushIT.java
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.Random;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class BatchWriterFlushIT extends AccumuloClusterHarness {
-
- private static final int NUM_TO_FLUSH = 100000;
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 90;
- }
-
- @Test
- public void run() throws Exception {
- Connector c = getConnector();
- String[] tableNames = getUniqueNames(2);
- String bwft = tableNames[0];
- c.tableOperations().create(bwft);
- String bwlt = tableNames[1];
- c.tableOperations().create(bwlt);
- runFlushTest(bwft);
- runLatencyTest(bwlt);
-
- }
-
- private void runLatencyTest(String tableName) throws Exception {
-    // should automatically flush after 1 second (the configured max latency)
- BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig().setMaxLatency(1000, TimeUnit.MILLISECONDS));
- Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
-
- Mutation m = new Mutation(new Text(String.format("r_%10d", 1)));
- m.put(new Text("cf"), new Text("cq"), new Value("1".getBytes(UTF_8)));
- bw.addMutation(m);
-
- UtilWaitThread.sleep(500);
-
- int count = Iterators.size(scanner.iterator());
-
- if (count != 0) {
- throw new Exception("Flushed too soon");
- }
-
- UtilWaitThread.sleep(1500);
-
- count = Iterators.size(scanner.iterator());
-
- if (count != 1) {
- throw new Exception("Did not flush");
- }
-
- bw.close();
- }
-
- private void runFlushTest(String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException,
- Exception {
- BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
- Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
- Random r = new Random();
-
- for (int i = 0; i < 4; i++) {
- for (int j = 0; j < NUM_TO_FLUSH; j++) {
- int row = i * NUM_TO_FLUSH + j;
-
- Mutation m = new Mutation(new Text(String.format("r_%10d", row)));
- m.put(new Text("cf"), new Text("cq"), new Value(("" + row).getBytes()));
- bw.addMutation(m);
- }
-
- bw.flush();
-
- // do a few random lookups into the data just flushed
-
- for (int k = 0; k < 10; k++) {
- int rowToLookup = r.nextInt(NUM_TO_FLUSH) + i * NUM_TO_FLUSH;
-
- scanner.setRange(new Range(new Text(String.format("r_%10d", rowToLookup))));
-
- Iterator<Entry<Key,Value>> iter = scanner.iterator();
-
- if (!iter.hasNext())
- throw new Exception(" row " + rowToLookup + " not found after flush");
-
- Entry<Key,Value> entry = iter.next();
-
- if (iter.hasNext())
- throw new Exception("Scanner returned too much");
-
- verifyEntry(rowToLookup, entry);
- }
-
- // scan all data just flushed
- scanner.setRange(new Range(new Text(String.format("r_%10d", i * NUM_TO_FLUSH)), true, new Text(String.format("r_%10d", (i + 1) * NUM_TO_FLUSH)), false));
- Iterator<Entry<Key,Value>> iter = scanner.iterator();
-
- for (int j = 0; j < NUM_TO_FLUSH; j++) {
- int row = i * NUM_TO_FLUSH + j;
-
- if (!iter.hasNext())
- throw new Exception("Scan stopped permaturely at " + row);
-
- Entry<Key,Value> entry = iter.next();
-
- verifyEntry(row, entry);
- }
-
- if (iter.hasNext())
- throw new Exception("Scanner returned too much");
-
- }
-
- bw.close();
-
- // test adding a mutation to a closed batch writer
- boolean caught = false;
- try {
- bw.addMutation(new Mutation(new Text("foobar")));
- } catch (IllegalStateException ise) {
- caught = true;
- }
-
- if (!caught) {
- throw new Exception("Adding to closed batch writer did not fail");
- }
- }
-
- private void verifyEntry(int row, Entry<Key,Value> entry) throws Exception {
- if (!entry.getKey().getRow().toString().equals(String.format("r_%10d", row))) {
- throw new Exception("Unexpected key returned, expected " + row + " got " + entry.getKey());
- }
-
- if (!entry.getValue().toString().equals("" + row)) {
- throw new Exception("Unexpected value, expected " + row + " got " + entry.getValue());
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/BigRootTabletIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BigRootTabletIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BigRootTabletIT.java
deleted file mode 100644
index 11dcb66..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/BigRootTabletIT.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertTrue;
-
-import java.util.Map;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class BigRootTabletIT extends AccumuloClusterHarness {
-  // ACCUMULO-542: A large root tablet will fail to load if it doesn't fit in the tserver scan buffers
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- Map<String,String> siteConfig = cfg.getSiteConfig();
- siteConfig.put(Property.TABLE_SCAN_MAXMEM.getKey(), "1024");
- siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "60m");
- cfg.setSiteConfig(siteConfig);
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 4 * 60;
- }
-
- @Test
- public void test() throws Exception {
- Connector c = getConnector();
- c.tableOperations().addSplits(MetadataTable.NAME, FunctionalTestUtils.splits("0 1 2 3 4 5 6 7 8 9 a".split(" ")));
- String[] names = getUniqueNames(10);
- for (String name : names) {
- c.tableOperations().create(name);
- c.tableOperations().flush(MetadataTable.NAME, null, null, true);
- c.tableOperations().flush(RootTable.NAME, null, null, true);
- }
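-    // restart so the root tablet, now referencing many files, must be reloaded within the 1KB scan buffer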
- cluster.stop();
- cluster.start();
- assertTrue(Iterators.size(c.createScanner(RootTable.NAME, Authorizations.EMPTY).iterator()) > 0);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/BinaryIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BinaryIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BinaryIT.java
deleted file mode 100644
index 85716d5..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/BinaryIT.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.test.TestBinaryRows;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class BinaryIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 90;
- }
-
- @Test
- public void test() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- runTest(c, tableName);
- }
-
- @Test
- public void testPreSplit() throws Exception {
- String tableName = getUniqueNames(1)[0];
- Connector c = getConnector();
- c.tableOperations().create(tableName);
- SortedSet<Text> splits = new TreeSet<Text>();
- splits.add(new Text("8"));
- splits.add(new Text("256"));
- c.tableOperations().addSplits(tableName, splits);
- runTest(c, tableName);
- }
-
- public static void runTest(Connector c, String tableName) throws Exception {
- BatchWriterOpts bwOpts = new BatchWriterOpts();
- ScannerOpts scanOpts = new ScannerOpts();
- TestBinaryRows.Opts opts = new TestBinaryRows.Opts();
- opts.setTableName(tableName);
- opts.start = 0;
- opts.num = 100000;
- opts.mode = "ingest";
- TestBinaryRows.runTest(c, opts, bwOpts, scanOpts);
- opts.mode = "verify";
- TestBinaryRows.runTest(c, opts, bwOpts, scanOpts);
- opts.start = 25000;
- opts.num = 50000;
- opts.mode = "delete";
- TestBinaryRows.runTest(c, opts, bwOpts, scanOpts);
- opts.start = 0;
- opts.num = 25000;
- opts.mode = "verify";
- TestBinaryRows.runTest(c, opts, bwOpts, scanOpts);
- opts.start = 75000;
- opts.num = 25000;
- opts.mode = "randomLookups";
- TestBinaryRows.runTest(c, opts, bwOpts, scanOpts);
- opts.start = 25000;
- opts.num = 50000;
- opts.mode = "verifyDeleted";
- TestBinaryRows.runTest(c, opts, bwOpts, scanOpts);
- }
-
-}
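
The "binary" being exercised here is row keys made of arbitrary bytes rather than printable strings. As a hedged sketch of what that means at the API level (the encoding below is illustrative only; TestBinaryRows' actual row layout may differ):

    import java.nio.ByteBuffer;
    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;

    public class BinaryRowSketch {
      // Write one entry whose row key is raw bytes: an 8-byte big-endian long.
      static void writeBinaryRow(BatchWriter bw, long id) throws Exception {
        byte[] row = ByteBuffer.allocate(8).putLong(id).array();
        Mutation m = new Mutation(row);
        m.put("cf".getBytes(), "cq".getBytes(), new Value("v".getBytes()));
        bw.addMutation(m);
      }
    }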
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/BinaryStressIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BinaryStressIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BinaryStressIT.java
deleted file mode 100644
index 440d2cf..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/BinaryStressIT.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertTrue;
-
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.InstanceOperations;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class BinaryStressIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 4 * 60;
- }
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "3s");
- cfg.setProperty(Property.TSERV_MAXMEM, "50K");
- cfg.setProperty(Property.TSERV_MAJC_DELAY, "0");
- }
-
- private String majcDelay, maxMem;
-
- @Before
- public void alterConfig() throws Exception {
- if (ClusterType.MINI == getClusterType()) {
- return;
- }
-
- InstanceOperations iops = getConnector().instanceOperations();
- Map<String,String> conf = iops.getSystemConfiguration();
- majcDelay = conf.get(Property.TSERV_MAJC_DELAY.getKey());
- maxMem = conf.get(Property.TSERV_MAXMEM.getKey());
-
- iops.setProperty(Property.TSERV_MAJC_DELAY.getKey(), "0");
- iops.setProperty(Property.TSERV_MAXMEM.getKey(), "50K");
-
- getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
- getClusterControl().startAllServers(ServerType.TABLET_SERVER);
- }
-
- @After
- public void resetConfig() throws Exception {
- if (null != majcDelay) {
- InstanceOperations iops = getConnector().instanceOperations();
- iops.setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
- iops.setProperty(Property.TSERV_MAXMEM.getKey(), maxMem);
-
- getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
- getClusterControl().startAllServers(ServerType.TABLET_SERVER);
- }
- }
-
- @Test
- public void binaryStressTest() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
- BinaryIT.runTest(c, tableName);
- String id = c.tableOperations().tableIdMap().get(tableName);
- Set<Text> tablets = new HashSet<>();
- Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(Range.prefix(id));
- for (Entry<Key,Value> entry : s) {
- tablets.add(entry.getKey().getRow());
- }
- assertTrue("Expected at least 8 tablets, saw " + tablets.size(), tablets.size() > 7);
- }
-
-}
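
The alterConfig()/resetConfig() pair above is a reusable pattern for standalone clusters: snapshot the live system configuration, override it for the test, and restore it afterwards. A minimal sketch of that pattern, with invented helper names, assuming only the InstanceOperations calls already used above:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.accumulo.core.client.admin.InstanceOperations;

    public class ConfigSnapshot {
      private final Map<String,String> saved = new HashMap<>();

      // Remember the current value of each key, then apply the override.
      void override(InstanceOperations iops, Map<String,String> overrides) throws Exception {
        Map<String,String> current = iops.getSystemConfiguration();
        for (Map.Entry<String,String> e : overrides.entrySet()) {
          saved.put(e.getKey(), current.get(e.getKey()));
          iops.setProperty(e.getKey(), e.getValue());
        }
      }

      // Put back whatever was there before, skipping keys that were unset.
      void restore(InstanceOperations iops) throws Exception {
        for (Map.Entry<String,String> e : saved.entrySet()) {
          if (e.getValue() != null) {
            iops.setProperty(e.getKey(), e.getValue());
          }
        }
      }
    }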
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/KerberosProxyIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/KerberosProxyIT.java b/test/src/test/java/org/apache/accumulo/test/functional/KerberosProxyIT.java
deleted file mode 100644
index 31d1329..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/KerberosProxyIT.java
+++ /dev/null
@@ -1,426 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.net.ConnectException;
-import java.net.InetAddress;
-import java.nio.ByteBuffer;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.rpc.UGIAssumingTransport;
-import org.apache.accumulo.harness.AccumuloITBase;
-import org.apache.accumulo.harness.MiniClusterConfigurationCallback;
-import org.apache.accumulo.harness.MiniClusterHarness;
-import org.apache.accumulo.harness.TestingKdc;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.proxy.Proxy;
-import org.apache.accumulo.proxy.ProxyServer;
-import org.apache.accumulo.proxy.thrift.AccumuloProxy;
-import org.apache.accumulo.proxy.thrift.AccumuloProxy.Client;
-import org.apache.accumulo.proxy.thrift.AccumuloSecurityException;
-import org.apache.accumulo.proxy.thrift.ColumnUpdate;
-import org.apache.accumulo.proxy.thrift.Key;
-import org.apache.accumulo.proxy.thrift.KeyValue;
-import org.apache.accumulo.proxy.thrift.ScanOptions;
-import org.apache.accumulo.proxy.thrift.ScanResult;
-import org.apache.accumulo.proxy.thrift.TimeType;
-import org.apache.accumulo.proxy.thrift.WriterOptions;
-import org.apache.accumulo.server.util.PortUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.transport.TSaslClientTransport;
-import org.apache.thrift.transport.TSocket;
-import org.apache.thrift.transport.TTransportException;
-import org.hamcrest.Description;
-import org.hamcrest.TypeSafeMatcher;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Tests impersonation of clients by the proxy over SASL
- */
-public class KerberosProxyIT extends AccumuloITBase {
- private static final Logger log = LoggerFactory.getLogger(KerberosProxyIT.class);
-
- @Rule
- public ExpectedException thrown = ExpectedException.none();
-
- private static TestingKdc kdc;
- private static String krbEnabledForITs = null;
- private static File proxyKeytab;
- private static String hostname, proxyPrimary, proxyPrincipal;
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60 * 5;
- }
-
- @BeforeClass
- public static void startKdc() throws Exception {
- kdc = new TestingKdc();
- kdc.start();
- krbEnabledForITs = System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION);
- if (null == krbEnabledForITs || !Boolean.parseBoolean(krbEnabledForITs)) {
- System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, "true");
- }
-
- // Create a principal+keytab for the proxy
- proxyKeytab = new File(kdc.getKeytabDir(), "proxy.keytab");
- hostname = InetAddress.getLocalHost().getCanonicalHostName();
- // Set the primary because the client needs to know it
- proxyPrimary = "proxy";
- // Qualify with an instance
- proxyPrincipal = proxyPrimary + "/" + hostname;
- kdc.createPrincipal(proxyKeytab, proxyPrincipal);
- // Tack on the realm too
- proxyPrincipal = kdc.qualifyUser(proxyPrincipal);
- }
-
- @AfterClass
- public static void stopKdc() throws Exception {
- if (null != kdc) {
- kdc.stop();
- }
- if (null != krbEnabledForITs) {
- System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, krbEnabledForITs);
- }
- }
-
- private MiniAccumuloClusterImpl mac;
- private Process proxyProcess;
- private int proxyPort;
-
- @Before
- public void startMac() throws Exception {
- MiniClusterHarness harness = new MiniClusterHarness();
- mac = harness.create(getClass().getName(), testName.getMethodName(), new PasswordToken("unused"), new MiniClusterConfigurationCallback() {
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
- cfg.setNumTservers(1);
- Map<String,String> siteCfg = cfg.getSiteConfig();
- // Allow the proxy to impersonate the client user, but no one else
- siteCfg.put(Property.INSTANCE_RPC_SASL_PROXYUSERS.getKey() + proxyPrincipal + ".users", kdc.getRootUser().getPrincipal());
- siteCfg.put(Property.INSTANCE_RPC_SASL_PROXYUSERS.getKey() + proxyPrincipal + ".hosts", "*");
- cfg.setSiteConfig(siteCfg);
- }
-
- }, kdc);
-
- mac.start();
- MiniAccumuloConfigImpl cfg = mac.getConfig();
-
- // Proxy configuration
- proxyPort = PortUtils.getRandomFreePort();
- File proxyPropertiesFile = new File(cfg.getConfDir(), "proxy.properties");
- Properties proxyProperties = new Properties();
- proxyProperties.setProperty("useMockInstance", "false");
- proxyProperties.setProperty("useMiniAccumulo", "false");
- proxyProperties.setProperty("protocolFactory", TCompactProtocol.Factory.class.getName());
- proxyProperties.setProperty("tokenClass", KerberosToken.class.getName());
- proxyProperties.setProperty("port", Integer.toString(proxyPort));
- proxyProperties.setProperty("maxFrameSize", "16M");
- proxyProperties.setProperty("instance", mac.getInstanceName());
- proxyProperties.setProperty("zookeepers", mac.getZooKeepers());
- proxyProperties.setProperty("thriftServerType", "sasl");
- proxyProperties.setProperty("kerberosPrincipal", proxyPrincipal);
- proxyProperties.setProperty("kerberosKeytab", proxyKeytab.getCanonicalPath());
-
- // Write out the proxy.properties file
- FileWriter writer = new FileWriter(proxyPropertiesFile);
- proxyProperties.store(writer, "Configuration for Accumulo proxy");
- writer.close();
-
- proxyProcess = mac.exec(Proxy.class, "-p", proxyPropertiesFile.getCanonicalPath());
-
- // Enable kerberos auth
- Configuration conf = new Configuration(false);
- conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
- UserGroupInformation.setConfiguration(conf);
-
- boolean success = false;
- ClusterUser rootUser = kdc.getRootUser();
- for (int i = 0; i < 10 && !success; i++) {
-
- UserGroupInformation ugi;
- try {
- UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
- ugi = UserGroupInformation.getCurrentUser();
- } catch (IOException ex) {
- log.info("Login as root is failing", ex);
- Thread.sleep(1000);
- continue;
- }
-
- TSocket socket = new TSocket(hostname, proxyPort);
- log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
- TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop",
- "auth"), null, socket);
-
- final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);
-
- try {
- // UGI transport will perform the doAs for us
- ugiTransport.open();
- success = true;
- } catch (TTransportException e) {
- Throwable cause = e.getCause();
- if (null != cause && cause instanceof ConnectException) {
- log.info("Proxy not yet up, waiting");
- Thread.sleep(1000);
- continue;
- }
- } finally {
- if (null != ugiTransport) {
- ugiTransport.close();
- }
- }
- }
-
- assertTrue("Failed to connect to the proxy repeatedly", success);
- }
-
- @After
- public void stopMac() throws Exception {
- if (null != proxyProcess) {
- log.info("Destroying proxy process");
- proxyProcess.destroy();
- log.info("Waiting for proxy termination");
- proxyProcess.waitFor();
- log.info("Proxy terminated");
- }
- if (null != mac) {
- mac.stop();
- }
- }
-
- @Test
- public void testProxyClient() throws Exception {
- ClusterUser rootUser = kdc.getRootUser();
- UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
- UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-
- TSocket socket = new TSocket(hostname, proxyPort);
- log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
- TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop",
- "auth"), null, socket);
-
- final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);
-
- // UGI transport will perform the doAs for us
- ugiTransport.open();
-
- AccumuloProxy.Client.Factory factory = new AccumuloProxy.Client.Factory();
- Client client = factory.getClient(new TCompactProtocol(ugiTransport), new TCompactProtocol(ugiTransport));
-
- // Will fail if the proxy cannot impersonate the client
- ByteBuffer login = client.login(rootUser.getPrincipal(), Collections.<String,String> emptyMap());
-
- // For all of the below actions, the proxy user doesn't have permission to do any of them, but the client user does.
- // The fact that any of them actually run tells us that impersonation is working.
-
- // Create a table
- String table = "table";
- if (!client.tableExists(login, table)) {
- client.createTable(login, table, true, TimeType.MILLIS);
- }
-
- // Write two records to the table
- String writer = client.createWriter(login, table, new WriterOptions());
- Map<ByteBuffer,List<ColumnUpdate>> updates = new HashMap<>();
- ColumnUpdate update = new ColumnUpdate(ByteBuffer.wrap("cf1".getBytes(UTF_8)), ByteBuffer.wrap("cq1".getBytes(UTF_8)));
- update.setValue(ByteBuffer.wrap("value1".getBytes(UTF_8)));
- updates.put(ByteBuffer.wrap("row1".getBytes(UTF_8)), Collections.<ColumnUpdate> singletonList(update));
- update = new ColumnUpdate(ByteBuffer.wrap("cf2".getBytes(UTF_8)), ByteBuffer.wrap("cq2".getBytes(UTF_8)));
- update.setValue(ByteBuffer.wrap("value2".getBytes(UTF_8)));
- updates.put(ByteBuffer.wrap("row2".getBytes(UTF_8)), Collections.<ColumnUpdate> singletonList(update));
- client.update(writer, updates);
-
- // Flush and close the writer
- client.flush(writer);
- client.closeWriter(writer);
-
- // Open a scanner to the table
- String scanner = client.createScanner(login, table, new ScanOptions());
- ScanResult results = client.nextK(scanner, 10);
- assertEquals(2, results.getResults().size());
-
- // Check the first key-value
- KeyValue kv = results.getResults().get(0);
- Key k = kv.key;
- ByteBuffer v = kv.value;
- assertEquals(ByteBuffer.wrap("row1".getBytes(UTF_8)), k.row);
- assertEquals(ByteBuffer.wrap("cf1".getBytes(UTF_8)), k.colFamily);
- assertEquals(ByteBuffer.wrap("cq1".getBytes(UTF_8)), k.colQualifier);
- assertEquals(ByteBuffer.wrap(new byte[0]), k.colVisibility);
- assertEquals(ByteBuffer.wrap("value1".getBytes(UTF_8)), v);
-
- // And then the second
- kv = results.getResults().get(1);
- k = kv.key;
- v = kv.value;
- assertEquals(ByteBuffer.wrap("row2".getBytes(UTF_8)), k.row);
- assertEquals(ByteBuffer.wrap("cf2".getBytes(UTF_8)), k.colFamily);
- assertEquals(ByteBuffer.wrap("cq2".getBytes(UTF_8)), k.colQualifier);
- assertEquals(ByteBuffer.wrap(new byte[0]), k.colVisibility);
- assertEquals(ByteBuffer.wrap("value2".getBytes(UTF_8)), v);
-
- // Close the scanner
- client.closeScanner(scanner);
-
- ugiTransport.close();
- }
-
- @Test
- public void testDisallowedClientForImpersonation() throws Exception {
- String user = testName.getMethodName();
- File keytab = new File(kdc.getKeytabDir(), user + ".keytab");
- kdc.createPrincipal(keytab, user);
-
- // Login as the new user
- UserGroupInformation.loginUserFromKeytab(user, keytab.getAbsolutePath());
- UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-
- log.info("Logged in as " + ugi);
-
- // Expect an AccumuloSecurityException
- thrown.expect(AccumuloSecurityException.class);
- // Error msg would look like:
- //
- // org.apache.accumulo.core.client.AccumuloSecurityException: Error BAD_CREDENTIALS for user Principal in credentials object should match kerberos
- // principal.
- // Expected 'proxy/hw10447.local@EXAMPLE.COM' but was 'testDisallowedClientForImpersonation@EXAMPLE.COM' - Username or Password is Invalid)
- thrown.expect(new ThriftExceptionMatchesPattern(".*Error BAD_CREDENTIALS.*"));
- thrown.expect(new ThriftExceptionMatchesPattern(".*Expected '" + proxyPrincipal + "' but was '" + kdc.qualifyUser(user) + "'.*"));
-
- TSocket socket = new TSocket(hostname, proxyPort);
- log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
-
- // Should fail to open the transport
- TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop",
- "auth"), null, socket);
-
- final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);
-
- // UGI transport will perform the doAs for us
- ugiTransport.open();
-
- AccumuloProxy.Client.Factory factory = new AccumuloProxy.Client.Factory();
- Client client = factory.getClient(new TCompactProtocol(ugiTransport), new TCompactProtocol(ugiTransport));
-
- // Will fail because the proxy can't impersonate this user (per the site configuration)
- try {
- client.login(kdc.qualifyUser(user), Collections.<String,String> emptyMap());
- } finally {
- if (null != ugiTransport) {
- ugiTransport.close();
- }
- }
- }
-
- @Test
- public void testMismatchPrincipals() throws Exception {
- ClusterUser rootUser = kdc.getRootUser();
- // Should get an AccumuloSecurityException and the given message
- thrown.expect(AccumuloSecurityException.class);
- thrown.expect(new ThriftExceptionMatchesPattern(ProxyServer.RPC_ACCUMULO_PRINCIPAL_MISMATCH_MSG));
-
- // Make a new user
- String user = testName.getMethodName();
- File keytab = new File(kdc.getKeytabDir(), user + ".keytab");
- kdc.createPrincipal(keytab, user);
-
- // Login as the new user
- UserGroupInformation.loginUserFromKeytab(user, keytab.getAbsolutePath());
- UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-
- log.info("Logged in as " + ugi);
-
- TSocket socket = new TSocket(hostname, proxyPort);
- log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
-
- // Should fail to open the transport
- TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop",
- "auth"), null, socket);
-
- final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);
-
- // UGI transport will perform the doAs for us
- ugiTransport.open();
-
- AccumuloProxy.Client.Factory factory = new AccumuloProxy.Client.Factory();
- Client client = factory.getClient(new TCompactProtocol(ugiTransport), new TCompactProtocol(ugiTransport));
-
- // The proxy needs to recognize that the requested principal isn't the same as the SASL principal and fail
- // Accumulo should let this through -- we rely on the proxy to reject the mismatched principal before talking to accumulo
- try {
- client.login(rootUser.getPrincipal(), Collections.<String,String> emptyMap());
- } finally {
- if (null != ugiTransport) {
- ugiTransport.close();
- }
- }
- }
-
- private static class ThriftExceptionMatchesPattern extends TypeSafeMatcher<AccumuloSecurityException> {
- private String pattern;
-
- public ThriftExceptionMatchesPattern(String pattern) {
- this.pattern = pattern;
- }
-
- @Override
- protected boolean matchesSafely(AccumuloSecurityException item) {
- return item.isSetMsg() && item.msg.matches(pattern);
- }
-
- @Override
- public void describeTo(Description description) {
- description.appendText("matches pattern ").appendValue(pattern);
- }
-
- @Override
- protected void describeMismatchSafely(AccumuloSecurityException item, Description mismatchDescription) {
- mismatchDescription.appendText("does not match");
- }
- }
-}
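
The same transport stack is assembled three times in the tests above. Condensed into one helper (same classes and calls as the test, just factored out for readability), the layering is: TCP socket, wrapped by SASL/GSSAPI, wrapped by a UGI doAs adapter, and finally a compact-protocol Thrift client:

    import java.util.Collections;
    import org.apache.accumulo.core.rpc.UGIAssumingTransport;
    import org.apache.accumulo.proxy.thrift.AccumuloProxy;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.thrift.transport.TSaslClientTransport;
    import org.apache.thrift.transport.TSocket;

    public class ProxyClientSketch {
      static AccumuloProxy.Client connect(String host, int port, String primary, UserGroupInformation ugi) throws Exception {
        TSocket socket = new TSocket(host, port);
        // SASL/GSSAPI layer: authenticates the Kerberos principal to the proxy.
        TSaslClientTransport sasl = new TSaslClientTransport("GSSAPI", null, primary, host,
            Collections.singletonMap("javax.security.sasl.qop", "auth"), null, socket);
        // Runs open/read/write inside ugi.doAs(...) so the right credentials are used.
        UGIAssumingTransport transport = new UGIAssumingTransport(sasl, ugi);
        transport.open();
        return new AccumuloProxy.Client.Factory().getClient(new TCompactProtocol(transport), new TCompactProtocol(transport));
      }
    }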
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java b/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
deleted file mode 100644
index 72b51eb..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Random;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.MemoryUnit;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class LargeRowIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(LargeRowIT.class);
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setMemory(ServerType.TABLET_SERVER, cfg.getMemory(ServerType.TABLET_SERVER) * 2, MemoryUnit.BYTE);
- Map<String,String> siteConfig = cfg.getSiteConfig();
- siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "10ms");
- cfg.setSiteConfig(siteConfig);
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 4 * 60;
- }
-
- private static final int SEED = 42;
- private static final int NUM_ROWS = 100;
- private static final int ROW_SIZE = 1 << 17;
- private static final int NUM_PRE_SPLITS = 9;
- private static final int SPLIT_THRESH = ROW_SIZE * NUM_ROWS / NUM_PRE_SPLITS;
-
- private String REG_TABLE_NAME;
- private String PRE_SPLIT_TABLE_NAME;
- private int timeoutFactor = 1;
- private String tservMajcDelay;
-
- @Before
- public void getTimeoutFactor() throws Exception {
- try {
- timeoutFactor = Integer.parseInt(System.getProperty("timeout.factor"));
- } catch (NumberFormatException e) {
- log.warn("Could not parse property value for 'timeout.factor' as integer: " + System.getProperty("timeout.factor"));
- }
-
- Assert.assertTrue("Timeout factor must be greater than or equal to 1", timeoutFactor >= 1);
-
- String[] names = getUniqueNames(2);
- REG_TABLE_NAME = names[0];
- PRE_SPLIT_TABLE_NAME = names[1];
-
- Connector c = getConnector();
- tservMajcDelay = c.instanceOperations().getSystemConfiguration().get(Property.TSERV_MAJC_DELAY.getKey());
- c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), "10ms");
- }
-
- @After
- public void resetMajcDelay() throws Exception {
- if (null != tservMajcDelay) {
- Connector conn = getConnector();
- conn.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), tservMajcDelay);
- }
- }
-
- @Test
- public void run() throws Exception {
- Random r = new Random();
- byte rowData[] = new byte[ROW_SIZE];
- r.setSeed(SEED + 1);
- TreeSet<Text> splitPoints = new TreeSet<Text>();
- for (int i = 0; i < NUM_PRE_SPLITS; i++) {
- r.nextBytes(rowData);
- TestIngest.toPrintableChars(rowData);
- splitPoints.add(new Text(rowData));
- }
- Connector c = getConnector();
- c.tableOperations().create(REG_TABLE_NAME);
- c.tableOperations().create(PRE_SPLIT_TABLE_NAME);
- c.tableOperations().setProperty(PRE_SPLIT_TABLE_NAME, Property.TABLE_MAX_END_ROW_SIZE.getKey(), "256K");
- UtilWaitThread.sleep(3 * 1000);
- c.tableOperations().addSplits(PRE_SPLIT_TABLE_NAME, splitPoints);
- test1(c);
- test2(c);
- }
-
- private void test1(Connector c) throws Exception {
-
- basicTest(c, REG_TABLE_NAME, 0);
-
- c.tableOperations().setProperty(REG_TABLE_NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "" + SPLIT_THRESH);
-
- UtilWaitThread.sleep(timeoutFactor * 12000);
- log.info("checking splits");
- FunctionalTestUtils.checkSplits(c, REG_TABLE_NAME, NUM_PRE_SPLITS / 2, NUM_PRE_SPLITS * 4);
-
- verify(c, REG_TABLE_NAME);
- }
-
- private void test2(Connector c) throws Exception {
- basicTest(c, PRE_SPLIT_TABLE_NAME, NUM_PRE_SPLITS);
- }
-
- private void basicTest(Connector c, String table, int expectedSplits) throws Exception {
- BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
-
- Random r = new Random();
- byte rowData[] = new byte[ROW_SIZE];
-
- r.setSeed(SEED);
-
- for (int i = 0; i < NUM_ROWS; i++) {
-
- r.nextBytes(rowData);
- TestIngest.toPrintableChars(rowData);
-
- Mutation mut = new Mutation(new Text(rowData));
- mut.put(new Text(""), new Text(""), new Value(Integer.toString(i).getBytes(UTF_8)));
- bw.addMutation(mut);
- }
-
- bw.close();
-
- FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
-
- verify(c, table);
-
- FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
-
- c.tableOperations().flush(table, null, null, false);
-
- // verify while table flush is running
- verify(c, table);
-
- // give split time to complete
- c.tableOperations().flush(table, null, null, true);
-
- FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
-
- verify(c, table);
-
- FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
- }
-
- private void verify(Connector c, String table) throws Exception {
- Random r = new Random();
- byte rowData[] = new byte[ROW_SIZE];
-
- r.setSeed(SEED);
-
- Scanner scanner = c.createScanner(table, Authorizations.EMPTY);
-
- for (int i = 0; i < NUM_ROWS; i++) {
-
- r.nextBytes(rowData);
- TestIngest.toPrintableChars(rowData);
-
- scanner.setRange(new Range(new Text(rowData)));
-
- int count = 0;
-
- for (Entry<Key,Value> entry : scanner) {
- if (!entry.getKey().getRow().equals(new Text(rowData))) {
- throw new Exception("verification failed, unexpected row i =" + i);
- }
- if (!entry.getValue().equals(Integer.toString(i).getBytes(UTF_8))) {
- throw new Exception("verification failed, unexpected value i =" + i + " value = " + entry.getValue());
- }
- count++;
- }
-
- if (count != 1) {
- throw new Exception("verification failed, unexpected count i =" + i + " count=" + count);
- }
-
- }
-
- }
-
-}
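
The reason verify() never needs to remember what was ingested: java.util.Random is deterministic for a fixed seed, so reseeding with SEED replays the exact row bytes that basicTest() wrote. In miniature:

    import java.util.Arrays;
    import java.util.Random;

    public class SeededRandomSketch {
      public static void main(String[] args) {
        byte[] first = new byte[16];
        byte[] second = new byte[16];
        Random r = new Random();
        r.setSeed(42); // same seed ...
        r.nextBytes(first);
        r.setSeed(42); // ... replayed from scratch
        r.nextBytes(second);
        System.out.println(Arrays.equals(first, second)); // prints true
      }
    }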
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/LateLastContactIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/LateLastContactIT.java b/test/src/test/java/org/apache/accumulo/test/functional/LateLastContactIT.java
deleted file mode 100644
index 9c310f0..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/LateLastContactIT.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.Collections;
-
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
-/**
- * Fake the "tablet stops talking but holds its lock" problem we see when hard drives and NFS fail. Start a ZombieTServer, and see that the master stops it.
- */
-public class LateLastContactIT extends ConfigurableMacBase {
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setSiteConfig(Collections.singletonMap(Property.GENERAL_RPC_TIMEOUT.getKey(), "2s"));
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Test
- public void test() throws Exception {
- Process zombie = cluster.exec(ZombieTServer.class);
- assertEquals(0, zombie.waitFor());
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java b/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
deleted file mode 100644
index 1e7fef0..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.NewTableConfiguration;
-import org.apache.accumulo.core.client.admin.TimeType;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class LogicalTimeIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(LogicalTimeIT.class);
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 4 * 60;
- }
-
- @Test
- public void run() throws Exception {
- int tc = 0;
- String tableName = getUniqueNames(1)[0];
- Connector c = getConnector();
- runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"a"}, null, null, "b", 2l);
- runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"z"}, null, null, "b", 2l);
- runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"a", "z"}, null, null, "b", 2l);
- runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"a", "c", "z"}, null, null, "b", 3l);
- runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"a", "y", "z"}, null, null, "b", 3l);
-
- runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a"}, null, null, "b", 2l);
- runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"h"}, null, null, "b", 2l);
- runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"s"}, null, null, "b", 2l);
- runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s"}, null, null, "b", 2l);
- runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "c", "h", "s"}, null, null, "b", 3l);
- runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s", "i"}, null, null, "b", 3l);
- runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"t", "a", "h", "s"}, null, null, "b", 3l);
-
- runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a"}, null, "h", "b", 2l);
- runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"h"}, null, "h", "b", 2l);
- runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"s"}, null, "h", "b", 1l);
- runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s"}, null, "h", "b", 2l);
- runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "c", "h", "s"}, null, "h", "b", 3l);
- runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s", "i"}, null, "h", "b", 3l);
- runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"t", "a", "h", "s"}, null, "h", "b", 2l);
-
- }
-
- private void runMergeTest(Connector conn, String table, String[] splits, String[] inserts, String start, String end, String last, long expected)
- throws Exception {
- log.info("table " + table);
- conn.tableOperations().create(table, new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
- TreeSet<Text> splitSet = new TreeSet<Text>();
- for (String split : splits) {
- splitSet.add(new Text(split));
- }
- conn.tableOperations().addSplits(table, splitSet);
-
- BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
- for (String row : inserts) {
- Mutation m = new Mutation(row);
- m.put("cf", "cq", "v");
- bw.addMutation(m);
- }
-
- bw.flush();
-
- conn.tableOperations().merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end));
-
- Mutation m = new Mutation(last);
- m.put("cf", "cq", "v");
- bw.addMutation(m);
- bw.flush();
-
- Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
- scanner.setRange(new Range(last));
-
- bw.close();
-
- long time = scanner.iterator().next().getKey().getTimestamp();
- if (time != expected)
- throw new RuntimeException("unexpected time " + time + " " + expected);
- }
-
-}
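
The expected timestamps passed to runMergeTest follow from how logical (counter-per-tablet) time behaves across a merge: each tablet keeps its own counter, the merged tablet adopts the maximum of the merged counters, and the next mutation is stamped max + 1. Worked through for one case from the list above:

    // Case: splits {"m"}, inserts {"a", "c", "z"}, merge everything, write "b" -> expect 3.
    //
    //   tablet (-inf, "m"]: insert "a" -> counter 1; insert "c" -> counter 2
    //   tablet ("m", +inf): insert "z" -> counter 1
    //   merge             : merged tablet adopts max(2, 1) = 2
    //   insert "b"        : stamped 3, the expected value for that case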
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
deleted file mode 100644
index 8c4666c..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.IOException;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-import java.util.Collections;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.Base64;
-import org.apache.accumulo.examples.simple.mapreduce.RowHash;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class MapReduceIT extends ConfigurableMacBase {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60;
- }
-
- public static final String hadoopTmpDirArg = "-Dhadoop.tmp.dir=" + System.getProperty("user.dir") + "/target/hadoop-tmp";
-
- static final String tablename = "mapredf";
- static final String input_cf = "cf-HASHTYPE";
- static final String input_cq = "cq-NOTHASHED";
- static final String input_cfcq = input_cf + ":" + input_cq;
- static final String output_cq = "cq-MD4BASE64";
- static final String output_cfcq = input_cf + ":" + output_cq;
-
- @Test
- public void test() throws Exception {
- runTest(getConnector(), getCluster());
- }
-
- static void runTest(Connector c, MiniAccumuloClusterImpl cluster) throws AccumuloException, AccumuloSecurityException, TableExistsException,
- TableNotFoundException, MutationsRejectedException, IOException, InterruptedException, NoSuchAlgorithmException {
- c.tableOperations().create(tablename);
- BatchWriter bw = c.createBatchWriter(tablename, new BatchWriterConfig());
- for (int i = 0; i < 10; i++) {
- Mutation m = new Mutation("" + i);
- m.put(input_cf, input_cq, "row" + i);
- bw.addMutation(m);
- }
- bw.close();
- Process hash = cluster.exec(RowHash.class, Collections.singletonList(hadoopTmpDirArg), "-i", c.getInstance().getInstanceName(), "-z", c.getInstance()
- .getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "-t", tablename, "--column", input_cfcq);
- assertEquals(0, hash.waitFor());
-
- Scanner s = c.createScanner(tablename, Authorizations.EMPTY);
- s.fetchColumn(new Text(input_cf), new Text(output_cq));
- int i = 0;
- for (Entry<Key,Value> entry : s) {
- MessageDigest md = MessageDigest.getInstance("MD5");
- byte[] check = Base64.encodeBase64(md.digest(("row" + i).getBytes()));
- assertEquals(entry.getValue().toString(), new String(check));
- i++;
- }
-
- }
-}
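
The verification loop recomputes, on the client side, the digest the RowHash job is expected to have written: the MD5 of the original value, Base64-encoded. Standalone (using java.util.Base64 here instead of Accumulo's wrapper; the encoded bytes are the same for non-chunked standard Base64):

    import java.security.MessageDigest;
    import java.util.Base64;

    public class RowHashCheck {
      public static void main(String[] args) throws Exception {
        int i = 0;
        MessageDigest md = MessageDigest.getInstance("MD5");
        // Input values are ASCII, so the charset choice does not change the bytes.
        byte[] digest = md.digest(("row" + i).getBytes("UTF-8"));
        System.out.println(new String(Base64.getEncoder().encode(digest), "UTF-8"));
      }
    }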
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/MasterAssignmentIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MasterAssignmentIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MasterAssignmentIT.java
deleted file mode 100644
index 72f8ce7..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MasterAssignmentIT.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-
-import java.io.FileNotFoundException;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.impl.ClientContext;
-import org.apache.accumulo.core.client.impl.Credentials;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.fate.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.server.master.state.MetaDataTableScanner;
-import org.apache.accumulo.server.master.state.TabletLocationState;
-import org.apache.commons.configuration.ConfigurationException;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class MasterAssignmentIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Test
- public void test() throws Exception {
- Connector c = getConnector();
- String tableName = super.getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- String tableId = c.tableOperations().tableIdMap().get(tableName);
- // wait for the table to be online
- TabletLocationState newTablet;
- do {
- UtilWaitThread.sleep(250);
- newTablet = getTabletLocationState(c, tableId);
- } while (newTablet.current == null);
- assertNull(newTablet.last);
- assertNull(newTablet.future);
-
- // put something in it
- BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
- Mutation m = new Mutation("a");
- m.put("b", "c", "d");
- bw.addMutation(m);
- bw.close();
- // give it a last location
- c.tableOperations().flush(tableName, null, null, true);
-
- TabletLocationState flushed = getTabletLocationState(c, tableId);
- assertEquals(newTablet.current, flushed.current);
- assertEquals(flushed.current, flushed.last);
- assertNull(newTablet.future);
-
- // take the tablet offline
- c.tableOperations().offline(tableName, true);
- TabletLocationState offline = getTabletLocationState(c, tableId);
- assertNull(offline.future);
- assertNull(offline.current);
- assertEquals(flushed.current, offline.last);
-
- // put it back online
- c.tableOperations().online(tableName, true);
- TabletLocationState online = getTabletLocationState(c, tableId);
- assertNull(online.future);
- assertNotNull(online.current);
- assertEquals(online.current, online.last);
- }
-
- private TabletLocationState getTabletLocationState(Connector c, String tableId) throws FileNotFoundException, ConfigurationException {
- Credentials creds = new Credentials(getAdminPrincipal(), getAdminToken());
- ClientContext context = new ClientContext(c.getInstance(), creds, getCluster().getClientConfig());
- MetaDataTableScanner s = new MetaDataTableScanner(context, new Range(KeyExtent.getMetadataEntry(new Text(tableId), null)));
- TabletLocationState tlState = s.next();
- s.close();
- return tlState;
- }
-}
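
What the assertions are tracing is the lifecycle of a tablet's three location fields, summarized here exactly as this test observes them:

    state of the tablet      future   current    last
    just created (online)    null     tserver    null
    after flush              null     tserver    same as current
    taken offline            null     null       previous tserver
    brought back online      null     tserver    same as current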
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MasterFailoverIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
deleted file mode 100644
index 3489c26..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Map;
-
-import org.apache.accumulo.cluster.ClusterControl;
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
-public class MasterFailoverIT extends AccumuloClusterHarness {
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- Map<String,String> siteConfig = cfg.getSiteConfig();
- siteConfig.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "5s");
- cfg.setSiteConfig(siteConfig);
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 90;
- }
-
- @Test
- public void test() throws Exception {
- Connector c = getConnector();
- String[] names = getUniqueNames(2);
- c.tableOperations().create(names[0]);
- TestIngest.Opts opts = new TestIngest.Opts();
- opts.setTableName(names[0]);
- ClientConfiguration clientConf = cluster.getClientConfig();
- if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- opts.updateKerberosCredentials(clientConf);
- } else {
- opts.setPrincipal(getAdminPrincipal());
- }
- TestIngest.ingest(c, opts, new BatchWriterOpts());
-
- ClusterControl control = cluster.getClusterControl();
- control.stopAllServers(ServerType.MASTER);
- // start up a new one
- control.startAllServers(ServerType.MASTER);
- // talk to it
- c.tableOperations().rename(names[0], names[1]);
- VerifyIngest.Opts vopts = new VerifyIngest.Opts();
- vopts.setTableName(names[1]);
- if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- vopts.updateKerberosCredentials(clientConf);
- } else {
- vopts.setPrincipal(getAdminPrincipal());
- }
- VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java
deleted file mode 100644
index 6f08c1f..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Random;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.admin.InstanceOperations;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * A functional test that exercises hitting the max open file limit on a tablet server. This test assumes there are one or two tablet servers.
- */
-
-public class MaxOpenIT extends AccumuloClusterHarness {
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- Map<String,String> conf = cfg.getSiteConfig();
- conf.put(Property.TSERV_SCAN_MAX_OPENFILES.getKey(), "4");
- conf.put(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "1");
- conf.put(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey(), "2");
- cfg.setSiteConfig(conf);
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 3 * 60;
- }
-
- private String scanMaxOpenFiles, majcConcurrent, majcThreadMaxOpen;
-
- @Before
- public void alterConfig() throws Exception {
- InstanceOperations iops = getConnector().instanceOperations();
- Map<String,String> sysConfig = iops.getSystemConfiguration();
- scanMaxOpenFiles = sysConfig.get(Property.TSERV_SCAN_MAX_OPENFILES.getKey());
- majcConcurrent = sysConfig.get(Property.TSERV_MAJC_MAXCONCURRENT.getKey());
- majcThreadMaxOpen = sysConfig.get(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey());
- }
-
- @After
- public void restoreConfig() throws Exception {
- InstanceOperations iops = getConnector().instanceOperations();
- if (null != scanMaxOpenFiles) {
- iops.setProperty(Property.TSERV_SCAN_MAX_OPENFILES.getKey(), scanMaxOpenFiles);
- }
- if (null != majcConcurrent) {
- iops.setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), majcConcurrent);
- }
- if (null != majcThreadMaxOpen) {
- iops.setProperty(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey(), majcThreadMaxOpen);
- }
- }
-
- private static final int NUM_TABLETS = 16;
- private static final int NUM_TO_INGEST = 10000;
-
- @Test
- public void run() throws Exception {
- final Connector c = getConnector();
- final String tableName = getUniqueNames(1)[0];
- final ClientConfiguration clientConf = cluster.getClientConfig();
- c.tableOperations().create(tableName);
- c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "10");
- c.tableOperations().addSplits(tableName, TestIngest.getSplitPoints(0, NUM_TO_INGEST, NUM_TABLETS));
-
- // the following loop should create three map files in each tablet
- for (int i = 0; i < 3; i++) {
- TestIngest.Opts opts = new TestIngest.Opts();
- opts.timestamp = i;
- opts.dataSize = 50;
- opts.rows = NUM_TO_INGEST;
- opts.cols = 1;
- opts.random = i;
- opts.setTableName(tableName);
- if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- opts.updateKerberosCredentials(clientConf);
- } else {
- opts.setPrincipal(getAdminPrincipal());
- }
- TestIngest.ingest(c, opts, new BatchWriterOpts());
-
- c.tableOperations().flush(tableName, null, null, true);
- FunctionalTestUtils.checkRFiles(c, tableName, NUM_TABLETS, NUM_TABLETS, i + 1, i + 1);
- }
-
- List<Range> ranges = new ArrayList<Range>(NUM_TO_INGEST);
-
- for (int i = 0; i < NUM_TO_INGEST; i++) {
- ranges.add(new Range(TestIngest.generateRow(i, 0)));
- }
-
- long time1 = batchScan(c, tableName, ranges, 1);
- // run it again, now that data is cached on the client and server
- time1 = batchScan(c, tableName, ranges, 1);
- long time2 = batchScan(c, tableName, ranges, NUM_TABLETS);
-
- System.out.printf("Single thread scan time %6.2f %n", time1 / 1000.0);
- System.out.printf("Multiple thread scan time %6.2f %n", time2 / 1000.0);
-
- }
-
- private long batchScan(Connector c, String tableName, List<Range> ranges, int threads) throws Exception {
- BatchScanner bs = c.createBatchScanner(tableName, TestIngest.AUTHS, threads);
-
- bs.setRanges(ranges);
-
- int count = 0;
-
- long t1 = System.currentTimeMillis();
-
- byte rval[] = new byte[50];
- Random random = new Random();
-
- for (Entry<Key,Value> entry : bs) {
- count++;
- int row = VerifyIngest.getRow(entry.getKey());
- int col = VerifyIngest.getCol(entry.getKey());
-
- if (row < 0 || row >= NUM_TO_INGEST) {
- throw new Exception("unexcepted row " + row);
- }
-
- rval = TestIngest.genRandomValue(random, rval, 2, row, col);
-
- if (entry.getValue().compareTo(rval) != 0) {
- throw new Exception("unexcepted value row=" + row + " col=" + col);
- }
- }
-
- long t2 = System.currentTimeMillis();
-
- bs.close();
-
- if (count != NUM_TO_INGEST) {
- throw new Exception("Batch Scan did not return expected number of values " + count);
- }
-
- return t2 - t1;
- }
-
-}
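
For reference, the pattern the MaxOpenIT test above exercises is timing a BatchScanner at different thread counts against a server configured with a very low scan file limit. A minimal sketch of that timing loop, assuming an existing Connector c and an already-populated table (the class and method names here are illustrative, not from the test):

import java.util.Collections;
import java.util.Map.Entry;

import org.apache.accumulo.core.client.BatchScanner;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;

public class BatchScanTiming {
  // Scans the entire table with the given number of query threads and returns elapsed millis.
  static long timeScan(Connector c, String table, int threads) throws Exception {
    BatchScanner bs = c.createBatchScanner(table, Authorizations.EMPTY, threads);
    try {
      bs.setRanges(Collections.singleton(new Range()));
      long start = System.currentTimeMillis();
      for (Entry<Key,Value> entry : bs) {
        // drain every entry; the iteration itself is what is being timed
      }
      return System.currentTimeMillis() - start;
    } finally {
      bs.close();
    }
  }
}
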
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java
deleted file mode 100644
index 9e3e8b6..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.NewTableConfiguration;
-import org.apache.accumulo.core.client.admin.TimeType;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.Merge;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class MergeIT extends AccumuloClusterHarness {
-
- @Override
- public int defaultTimeoutSeconds() {
- return 8 * 60;
- }
-
- SortedSet<Text> splits(String[] points) {
- SortedSet<Text> result = new TreeSet<Text>();
- for (String point : points)
- result.add(new Text(point));
- return result;
- }
-
- @Test
- public void merge() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- c.tableOperations().addSplits(tableName, splits("a b c d e f g h i j k".split(" ")));
- BatchWriter bw = c.createBatchWriter(tableName, null);
- for (String row : "a b c d e f g h i j k".split(" ")) {
- Mutation m = new Mutation(row);
- m.put("cf", "cq", "value");
- bw.addMutation(m);
- }
- bw.close();
- c.tableOperations().flush(tableName, null, null, true);
- c.tableOperations().merge(tableName, new Text("c1"), new Text("f1"));
- assertEquals(8, c.tableOperations().listSplits(tableName).size());
- }
-
- @Test
- public void mergeSize() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- c.tableOperations().addSplits(tableName, splits("a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")));
- BatchWriter bw = c.createBatchWriter(tableName, null);
- for (String row : "c e f y".split(" ")) {
- Mutation m = new Mutation(row);
- m.put("cf", "cq", "mersydotesanddozeydotesanlittolamsiedives");
- bw.addMutation(m);
- }
- bw.close();
- c.tableOperations().flush(tableName, null, null, true);
- Merge merge = new Merge();
- merge.mergomatic(c, tableName, null, null, 100, false);
- assertArrayEquals("b c d e f x y".split(" "), toStrings(c.tableOperations().listSplits(tableName)));
- merge.mergomatic(c, tableName, null, null, 100, true);
- assertArrayEquals("c e f y".split(" "), toStrings(c.tableOperations().listSplits(tableName)));
- }
-
- private String[] toStrings(Collection<Text> listSplits) {
- String[] result = new String[listSplits.size()];
- int i = 0;
- for (Text t : listSplits) {
- result[i++] = t.toString();
- }
- return result;
- }
-
- private String[] ns(String... strings) {
- return strings;
- }
-
- @Test
- public void mergeTest() throws Exception {
- int tc = 0;
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- runMergeTest(c, tableName + tc++, ns(), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
-
- runMergeTest(c, tableName + tc++, ns("m"), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
- runMergeTest(c, tableName + tc++, ns("m"), ns("m"), ns("l", "m", "n"), ns("m", "n"), ns(null, "z"));
- runMergeTest(c, tableName + tc++, ns("m"), ns("m"), ns("l", "m", "n"), ns(null, "b"), ns("l", "m"));
-
- runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns(), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns(null, "s"));
- runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("c", "m"));
- runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("n", "r"));
- runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns(null, "s"));
- runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns(null, "s"));
- runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("q", "r"));
- runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("aa", "b"));
- runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("r", "s"), ns(null, "z"));
- runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("l", "m"));
- runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns("q", "r"));
-
- }
-
- private void runMergeTest(Connector c, String table, String[] splits, String[] expectedSplits, String[] inserts, String[] start, String[] end)
- throws Exception {
- int count = 0;
-
- for (String s : start) {
- for (String e : end) {
- runMergeTest(c, table + "_" + count++, splits, expectedSplits, inserts, s, e);
- }
- }
- }
-
- private void runMergeTest(Connector conn, String table, String[] splits, String[] expectedSplits, String[] inserts, String start, String end)
- throws Exception {
- System.out.println("Running merge test " + table + " " + Arrays.asList(splits) + " " + start + " " + end);
-
- conn.tableOperations().create(table, new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
- TreeSet<Text> splitSet = new TreeSet<Text>();
- for (String split : splits) {
- splitSet.add(new Text(split));
- }
- conn.tableOperations().addSplits(table, splitSet);
-
- BatchWriter bw = conn.createBatchWriter(table, null);
- HashSet<String> expected = new HashSet<String>();
- for (String row : inserts) {
- Mutation m = new Mutation(row);
- m.put("cf", "cq", row);
- bw.addMutation(m);
- expected.add(row);
- }
-
- bw.close();
-
- conn.tableOperations().merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end));
-
- Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
-
- HashSet<String> observed = new HashSet<String>();
- for (Entry<Key,Value> entry : scanner) {
- String row = entry.getKey().getRowData().toString();
- if (!observed.add(row)) {
- throw new Exception("Saw data twice " + table + " " + row);
- }
- }
-
- if (!observed.equals(expected)) {
- throw new Exception("data inconsistency " + table + " " + observed + " != " + expected);
- }
-
- HashSet<Text> currentSplits = new HashSet<Text>(conn.tableOperations().listSplits(table));
- HashSet<Text> ess = new HashSet<Text>();
- for (String es : expectedSplits) {
- ess.add(new Text(es));
- }
-
- if (!currentSplits.equals(ess)) {
- throw new Exception("split inconsistency " + table + " " + currentSplits + " != " + ess);
- }
-
- }
-
-}
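
For reference, the API driven by MergeIT above: TableOperations.merge collapses the tablets whose rows fall in the given range into a single tablet, with a null endpoint meaning unbounded on that side. A minimal sketch, assuming an existing Connector c and an existing table (the class and method names are illustrative):

import java.util.SortedSet;
import java.util.TreeSet;

import org.apache.accumulo.core.client.Connector;
import org.apache.hadoop.io.Text;

public class MergeSketch {
  // Splits a table at a..e, then merges the tablets in the middle of that range back together.
  static void splitThenMerge(Connector c, String table) throws Exception {
    SortedSet<Text> splits = new TreeSet<Text>();
    for (String s : "a b c d e".split(" "))
      splits.add(new Text(s));
    c.tableOperations().addSplits(table, splits);
    // Merge the tablets between rows "b" and "d"; passing null for either
    // endpoint would make that side of the merge range unbounded.
    c.tableOperations().merge(table, new Text("b"), new Text("d"));
  }
}
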
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/MetadataIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MetadataIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MetadataIT.java
deleted file mode 100644
index 9455456..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MetadataIT.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class MetadataIT extends AccumuloClusterHarness {
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setNumTservers(1);
- }
-
- @Override
- public int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Test
- public void testFlushAndCompact() throws Exception {
- Connector c = getConnector();
- String tableNames[] = getUniqueNames(2);
-
- // create a table to write some data to metadata table
- c.tableOperations().create(tableNames[0]);
-
- Scanner rootScanner = c.createScanner(RootTable.NAME, Authorizations.EMPTY);
- rootScanner.setRange(MetadataSchema.TabletsSection.getRange());
- rootScanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
-
- Set<String> files1 = new HashSet<String>();
- for (Entry<Key,Value> entry : rootScanner)
- files1.add(entry.getKey().getColumnQualifier().toString());
-
- c.tableOperations().create(tableNames[1]);
- c.tableOperations().flush(MetadataTable.NAME, null, null, true);
-
- Set<String> files2 = new HashSet<String>();
- for (Entry<Key,Value> entry : rootScanner)
- files2.add(entry.getKey().getColumnQualifier().toString());
-
- // flush of metadata table should change file set in root table
- Assert.assertTrue(files2.size() > 0);
- Assert.assertNotEquals(files1, files2);
-
- c.tableOperations().compact(MetadataTable.NAME, null, null, false, true);
-
- Set<String> files3 = new HashSet<String>();
- for (Entry<Key,Value> entry : rootScanner)
- files3.add(entry.getKey().getColumnQualifier().toString());
-
- // compaction of metadata table should change file set in root table
- Assert.assertNotEquals(files2, files3);
- }
-
- @Test
- public void mergeMeta() throws Exception {
- Connector c = getConnector();
- String[] names = getUniqueNames(5);
- SortedSet<Text> splits = new TreeSet<Text>();
- for (String id : "1 2 3 4 5".split(" ")) {
- splits.add(new Text(id));
- }
- c.tableOperations().addSplits(MetadataTable.NAME, splits);
- for (String tableName : names) {
- c.tableOperations().create(tableName);
- }
- c.tableOperations().merge(MetadataTable.NAME, null, null);
- Scanner s = c.createScanner(RootTable.NAME, Authorizations.EMPTY);
- s.setRange(MetadataSchema.DeletesSection.getRange());
- while (Iterators.size(s.iterator()) == 0) {
- UtilWaitThread.sleep(100);
- }
- assertEquals(0, c.tableOperations().listSplits(MetadataTable.NAME).size());
- }
-
- @Test
- public void batchScanTest() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
-
- // batch scan regular metadata table
- BatchScanner s = c.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1);
- s.setRanges(Collections.singleton(new Range()));
- int count = 0;
- for (Entry<Key,Value> e : s) {
- if (e != null)
- count++;
- }
- s.close();
- assertTrue(count > 0);
-
- // batch scan root metadata table
- s = c.createBatchScanner(RootTable.NAME, Authorizations.EMPTY, 1);
- s.setRanges(Collections.singleton(new Range()));
- count = 0;
- for (Entry<Key,Value> e : s) {
- if (e != null)
- count++;
- }
- s.close();
- assertTrue(count > 0);
- }
-
-}
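
For reference, the root-table scan in MetadataIT above amounts to fetching the data-file column family under the tablets section. The same lookup against the metadata table, as a minimal sketch assuming an existing Connector c (the class name is illustrative):

import java.util.Map.Entry;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.schema.MetadataSchema;
import org.apache.accumulo.core.security.Authorizations;

public class ListMetadataFiles {
  // Prints every rfile referenced by tablets in the metadata table.
  static void listFiles(Connector c) throws Exception {
    Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    s.setRange(MetadataSchema.TabletsSection.getRange());
    s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
    for (Entry<Key,Value> entry : s)
      System.out.println(entry.getKey().getColumnQualifier());
  }
}
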
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
deleted file mode 100644
index 086dd1a..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.impl.ClientContext;
-import org.apache.accumulo.core.client.impl.Credentials;
-import org.apache.accumulo.core.client.impl.MasterClient;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.master.thrift.MasterClientService.Client;
-import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
-import org.apache.accumulo.core.master.thrift.TableInfo;
-import org.apache.accumulo.core.master.thrift.TabletServerStatus;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.trace.Tracer;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.util.Admin;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class MetadataMaxFilesIT extends ConfigurableMacBase {
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- Map<String,String> siteConfig = new HashMap<String,String>();
- siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1");
- siteConfig.put(Property.TSERV_SCAN_MAX_OPENFILES.getKey(), "10");
- cfg.setSiteConfig(siteConfig);
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 4 * 60;
- }
-
- @Test
- public void test() throws Exception {
- Connector c = getConnector();
- SortedSet<Text> splits = new TreeSet<Text>();
- for (int i = 0; i < 1000; i++) {
- splits.add(new Text(String.format("%03d", i)));
- }
- c.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10000");
- for (int i = 0; i < 5; i++) {
- String tableName = "table" + i;
- log.info("Creating " + tableName);
- c.tableOperations().create(tableName);
- log.info("adding splits");
- c.tableOperations().addSplits(tableName, splits);
- log.info("flushing");
- c.tableOperations().flush(MetadataTable.NAME, null, null, true);
- c.tableOperations().flush(RootTable.NAME, null, null, true);
- }
- UtilWaitThread.sleep(20 * 1000);
- log.info("shutting down");
- assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
- cluster.stop();
- log.info("starting up");
- cluster.start();
-
- UtilWaitThread.sleep(30 * 1000);
-
- while (true) {
- MasterMonitorInfo stats = null;
- Credentials creds = new Credentials("root", new PasswordToken(ROOT_PASSWORD));
- Client client = null;
- try {
- ClientContext context = new ClientContext(c.getInstance(), creds, getClientConfig());
- client = MasterClient.getConnectionWithRetry(context);
- stats = client.getMasterStats(Tracer.traceInfo(), context.rpcCreds());
- } finally {
- if (client != null)
- MasterClient.close(client);
- }
- int tablets = 0;
- for (TabletServerStatus tserver : stats.tServerInfo) {
- for (Entry<String,TableInfo> entry : tserver.tableMap.entrySet()) {
- if (entry.getKey().startsWith("!") || entry.getKey().startsWith("+"))
- continue;
- tablets += entry.getValue().onlineTablets;
- }
- }
- if (tablets == 5005)
- break;
- UtilWaitThread.sleep(1000);
- }
- }
-}
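
For reference, the pressure in MetadataMaxFilesIT above comes from adding a thousand zero-padded split points to each table. A minimal sketch of that split generation (the class name is illustrative):

import java.util.SortedSet;
import java.util.TreeSet;

import org.apache.hadoop.io.Text;

public class SplitPoints {
  // Builds 1000 zero-padded split points ("000" .. "999") so rows distribute evenly.
  static SortedSet<Text> thousandSplits() {
    SortedSet<Text> splits = new TreeSet<Text>();
    for (int i = 0; i < 1000; i++)
      splits.add(new Text(String.format("%03d", i)));
    return splits;
  }
}
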
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/MetadataSplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MetadataSplitIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MetadataSplitIT.java
deleted file mode 100644
index ab2c791..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MetadataSplitIT.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Collections;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
-public class MetadataSplitIT extends ConfigurableMacBase {
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_MAJC_DELAY.getKey(), "100ms"));
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Test
- public void test() throws Exception {
- Connector c = getConnector();
- assertEquals(1, c.tableOperations().listSplits(MetadataTable.NAME).size());
- c.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "500");
- for (int i = 0; i < 10; i++) {
- c.tableOperations().create("table" + i);
- c.tableOperations().flush(MetadataTable.NAME, null, null, true);
- }
- UtilWaitThread.sleep(10 * 1000);
- assertTrue(c.tableOperations().listSplits(MetadataTable.NAME).size() > 2);
- }
-}
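
For reference, the lever MetadataSplitIT above pulls is table.split.threshold: lowering it makes tablets that exceed the new size split on their own. A minimal sketch on a user table, assuming an existing Connector c (the helper name is illustrative):

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.conf.Property;

public class SplitThresholdSketch {
  // Lowers the split threshold so existing tablets larger than 500 bytes split apart.
  static void forceSplits(Connector c, String table) throws Exception {
    c.tableOperations().setProperty(table, Property.TABLE_SPLIT_THRESHOLD.getKey(), "500");
    // flush so in-memory data lands in files and tablet sizes are re-evaluated
    c.tableOperations().flush(table, null, null, true);
  }
}
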
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/MonitorLoggingIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MonitorLoggingIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MonitorLoggingIT.java
deleted file mode 100644
index c59c52e..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MonitorLoggingIT.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.net.URL;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.monitor.Monitor;
-import org.apache.accumulo.server.util.Admin;
-import org.apache.commons.io.FileUtils;
-import org.apache.zookeeper.KeeperException;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class MonitorLoggingIT extends ConfigurableMacBase {
- private static final Logger log = LoggerFactory.getLogger(MonitorLoggingIT.class);
-
- @Override
- public void beforeClusterStart(MiniAccumuloConfigImpl cfg) throws Exception {
- cfg.setNumTservers(1);
- File confDir = cfg.getConfDir();
- try {
- FileUtils.copyFileToDirectory(new File(MonitorLoggingIT.class.getResource("/conf/generic_logger.xml").toURI()), confDir);
- FileUtils.copyFileToDirectory(new File(MonitorLoggingIT.class.getResource("/conf/monitor_logger.xml").toURI()), confDir);
- } catch (Exception e) {
- log.error("Failed to copy Log4J XML files to conf dir", e);
- }
- }
-
- private static final int NUM_LOCATION_PASSES = 5;
- private static final int LOCATION_DELAY_SECS = 5;
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 30 + ((NUM_LOCATION_PASSES + 2) * LOCATION_DELAY_SECS);
- }
-
- @Test
- public void logToMonitor() throws Exception {
- // Start the monitor.
- log.debug("Starting Monitor");
- Process monitor = cluster.exec(Monitor.class);
-
- // Get monitor location to ensure it is running.
- String monitorLocation = null;
- for (int i = 0; i < NUM_LOCATION_PASSES; i++) {
- Thread.sleep(LOCATION_DELAY_SECS * 1000);
- try {
- monitorLocation = getMonitor();
- break;
- } catch (KeeperException e) {
- log.debug("Monitor not up yet, trying again in " + LOCATION_DELAY_SECS + " secs");
- }
- }
- assertNotNull("Monitor failed to start within " + (LOCATION_DELAY_SECS * NUM_LOCATION_PASSES) + " secs", monitorLocation);
- log.debug("Monitor running at " + monitorLocation);
-
- // The tserver has to observe that the log-forwarding address
- // changed in ZooKeeper. If we cause the error before the tserver
- // updates, we'll never see the error on the monitor.
- Thread.sleep(10000);
-
- // Attempt a scan with an invalid iterator to force a log message in the monitor.
- try {
- Connector c = getConnector();
- Scanner s = c.createScanner("accumulo.root", new Authorizations());
- IteratorSetting cfg = new IteratorSetting(100, "incorrect", "java.lang.String");
- s.addScanIterator(cfg);
- s.iterator().next();
- } catch (RuntimeException e) {
- // expected, the iterator was bad
- }
-
- String result = "";
- while (true) {
- Thread.sleep(LOCATION_DELAY_SECS * 1000); // extra precaution to ensure monitor has opportunity to log
-
- // Verify messages were received at the monitor.
- URL url = new URL("http://" + monitorLocation + "/log");
- log.debug("Fetching web page " + url);
- result = FunctionalTestUtils.readAll(url.openStream());
- if (result.contains("<pre class='logevent'>")) {
- break;
- }
- log.debug("No messages found, waiting a little longer...");
- }
-
- assertTrue("No log messages found", result.contains("<pre class='logevent'>"));
-
- // Shutdown cleanly.
- log.debug("Stopping mini accumulo cluster");
- Process shutdown = cluster.exec(Admin.class, "stopAll");
- shutdown.waitFor();
- assertTrue(shutdown.exitValue() == 0);
- log.debug("success!");
- monitor.destroy();
- }
-}
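
For reference, the last phase of MonitorLoggingIT above is just polling a web page until a marker appears. A generic sketch of such a polling loop (the method name, charset handling, and sleep interval are illustrative choices, not from the test):

import java.io.InputStream;
import java.net.URL;
import java.util.Scanner;

public class PollPage {
  // Repeatedly fetches a URL until the body contains the marker, sleeping between tries.
  static String waitForMarker(String address, String marker) throws Exception {
    while (true) {
      InputStream in = new URL(address).openStream();
      try {
        Scanner sc = new Scanner(in, "UTF-8").useDelimiter("\\A");
        String body = sc.hasNext() ? sc.next() : "";
        if (body.contains(marker))
          return body;
      } finally {
        in.close();
      }
      Thread.sleep(5000);
    }
  }
}
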
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/MonitorSslIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MonitorSslIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MonitorSslIT.java
deleted file mode 100644
index 7283c4d..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/MonitorSslIT.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.net.URL;
-import java.security.KeyManagementException;
-import java.security.NoSuchAlgorithmException;
-import java.security.SecureRandom;
-import java.security.cert.CertificateException;
-import java.security.cert.X509Certificate;
-import java.util.Map;
-
-import javax.net.ssl.HostnameVerifier;
-import javax.net.ssl.HttpsURLConnection;
-import javax.net.ssl.KeyManager;
-import javax.net.ssl.SSLContext;
-import javax.net.ssl.SSLSession;
-import javax.net.ssl.TrustManager;
-import javax.net.ssl.X509TrustManager;
-
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.util.MonitorUtil;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-/**
- * Check SSL for the Monitor
- *
- */
-public class MonitorSslIT extends ConfigurableMacBase {
- @BeforeClass
- public static void initHttps() throws NoSuchAlgorithmException, KeyManagementException {
- SSLContext ctx = SSLContext.getInstance("SSL");
- TrustManager[] tm = new TrustManager[] {new TestTrustManager()};
- ctx.init(new KeyManager[0], tm, new SecureRandom());
- SSLContext.setDefault(ctx);
- HttpsURLConnection.setDefaultSSLSocketFactory(ctx.getSocketFactory());
- HttpsURLConnection.setDefaultHostnameVerifier(new TestHostnameVerifier());
- }
-
- private static class TestTrustManager implements X509TrustManager {
- @Override
- public void checkClientTrusted(X509Certificate[] arg0, String arg1) throws CertificateException {}
-
- @Override
- public void checkServerTrusted(X509Certificate[] arg0, String arg1) throws CertificateException {}
-
- @Override
- public X509Certificate[] getAcceptedIssuers() {
- return null;
- }
- }
-
- private static class TestHostnameVerifier implements HostnameVerifier {
- @Override
- public boolean verify(String hostname, SSLSession session) {
- return true;
- }
- }
-
- @Override
- public int defaultTimeoutSeconds() {
- return 6 * 60;
- }
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- super.configure(cfg, hadoopCoreSite);
- File baseDir = createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName());
- configureForSsl(cfg, getSslDir(baseDir));
- Map<String,String> siteConfig = cfg.getSiteConfig();
- siteConfig.put(Property.MONITOR_SSL_KEYSTORE.getKey(), siteConfig.get(Property.RPC_SSL_KEYSTORE_PATH.getKey()));
- siteConfig.put(Property.MONITOR_SSL_KEYSTOREPASS.getKey(), siteConfig.get(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey()));
- if (siteConfig.containsKey(Property.RPC_SSL_KEYSTORE_TYPE.getKey())) {
- siteConfig.put(Property.MONITOR_SSL_KEYSTORETYPE.getKey(), siteConfig.get(Property.RPC_SSL_KEYSTORE_TYPE.getKey()));
- } else {
- siteConfig.put(Property.MONITOR_SSL_KEYSTORETYPE.getKey(), Property.RPC_SSL_KEYSTORE_TYPE.getDefaultValue());
- }
- siteConfig.put(Property.MONITOR_SSL_TRUSTSTORE.getKey(), siteConfig.get(Property.RPC_SSL_TRUSTSTORE_PATH.getKey()));
- siteConfig.put(Property.MONITOR_SSL_TRUSTSTOREPASS.getKey(), siteConfig.get(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey()));
- if (siteConfig.containsKey(Property.RPC_SSL_TRUSTSTORE_TYPE.getKey())) {
- siteConfig.put(Property.MONITOR_SSL_TRUSTSTORETYPE.getKey(), siteConfig.get(Property.RPC_SSL_TRUSTSTORE_TYPE.getKey()));
- } else {
- siteConfig.put(Property.MONITOR_SSL_TRUSTSTORETYPE.getKey(), Property.RPC_SSL_TRUSTSTORE_TYPE.getDefaultValue());
- }
- cfg.setSiteConfig(siteConfig);
- }
-
- @Test
- public void test() throws Exception {
- log.debug("Starting Monitor");
- cluster.getClusterControl().startAllServers(ServerType.MONITOR);
- String monitorLocation = null;
- while (null == monitorLocation) {
- try {
- monitorLocation = MonitorUtil.getLocation(getConnector().getInstance());
- } catch (Exception e) {
- // ignored
- }
- if (null == monitorLocation) {
- log.debug("Could not fetch monitor HTTP address from zookeeper");
- Thread.sleep(2000);
- }
- }
- URL url = new URL("https://" + monitorLocation);
- log.debug("Fetching web page " + url);
- String result = FunctionalTestUtils.readAll(url.openStream());
- assertTrue(result.length() > 100);
- assertTrue(result.indexOf("Accumulo Overview") >= 0);
- }
-
-}
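
For reference, the trust-everything SSL setup at the top of MonitorSslIT above is a standard test-only idiom. A standalone sketch of the same idea; it disables certificate and hostname checking, so it must never be used outside tests:

import java.security.SecureRandom;
import java.security.cert.X509Certificate;

import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.KeyManager;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSession;
import javax.net.ssl.TrustManager;
import javax.net.ssl.X509TrustManager;

public class TrustAllForTests {
  // TEST ONLY: installs an SSLContext that accepts any certificate and any hostname.
  static void install() throws Exception {
    TrustManager[] trustAll = new TrustManager[] {new X509TrustManager() {
      @Override
      public void checkClientTrusted(X509Certificate[] chain, String authType) {}

      @Override
      public void checkServerTrusted(X509Certificate[] chain, String authType) {}

      @Override
      public X509Certificate[] getAcceptedIssuers() {
        return null;
      }
    }};
    SSLContext ctx = SSLContext.getInstance("TLS");
    ctx.init(new KeyManager[0], trustAll, new SecureRandom());
    HttpsURLConnection.setDefaultSSLSocketFactory(ctx.getSocketFactory());
    HttpsURLConnection.setDefaultHostnameVerifier(new HostnameVerifier() {
      @Override
      public boolean verify(String hostname, SSLSession session) {
        return true;
      }
    });
  }
}
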
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/MultiTableRecoveryIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/MultiTableRecoveryIT.java b/test/src/test/java/org/apache/accumulo/test/MultiTableRecoveryIT.java
deleted file mode 100644
index 37e4957..0000000
--- a/test/src/test/java/org/apache/accumulo/test/MultiTableRecoveryIT.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Map.Entry;
-import java.util.Random;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class MultiTableRecoveryIT extends ConfigurableMacBase {
-
- @Override
- protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
-
- // use raw local file system so walogs sync and flush will work
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- @Test(timeout = 4 * 60 * 1000)
- public void testRecoveryOverMultipleTables() throws Exception {
- final int N = 3;
- final Connector c = getConnector();
- final String[] tables = getUniqueNames(N);
- final BatchWriter[] writers = new BatchWriter[N];
- final byte[][] values = new byte[N][];
- int i = 0;
- System.out.println("Creating tables");
- for (String tableName : tables) {
- c.tableOperations().create(tableName);
- values[i] = Integer.toString(i).getBytes();
- writers[i] = c.createBatchWriter(tableName, null);
- i++;
- }
- System.out.println("Creating agitator");
- final AtomicBoolean stop = new AtomicBoolean(false);
- final Thread agitator = agitator(stop);
- agitator.start();
- System.out.println("writing");
- final Random random = new Random();
- for (i = 0; i < 1_000_000; i++) {
- // make non-negative by masking instead of Math.abs, because Math.abs(Long.MIN_VALUE) is still negative
- long randomRow = random.nextLong() & Long.MAX_VALUE;
- assertTrue(randomRow >= 0);
- final int table = (int) (randomRow % N);
- final Mutation m = new Mutation(Long.toHexString(randomRow));
- m.put(new byte[0], new byte[0], values[table]);
- writers[table].addMutation(m);
- if (i % 10_000 == 0) {
- System.out.println("flushing");
- for (int w = 0; w < N; w++) {
- writers[w].flush();
- }
- }
- }
- System.out.println("closing");
- for (int w = 0; w < N; w++) {
- writers[w].close();
- }
- System.out.println("stopping the agitator");
- stop.set(true);
- agitator.join();
- System.out.println("checking the data");
- long count = 0;
- for (int w = 0; w < N; w++) {
- Scanner scanner = c.createScanner(tables[w], Authorizations.EMPTY);
- for (Entry<Key,Value> entry : scanner) {
- int value = Integer.parseInt(entry.getValue().toString());
- assertEquals(w, value);
- count++;
- }
- scanner.close();
- }
- assertEquals(1_000_000, count);
- }
-
- private Thread agitator(final AtomicBoolean stop) {
- return new Thread() {
- @Override
- public void run() {
- try {
- int i = 0;
- while (!stop.get()) {
- UtilWaitThread.sleep(10 * 1000);
- System.out.println("Restarting");
- getCluster().getClusterControl().stop(ServerType.TABLET_SERVER);
- getCluster().start();
- // read the metadata table to know everything is back up
- Iterators.size(getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY).iterator());
- i++;
- }
- System.out.println("Restarted " + i + " times");
- } catch (Exception ex) {
- log.error("{}", ex.getMessage(), ex);
- }
- }
- };
- }
-}
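
For reference, the masking trick in MultiTableRecoveryIT's write loop above matters because Math.abs is not a safe way to obtain a non-negative long. A tiny standalone demonstration:

public class AbsPitfall {
  public static void main(String[] args) {
    // Math.abs overflows on Long.MIN_VALUE and hands back a negative number ...
    System.out.println(Math.abs(Long.MIN_VALUE)); // prints -9223372036854775808
    // ... while clearing the sign bit always yields a non-negative result.
    System.out.println(Long.MIN_VALUE & Long.MAX_VALUE); // prints 0
    System.out.println(-42L & Long.MAX_VALUE); // prints 9223372036854775766
  }
}
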
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/NamespacesIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/NamespacesIT.java b/test/src/test/java/org/apache/accumulo/test/NamespacesIT.java
deleted file mode 100644
index 0ecdd0d..0000000
--- a/test/src/test/java/org/apache/accumulo/test/NamespacesIT.java
+++ /dev/null
@@ -1,1362 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.NamespaceExistsException;
-import org.apache.accumulo.core.client.NamespaceNotEmptyException;
-import org.apache.accumulo.core.client.NamespaceNotFoundException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.admin.NamespaceOperations;
-import org.apache.accumulo.core.client.admin.NewTableConfiguration;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.client.impl.Namespaces;
-import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.client.impl.thrift.TableOperation;
-import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
-import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
-import org.apache.accumulo.core.client.security.SecurityErrorCode;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.Filter;
-import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
-import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
-import org.apache.accumulo.core.iterators.user.VersioningIterator;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.NamespacePermission;
-import org.apache.accumulo.core.security.SystemPermission;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.examples.simple.constraints.NumericValueConstraint;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.After;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Test;
-
-// Testing default namespace configuration with inheritance requires altering the system state and restoring it to normal afterward.
-// Punt on this for now and just let it use a minicluster.
-public class NamespacesIT extends AccumuloClusterHarness {
-
- private Connector c;
- private String namespace;
-
- @Override
- public int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Before
- public void setupConnectorAndNamespace() throws Exception {
- Assume.assumeTrue(ClusterType.MINI == getClusterType());
-
- // prepare a unique namespace and get a new root connector for each test
- c = getConnector();
- namespace = "ns_" + getUniqueNames(1)[0];
- }
-
- @After
- public void swingMjölnir() throws Exception {
- if (null == c) {
- return;
- }
- // clean up any added tables, namespaces, and users, after each test
- for (String t : c.tableOperations().list())
- if (!Tables.qualify(t).getFirst().equals(Namespaces.ACCUMULO_NAMESPACE))
- c.tableOperations().delete(t);
- assertEquals(3, c.tableOperations().list().size());
- for (String n : c.namespaceOperations().list())
- if (!n.equals(Namespaces.ACCUMULO_NAMESPACE) && !n.equals(Namespaces.DEFAULT_NAMESPACE))
- c.namespaceOperations().delete(n);
- assertEquals(2, c.namespaceOperations().list().size());
- for (String u : c.securityOperations().listLocalUsers())
- if (!getAdminPrincipal().equals(u))
- c.securityOperations().dropLocalUser(u);
- assertEquals(1, c.securityOperations().listLocalUsers().size());
- }
-
- @Test
- public void checkReservedNamespaces() throws Exception {
- assertEquals(c.namespaceOperations().defaultNamespace(), Namespaces.DEFAULT_NAMESPACE);
- assertEquals(c.namespaceOperations().systemNamespace(), Namespaces.ACCUMULO_NAMESPACE);
- }
-
- @Test
- public void checkBuiltInNamespaces() throws Exception {
- assertTrue(c.namespaceOperations().exists(Namespaces.DEFAULT_NAMESPACE));
- assertTrue(c.namespaceOperations().exists(Namespaces.ACCUMULO_NAMESPACE));
- }
-
- @Test
- public void createTableInDefaultNamespace() throws Exception {
- String tableName = "1";
- c.tableOperations().create(tableName);
- assertTrue(c.tableOperations().exists(tableName));
- }
-
- @Test(expected = AccumuloException.class)
- public void createTableInAccumuloNamespace() throws Exception {
- String tableName = Namespaces.ACCUMULO_NAMESPACE + ".1";
- assertFalse(c.tableOperations().exists(tableName));
- c.tableOperations().create(tableName); // should fail
- }
-
- @Test(expected = AccumuloSecurityException.class)
- public void deleteDefaultNamespace() throws Exception {
- c.namespaceOperations().delete(Namespaces.DEFAULT_NAMESPACE); // should fail
- }
-
- @Test(expected = AccumuloSecurityException.class)
- public void deleteAccumuloNamespace() throws Exception {
- c.namespaceOperations().delete(Namespaces.ACCUMULO_NAMESPACE); // should fail
- }
-
- @Test
- public void createTableInMissingNamespace() throws Exception {
- String t = namespace + ".1";
- assertFalse(c.namespaceOperations().exists(namespace));
- assertFalse(c.tableOperations().exists(t));
- try {
- c.tableOperations().create(t);
- fail();
- } catch (AccumuloException e) {
- assertEquals(NamespaceNotFoundException.class.getName(), e.getCause().getClass().getName());
- assertFalse(c.namespaceOperations().exists(namespace));
- assertFalse(c.tableOperations().exists(t));
- }
- }
-
- @Test
- public void createAndDeleteNamespace() throws Exception {
- String t1 = namespace + ".1";
- String t2 = namespace + ".2";
- assertFalse(c.namespaceOperations().exists(namespace));
- assertFalse(c.tableOperations().exists(t1));
- assertFalse(c.tableOperations().exists(t2));
- try {
- c.namespaceOperations().delete(namespace);
- } catch (NamespaceNotFoundException e) {}
- try {
- c.tableOperations().delete(t1);
- } catch (TableNotFoundException e) {
- assertEquals(NamespaceNotFoundException.class.getName(), e.getCause().getClass().getName());
- }
- c.namespaceOperations().create(namespace);
- assertTrue(c.namespaceOperations().exists(namespace));
- assertFalse(c.tableOperations().exists(t1));
- assertFalse(c.tableOperations().exists(t2));
- c.tableOperations().create(t1);
- assertTrue(c.namespaceOperations().exists(namespace));
- assertTrue(c.tableOperations().exists(t1));
- assertFalse(c.tableOperations().exists(t2));
- c.tableOperations().create(t2);
- assertTrue(c.namespaceOperations().exists(namespace));
- assertTrue(c.tableOperations().exists(t1));
- assertTrue(c.tableOperations().exists(t2));
- c.tableOperations().delete(t1);
- assertTrue(c.namespaceOperations().exists(namespace));
- assertFalse(c.tableOperations().exists(t1));
- assertTrue(c.tableOperations().exists(t2));
- c.tableOperations().delete(t2);
- assertTrue(c.namespaceOperations().exists(namespace));
- assertFalse(c.tableOperations().exists(t1));
- assertFalse(c.tableOperations().exists(t2));
- c.namespaceOperations().delete(namespace);
- assertFalse(c.namespaceOperations().exists(namespace));
- assertFalse(c.tableOperations().exists(t1));
- assertFalse(c.tableOperations().exists(t2));
- }
-
- @Test(expected = NamespaceNotEmptyException.class)
- public void deleteNonEmptyNamespace() throws Exception {
- String tableName1 = namespace + ".1";
- assertFalse(c.namespaceOperations().exists(namespace));
- assertFalse(c.tableOperations().exists(tableName1));
- c.namespaceOperations().create(namespace);
- c.tableOperations().create(tableName1);
- assertTrue(c.namespaceOperations().exists(namespace));
- assertTrue(c.tableOperations().exists(tableName1));
- c.namespaceOperations().delete(namespace); // should fail
- }
-
- @Test
- public void verifyPropertyInheritance() throws Exception {
- String t0 = "0";
- String t1 = namespace + ".1";
- String t2 = namespace + ".2";
-
- String k = Property.TABLE_SCAN_MAXMEM.getKey();
- String v = "42K";
-
- assertFalse(c.namespaceOperations().exists(namespace));
- assertFalse(c.tableOperations().exists(t1));
- assertFalse(c.tableOperations().exists(t2));
- c.namespaceOperations().create(namespace);
- c.tableOperations().create(t1);
- c.tableOperations().create(t0);
- assertTrue(c.namespaceOperations().exists(namespace));
- assertTrue(c.tableOperations().exists(t1));
- assertTrue(c.tableOperations().exists(t0));
-
- // verify no property
- assertFalse(checkNamespaceHasProp(namespace, k, v));
- assertFalse(checkTableHasProp(t1, k, v));
- assertFalse(checkNamespaceHasProp(Namespaces.DEFAULT_NAMESPACE, k, v));
- assertFalse(checkTableHasProp(t0, k, v));
-
- // set property and verify
- c.namespaceOperations().setProperty(namespace, k, v);
- assertTrue(checkNamespaceHasProp(namespace, k, v));
- assertTrue(checkTableHasProp(t1, k, v));
- assertFalse(checkNamespaceHasProp(Namespaces.DEFAULT_NAMESPACE, k, v));
- assertFalse(checkTableHasProp(t0, k, v));
-
- // add a new table to namespace and verify
- assertFalse(c.tableOperations().exists(t2));
- c.tableOperations().create(t2);
- assertTrue(c.tableOperations().exists(t2));
- assertTrue(checkNamespaceHasProp(namespace, k, v));
- assertTrue(checkTableHasProp(t1, k, v));
- assertTrue(checkTableHasProp(t2, k, v));
- assertFalse(checkNamespaceHasProp(Namespaces.DEFAULT_NAMESPACE, k, v));
- assertFalse(checkTableHasProp(t0, k, v));
-
- // remove property and verify
- c.namespaceOperations().removeProperty(namespace, k);
- assertFalse(checkNamespaceHasProp(namespace, k, v));
- assertFalse(checkTableHasProp(t1, k, v));
- assertFalse(checkTableHasProp(t2, k, v));
- assertFalse(checkNamespaceHasProp(Namespaces.DEFAULT_NAMESPACE, k, v));
- assertFalse(checkTableHasProp(t0, k, v));
-
- // set property on default namespace and verify
- c.namespaceOperations().setProperty(Namespaces.DEFAULT_NAMESPACE, k, v);
- assertFalse(checkNamespaceHasProp(namespace, k, v));
- assertFalse(checkTableHasProp(t1, k, v));
- assertFalse(checkTableHasProp(t2, k, v));
- assertTrue(checkNamespaceHasProp(Namespaces.DEFAULT_NAMESPACE, k, v));
- assertTrue(checkTableHasProp(t0, k, v));
-
- // test that table properties override namespace properties
- String k2 = Property.TABLE_FILE_MAX.getKey();
- String v2 = "42";
- String table_v2 = "13";
-
- // set new property on some
- c.namespaceOperations().setProperty(namespace, k2, v2);
- c.tableOperations().setProperty(t2, k2, table_v2);
- assertTrue(checkNamespaceHasProp(namespace, k2, v2));
- assertTrue(checkTableHasProp(t1, k2, v2));
- assertTrue(checkTableHasProp(t2, k2, table_v2));
-
- c.tableOperations().delete(t1);
- c.tableOperations().delete(t2);
- c.tableOperations().delete(t0);
- c.namespaceOperations().delete(namespace);
- }
-
- @Test
- public void verifyIteratorInheritance() throws Exception {
- String t1 = namespace + ".1";
- c.namespaceOperations().create(namespace);
- c.tableOperations().create(t1);
- String iterName = namespace + "_iter";
-
- BatchWriter bw = c.createBatchWriter(t1, new BatchWriterConfig());
- Mutation m = new Mutation("r");
- m.put("a", "b", new Value("abcde".getBytes()));
- bw.addMutation(m);
- bw.flush();
- bw.close();
-
- IteratorSetting setting = new IteratorSetting(250, iterName, SimpleFilter.class.getName());
-
- // verify can see inserted entry
- Scanner s = c.createScanner(t1, Authorizations.EMPTY);
- assertTrue(s.iterator().hasNext());
- assertFalse(c.namespaceOperations().listIterators(namespace).containsKey(iterName));
- assertFalse(c.tableOperations().listIterators(t1).containsKey(iterName));
-
- // verify entry is filtered out (also, verify conflict checking API)
- c.namespaceOperations().checkIteratorConflicts(namespace, setting, EnumSet.allOf(IteratorScope.class));
- c.namespaceOperations().attachIterator(namespace, setting);
- UtilWaitThread.sleep(2 * 1000);
- try {
- c.namespaceOperations().checkIteratorConflicts(namespace, setting, EnumSet.allOf(IteratorScope.class));
- fail();
- } catch (AccumuloException e) {
- assertEquals(IllegalArgumentException.class.getName(), e.getCause().getClass().getName());
- }
- IteratorSetting setting2 = c.namespaceOperations().getIteratorSetting(namespace, setting.getName(), IteratorScope.scan);
- assertEquals(setting, setting2);
- assertTrue(c.namespaceOperations().listIterators(namespace).containsKey(iterName));
- assertTrue(c.tableOperations().listIterators(t1).containsKey(iterName));
- s = c.createScanner(t1, Authorizations.EMPTY);
- assertFalse(s.iterator().hasNext());
-
- // verify can see inserted entry again
- c.namespaceOperations().removeIterator(namespace, setting.getName(), EnumSet.allOf(IteratorScope.class));
- UtilWaitThread.sleep(2 * 1000);
- assertFalse(c.namespaceOperations().listIterators(namespace).containsKey(iterName));
- assertFalse(c.tableOperations().listIterators(t1).containsKey(iterName));
- s = c.createScanner(t1, Authorizations.EMPTY);
- assertTrue(s.iterator().hasNext());
- }
-
- @Test
- public void cloneTable() throws Exception {
- String namespace2 = namespace + "_clone";
- String t1 = namespace + ".1";
- String t2 = namespace + ".2";
- String t3 = namespace2 + ".2";
- String k1 = Property.TABLE_FILE_MAX.getKey();
- String k2 = Property.TABLE_FILE_REPLICATION.getKey();
- String k1v1 = "55";
- String k1v2 = "66";
- String k2v1 = "5";
- String k2v2 = "6";
-
- c.namespaceOperations().create(namespace);
- c.tableOperations().create(t1);
- assertTrue(c.tableOperations().exists(t1));
- assertFalse(c.namespaceOperations().exists(namespace2));
- assertFalse(c.tableOperations().exists(t2));
- assertFalse(c.tableOperations().exists(t3));
-
- try {
- // try to clone before the namespace exists
- c.tableOperations().clone(t1, t3, false, null, null); // should fail
- fail();
- } catch (AccumuloException e) {
- assertEquals(NamespaceNotFoundException.class.getName(), e.getCause().getClass().getName());
- }
-
- // try to clone when the target tables already exist
- c.namespaceOperations().create(namespace2);
- c.tableOperations().create(t2);
- c.tableOperations().create(t3);
- for (String t : Arrays.asList(t2, t3)) {
- try {
- c.tableOperations().clone(t1, t, false, null, null); // should fail
- fail();
- } catch (TableExistsException e) {
- c.tableOperations().delete(t);
- }
- }
-
- assertTrue(c.tableOperations().exists(t1));
- assertTrue(c.namespaceOperations().exists(namespace2));
- assertFalse(c.tableOperations().exists(t2));
- assertFalse(c.tableOperations().exists(t3));
-
- // set property with different values in two namespaces and a separate property with different values on the table and both namespaces
- assertFalse(checkNamespaceHasProp(namespace, k1, k1v1));
- assertFalse(checkNamespaceHasProp(namespace2, k1, k1v2));
- assertFalse(checkTableHasProp(t1, k1, k1v1));
- assertFalse(checkTableHasProp(t1, k1, k1v2));
- assertFalse(checkNamespaceHasProp(namespace, k2, k2v1));
- assertFalse(checkNamespaceHasProp(namespace2, k2, k2v1));
- assertFalse(checkTableHasProp(t1, k2, k2v1));
- assertFalse(checkTableHasProp(t1, k2, k2v2));
- c.namespaceOperations().setProperty(namespace, k1, k1v1);
- c.namespaceOperations().setProperty(namespace2, k1, k1v2);
- c.namespaceOperations().setProperty(namespace, k2, k2v1);
- c.namespaceOperations().setProperty(namespace2, k2, k2v1);
- c.tableOperations().setProperty(t1, k2, k2v2);
- assertTrue(checkNamespaceHasProp(namespace, k1, k1v1));
- assertTrue(checkNamespaceHasProp(namespace2, k1, k1v2));
- assertTrue(checkTableHasProp(t1, k1, k1v1));
- assertFalse(checkTableHasProp(t1, k1, k1v2));
- assertTrue(checkNamespaceHasProp(namespace, k2, k2v1));
- assertTrue(checkNamespaceHasProp(namespace2, k2, k2v1));
- assertFalse(checkTableHasProp(t1, k2, k2v1));
- assertTrue(checkTableHasProp(t1, k2, k2v2));
-
- // clone twice, once in same namespace, once in another
- for (String t : Arrays.asList(t2, t3))
- c.tableOperations().clone(t1, t, false, null, null);
-
- assertTrue(c.namespaceOperations().exists(namespace2));
- assertTrue(c.tableOperations().exists(t1));
- assertTrue(c.tableOperations().exists(t2));
- assertTrue(c.tableOperations().exists(t3));
-
- // verify the properties got transferred
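- // (k1 comes from whichever namespace each clone lives in; k2 was set directly on t1, so the clone copies it along)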
- assertTrue(checkTableHasProp(t1, k1, k1v1));
- assertTrue(checkTableHasProp(t2, k1, k1v1));
- assertTrue(checkTableHasProp(t3, k1, k1v2));
- assertTrue(checkTableHasProp(t1, k2, k2v2));
- assertTrue(checkTableHasProp(t2, k2, k2v2));
- assertTrue(checkTableHasProp(t3, k2, k2v2));
- }
-
- @Test
- public void renameNamespaceWithTable() throws Exception {
- String namespace2 = namespace + "_renamed";
- String t1 = namespace + ".t";
- String t2 = namespace2 + ".t";
-
- c.namespaceOperations().create(namespace);
- c.tableOperations().create(t1);
- assertTrue(c.namespaceOperations().exists(namespace));
- assertTrue(c.tableOperations().exists(t1));
- assertFalse(c.namespaceOperations().exists(namespace2));
- assertFalse(c.tableOperations().exists(t2));
-
- String namespaceId = c.namespaceOperations().namespaceIdMap().get(namespace);
- String tableId = c.tableOperations().tableIdMap().get(t1);
-
- c.namespaceOperations().rename(namespace, namespace2);
- assertFalse(c.namespaceOperations().exists(namespace));
- assertFalse(c.tableOperations().exists(t1));
- assertTrue(c.namespaceOperations().exists(namespace2));
- assertTrue(c.tableOperations().exists(t2));
-
- // verify the IDs didn't change
- String namespaceId2 = c.namespaceOperations().namespaceIdMap().get(namespace2);
- String tableId2 = c.tableOperations().tableIdMap().get(t2);
-
- assertEquals(namespaceId, namespaceId2);
- assertEquals(tableId, tableId2);
- }
-
- @Test
- public void verifyConstraintInheritance() throws Exception {
- String t1 = namespace + ".1";
- c.namespaceOperations().create(namespace);
- c.tableOperations().create(t1, new NewTableConfiguration().withoutDefaultIterators());
- String constraintClassName = NumericValueConstraint.class.getName();
-
- assertFalse(c.namespaceOperations().listConstraints(namespace).containsKey(constraintClassName));
- assertFalse(c.tableOperations().listConstraints(t1).containsKey(constraintClassName));
-
- c.namespaceOperations().addConstraint(namespace, constraintClassName);
- assertTrue(c.namespaceOperations().listConstraints(namespace).containsKey(constraintClassName));
- assertTrue(c.tableOperations().listConstraints(t1).containsKey(constraintClassName));
- int num = c.namespaceOperations().listConstraints(namespace).get(constraintClassName);
- assertEquals(num, (int) c.tableOperations().listConstraints(t1).get(constraintClassName));
- // doesn't take effect immediately, needs time to propagate to tserver's ZooKeeper cache
- UtilWaitThread.sleep(250);
-
- Mutation m1 = new Mutation("r1");
- Mutation m2 = new Mutation("r2");
- Mutation m3 = new Mutation("r3");
- m1.put("a", "b", new Value("abcde".getBytes(UTF_8)));
- m2.put("e", "f", new Value("123".getBytes(UTF_8)));
- m3.put("c", "d", new Value("zyxwv".getBytes(UTF_8)));
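- // only m2 carries a numeric value, so m1 and m3 should violate NumericValueConstraint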
- BatchWriter bw = c.createBatchWriter(t1, new BatchWriterConfig());
- bw.addMutations(Arrays.asList(m1, m2, m3));
- try {
- bw.close();
- fail();
- } catch (MutationsRejectedException e) {
- assertEquals(1, e.getConstraintViolationSummaries().size());
- assertEquals(2, e.getConstraintViolationSummaries().get(0).getNumberOfViolatingMutations());
- }
- c.namespaceOperations().removeConstraint(namespace, num);
- assertFalse(c.namespaceOperations().listConstraints(namespace).containsKey(constraintClassName));
- assertFalse(c.tableOperations().listConstraints(t1).containsKey(constraintClassName));
- // doesn't take effect immediately, needs time to propagate to tserver's ZooKeeper cache
- UtilWaitThread.sleep(250);
-
- bw = c.createBatchWriter(t1, new BatchWriterConfig());
- bw.addMutations(Arrays.asList(m1, m2, m3));
- bw.close();
- }
-
- @Test
- public void renameTable() throws Exception {
- String namespace2 = namespace + "_renamed";
- String t1 = namespace + ".1";
- String t2 = namespace2 + ".2";
- String t3 = namespace + ".3";
- String t4 = namespace + ".4";
- String t5 = "5";
-
- c.namespaceOperations().create(namespace);
- c.namespaceOperations().create(namespace2);
-
- assertTrue(c.namespaceOperations().exists(namespace));
- assertTrue(c.namespaceOperations().exists(namespace2));
- assertFalse(c.tableOperations().exists(t1));
- assertFalse(c.tableOperations().exists(t2));
- assertFalse(c.tableOperations().exists(t3));
- assertFalse(c.tableOperations().exists(t4));
- assertFalse(c.tableOperations().exists(t5));
-
- c.tableOperations().create(t1);
-
- try {
- c.tableOperations().rename(t1, t2);
- fail();
- } catch (AccumuloException e) {
- // this is expected, because we don't allow renames across namespaces
- assertEquals(ThriftTableOperationException.class.getName(), e.getCause().getClass().getName());
- assertEquals(TableOperation.RENAME, ((ThriftTableOperationException) e.getCause()).getOp());
- assertEquals(TableOperationExceptionType.INVALID_NAME, ((ThriftTableOperationException) e.getCause()).getType());
- }
-
- try {
- c.tableOperations().rename(t1, t5);
- fail();
- } catch (AccumuloException e) {
- // this is expected, because we don't allow renames across namespaces
- assertEquals(ThriftTableOperationException.class.getName(), e.getCause().getClass().getName());
- assertEquals(TableOperation.RENAME, ((ThriftTableOperationException) e.getCause()).getOp());
- assertEquals(TableOperationExceptionType.INVALID_NAME, ((ThriftTableOperationException) e.getCause()).getType());
- }
-
- assertTrue(c.tableOperations().exists(t1));
- assertFalse(c.tableOperations().exists(t2));
- assertFalse(c.tableOperations().exists(t3));
- assertFalse(c.tableOperations().exists(t4));
- assertFalse(c.tableOperations().exists(t5));
-
- // fully qualified rename
- c.tableOperations().rename(t1, t3);
- assertFalse(c.tableOperations().exists(t1));
- assertFalse(c.tableOperations().exists(t2));
- assertTrue(c.tableOperations().exists(t3));
- assertFalse(c.tableOperations().exists(t4));
- assertFalse(c.tableOperations().exists(t5));
- }
-
- private void loginAs(ClusterUser user) throws IOException {
- user.getToken();
- }
-
- /**
- * Tests new namespace permissions, as well as the changes namespaces bring to table permissions. For each permission, first verify that the user cannot
- * perform the action, then have root grant the permission and verify that the user now can.
- */
- @Test
- public void testPermissions() throws Exception {
- ClusterUser user1 = getUser(0), user2 = getUser(1), root = getAdminUser();
- String u1 = user1.getPrincipal();
- String u2 = user2.getPrincipal();
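- // a null password indicates SASL/Kerberos mode, where no PasswordToken applies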
- PasswordToken pass = (null != user1.getPassword() ? new PasswordToken(user1.getPassword()) : null);
-
- String n1 = namespace;
- String t1 = n1 + ".1";
- String t2 = n1 + ".2";
- String t3 = n1 + ".3";
-
- String n2 = namespace + "_2";
-
- loginAs(root);
- c.namespaceOperations().create(n1);
- c.tableOperations().create(t1);
-
- c.securityOperations().createLocalUser(u1, pass);
-
- loginAs(user1);
- Connector user1Con = c.getInstance().getConnector(u1, user1.getToken());
-
- try {
- user1Con.tableOperations().create(t2);
- fail();
- } catch (AccumuloSecurityException e) {
- expectPermissionDenied(e);
- }
-
- loginAs(root);
- c.securityOperations().grantNamespacePermission(u1, n1, NamespacePermission.CREATE_TABLE);
- loginAs(user1);
- user1Con.tableOperations().create(t2);
- loginAs(root);
- assertTrue(c.tableOperations().list().contains(t2));
- c.securityOperations().revokeNamespacePermission(u1, n1, NamespacePermission.CREATE_TABLE);
-
- loginAs(user1);
- try {
- user1Con.tableOperations().delete(t1);
- fail();
- } catch (AccumuloSecurityException e) {
- expectPermissionDenied(e);
- }
-
- loginAs(root);
- c.securityOperations().grantNamespacePermission(u1, n1, NamespacePermission.DROP_TABLE);
- loginAs(user1);
- user1Con.tableOperations().delete(t1);
- loginAs(root);
- assertFalse(c.tableOperations().list().contains(t1));
- c.securityOperations().revokeNamespacePermission(u1, n1, NamespacePermission.DROP_TABLE);
-
- c.tableOperations().create(t3);
- BatchWriter bw = c.createBatchWriter(t3, null);
- Mutation m = new Mutation("row");
- m.put("cf", "cq", "value");
- bw.addMutation(m);
- bw.close();
-
- loginAs(user1);
- Iterator<Entry<Key,Value>> i = user1Con.createScanner(t3, new Authorizations()).iterator();
- try {
- i.next();
- fail();
- } catch (RuntimeException e) {
- assertEquals(AccumuloSecurityException.class.getName(), e.getCause().getClass().getName());
- expectPermissionDenied((AccumuloSecurityException) e.getCause());
- }
-
- loginAs(user1);
- m = new Mutation(u1);
- m.put("cf", "cq", "turtles");
- bw = user1Con.createBatchWriter(t3, null);
- try {
- bw.addMutation(m);
- bw.close();
- fail();
- } catch (MutationsRejectedException e) {
- assertEquals(1, e.getSecurityErrorCodes().size());
- assertEquals(1, e.getSecurityErrorCodes().entrySet().iterator().next().getValue().size());
- switch (e.getSecurityErrorCodes().entrySet().iterator().next().getValue().iterator().next()) {
- case PERMISSION_DENIED:
- break;
- default:
- fail();
- }
- }
-
- loginAs(root);
- c.securityOperations().grantNamespacePermission(u1, n1, NamespacePermission.READ);
- loginAs(user1);
- i = user1Con.createScanner(t3, new Authorizations()).iterator();
- assertTrue(i.hasNext());
- loginAs(root);
- c.securityOperations().revokeNamespacePermission(u1, n1, NamespacePermission.READ);
- c.securityOperations().grantNamespacePermission(u1, n1, NamespacePermission.WRITE);
-
- loginAs(user1);
- m = new Mutation(u1);
- m.put("cf", "cq", "turtles");
- bw = user1Con.createBatchWriter(t3, null);
- bw.addMutation(m);
- bw.close();
- loginAs(root);
- c.securityOperations().revokeNamespacePermission(u1, n1, NamespacePermission.WRITE);
-
- loginAs(user1);
- try {
- user1Con.tableOperations().setProperty(t3, Property.TABLE_FILE_MAX.getKey(), "42");
- fail();
- } catch (AccumuloSecurityException e) {
- expectPermissionDenied(e);
- }
-
- loginAs(root);
- c.securityOperations().grantNamespacePermission(u1, n1, NamespacePermission.ALTER_TABLE);
- loginAs(user1);
- user1Con.tableOperations().setProperty(t3, Property.TABLE_FILE_MAX.getKey(), "42");
- user1Con.tableOperations().removeProperty(t3, Property.TABLE_FILE_MAX.getKey());
- loginAs(root);
- c.securityOperations().revokeNamespacePermission(u1, n1, NamespacePermission.ALTER_TABLE);
-
- loginAs(user1);
- try {
- user1Con.namespaceOperations().setProperty(n1, Property.TABLE_FILE_MAX.getKey(), "55");
- fail();
- } catch (AccumuloSecurityException e) {
- expectPermissionDenied(e);
- }
-
- loginAs(root);
- c.securityOperations().grantNamespacePermission(u1, n1, NamespacePermission.ALTER_NAMESPACE);
- loginAs(user1);
- user1Con.namespaceOperations().setProperty(n1, Property.TABLE_FILE_MAX.getKey(), "42");
- user1Con.namespaceOperations().removeProperty(n1, Property.TABLE_FILE_MAX.getKey());
- loginAs(root);
- c.securityOperations().revokeNamespacePermission(u1, n1, NamespacePermission.ALTER_NAMESPACE);
-
- loginAs(root);
- c.securityOperations().createLocalUser(u2, (root.getPassword() == null ? null : new PasswordToken(user2.getPassword())));
- loginAs(user1);
- try {
- user1Con.securityOperations().grantNamespacePermission(u2, n1, NamespacePermission.ALTER_NAMESPACE);
- fail();
- } catch (AccumuloSecurityException e) {
- expectPermissionDenied(e);
- }
-
- loginAs(root);
- c.securityOperations().grantNamespacePermission(u1, n1, NamespacePermission.GRANT);
- loginAs(user1);
- user1Con.securityOperations().grantNamespacePermission(u2, n1, NamespacePermission.ALTER_NAMESPACE);
- user1Con.securityOperations().revokeNamespacePermission(u2, n1, NamespacePermission.ALTER_NAMESPACE);
- loginAs(root);
- c.securityOperations().revokeNamespacePermission(u1, n1, NamespacePermission.GRANT);
-
- loginAs(user1);
- try {
- user1Con.namespaceOperations().create(n2);
- fail();
- } catch (AccumuloSecurityException e) {
- expectPermissionDenied(e);
- }
-
- loginAs(root);
- c.securityOperations().grantSystemPermission(u1, SystemPermission.CREATE_NAMESPACE);
- loginAs(user1);
- user1Con.namespaceOperations().create(n2);
- loginAs(root);
- c.securityOperations().revokeSystemPermission(u1, SystemPermission.CREATE_NAMESPACE);
-
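- // creating the namespace implicitly granted u1 permissions on it; revoke DROP_NAMESPACE so the delete below fails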
- c.securityOperations().revokeNamespacePermission(u1, n2, NamespacePermission.DROP_NAMESPACE);
- loginAs(user1);
- try {
- user1Con.namespaceOperations().delete(n2);
- fail();
- } catch (AccumuloSecurityException e) {
- expectPermissionDenied(e);
- }
-
- loginAs(root);
- c.securityOperations().grantSystemPermission(u1, SystemPermission.DROP_NAMESPACE);
- loginAs(user1);
- user1Con.namespaceOperations().delete(n2);
- loginAs(root);
- c.securityOperations().revokeSystemPermission(u1, SystemPermission.DROP_NAMESPACE);
-
- loginAs(user1);
- try {
- user1Con.namespaceOperations().setProperty(n1, Property.TABLE_FILE_MAX.getKey(), "33");
- fail();
- } catch (AccumuloSecurityException e) {
- expectPermissionDenied(e);
- }
-
- loginAs(root);
- c.securityOperations().grantSystemPermission(u1, SystemPermission.ALTER_NAMESPACE);
- loginAs(user1);
- user1Con.namespaceOperations().setProperty(n1, Property.TABLE_FILE_MAX.getKey(), "33");
- user1Con.namespaceOperations().removeProperty(n1, Property.TABLE_FILE_MAX.getKey());
- loginAs(root);
- c.securityOperations().revokeSystemPermission(u1, SystemPermission.ALTER_NAMESPACE);
- }
-
- @Test
- public void verifySystemPropertyInheritance() throws Exception {
- String t1 = "1";
- String t2 = namespace + "." + t1;
- c.tableOperations().create(t1);
- c.namespaceOperations().create(namespace);
- c.tableOperations().create(t2);
-
- // verify iterator inheritance
- _verifySystemPropertyInheritance(t1, t2, Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.sum", "20," + SimpleFilter.class.getName(), false);
-
- // verify constraint inheritance
- _verifySystemPropertyInheritance(t1, t2, Property.TABLE_CONSTRAINT_PREFIX.getKey() + "42", NumericValueConstraint.class.getName(), false);
-
- // verify other inheritance
- _verifySystemPropertyInheritance(t1, t2, Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "dummy", "dummy", true);
- }
-
- private void _verifySystemPropertyInheritance(String defaultNamespaceTable, String namespaceTable, String k, String v, boolean systemNamespaceShouldInherit)
- throws Exception {
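- // sets property k=v at the system level, then checks which namespaces and tables inherit it;
- // systemNamespaceShouldInherit says whether the accumulo namespace and its tables should see it too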
- // nobody should have any of these properties yet
- assertFalse(c.instanceOperations().getSystemConfiguration().containsValue(v));
- assertFalse(checkNamespaceHasProp(Namespaces.ACCUMULO_NAMESPACE, k, v));
- assertFalse(checkTableHasProp(RootTable.NAME, k, v));
- assertFalse(checkTableHasProp(MetadataTable.NAME, k, v));
- assertFalse(checkNamespaceHasProp(Namespaces.DEFAULT_NAMESPACE, k, v));
- assertFalse(checkTableHasProp(defaultNamespaceTable, k, v));
- assertFalse(checkNamespaceHasProp(namespace, k, v));
- assertFalse(checkTableHasProp(namespaceTable, k, v));
-
- // set the property; verify the accumulo namespace is the only one that may be unaffected
- c.instanceOperations().setProperty(k, v);
- // doesn't take effect immediately, needs time to propagate to tserver's ZooKeeper cache
- UtilWaitThread.sleep(250);
- assertTrue(c.instanceOperations().getSystemConfiguration().containsValue(v));
- assertEquals(systemNamespaceShouldInherit, checkNamespaceHasProp(Namespaces.ACCUMULO_NAMESPACE, k, v));
- assertEquals(systemNamespaceShouldInherit, checkTableHasProp(RootTable.NAME, k, v));
- assertEquals(systemNamespaceShouldInherit, checkTableHasProp(MetadataTable.NAME, k, v));
- assertTrue(checkNamespaceHasProp(Namespaces.DEFAULT_NAMESPACE, k, v));
- assertTrue(checkTableHasProp(defaultNamespaceTable, k, v));
- assertTrue(checkNamespaceHasProp(namespace, k, v));
- assertTrue(checkTableHasProp(namespaceTable, k, v));
-
- // verify it is no longer inherited
- c.instanceOperations().removeProperty(k);
- // doesn't take effect immediately, needs time to propagate to tserver's ZooKeeper cache
- UtilWaitThread.sleep(250);
- assertFalse(c.instanceOperations().getSystemConfiguration().containsValue(v));
- assertFalse(checkNamespaceHasProp(Namespaces.ACCUMULO_NAMESPACE, k, v));
- assertFalse(checkTableHasProp(RootTable.NAME, k, v));
- assertFalse(checkTableHasProp(MetadataTable.NAME, k, v));
- assertFalse(checkNamespaceHasProp(Namespaces.DEFAULT_NAMESPACE, k, v));
- assertFalse(checkTableHasProp(defaultNamespaceTable, k, v));
- assertFalse(checkNamespaceHasProp(namespace, k, v));
- assertFalse(checkTableHasProp(namespaceTable, k, v));
- }
-
- @Test
- public void listNamespaces() throws Exception {
- SortedSet<String> namespaces = c.namespaceOperations().list();
- Map<String,String> map = c.namespaceOperations().namespaceIdMap();
- assertEquals(2, namespaces.size());
- assertEquals(2, map.size());
- assertTrue(namespaces.contains(Namespaces.ACCUMULO_NAMESPACE));
- assertTrue(namespaces.contains(Namespaces.DEFAULT_NAMESPACE));
- assertFalse(namespaces.contains(namespace));
- assertEquals(Namespaces.ACCUMULO_NAMESPACE_ID, map.get(Namespaces.ACCUMULO_NAMESPACE));
- assertEquals(Namespaces.DEFAULT_NAMESPACE_ID, map.get(Namespaces.DEFAULT_NAMESPACE));
- assertNull(map.get(namespace));
-
- c.namespaceOperations().create(namespace);
- namespaces = c.namespaceOperations().list();
- map = c.namespaceOperations().namespaceIdMap();
- assertEquals(3, namespaces.size());
- assertEquals(3, map.size());
- assertTrue(namespaces.contains(Namespaces.ACCUMULO_NAMESPACE));
- assertTrue(namespaces.contains(Namespaces.DEFAULT_NAMESPACE));
- assertTrue(namespaces.contains(namespace));
- assertEquals(Namespaces.ACCUMULO_NAMESPACE_ID, map.get(Namespaces.ACCUMULO_NAMESPACE));
- assertEquals(Namespaces.DEFAULT_NAMESPACE_ID, map.get(Namespaces.DEFAULT_NAMESPACE));
- assertNotNull(map.get(namespace));
-
- c.namespaceOperations().delete(namespace);
- namespaces = c.namespaceOperations().list();
- map = c.namespaceOperations().namespaceIdMap();
- assertEquals(2, namespaces.size());
- assertEquals(2, map.size());
- assertTrue(namespaces.contains(Namespaces.ACCUMULO_NAMESPACE));
- assertTrue(namespaces.contains(Namespaces.DEFAULT_NAMESPACE));
- assertFalse(namespaces.contains(namespace));
- assertEquals(Namespaces.ACCUMULO_NAMESPACE_ID, map.get(Namespaces.ACCUMULO_NAMESPACE));
- assertEquals(Namespaces.DEFAULT_NAMESPACE_ID, map.get(Namespaces.DEFAULT_NAMESPACE));
- assertNull(map.get(namespace));
- }
-
- @Test
- public void loadClass() throws Exception {
- assertTrue(c.namespaceOperations().testClassLoad(Namespaces.DEFAULT_NAMESPACE, VersioningIterator.class.getName(), SortedKeyValueIterator.class.getName()));
- assertFalse(c.namespaceOperations().testClassLoad(Namespaces.DEFAULT_NAMESPACE, "dummy", SortedKeyValueIterator.class.getName()));
- try {
- c.namespaceOperations().testClassLoad(namespace, "dummy", "dummy");
- fail();
- } catch (NamespaceNotFoundException e) {
- // expected, ignore
- }
- }
-
- @Test
- public void testModifyingPermissions() throws Exception {
- String tableName = namespace + ".modify";
- c.namespaceOperations().create(namespace);
- c.tableOperations().create(tableName);
- assertTrue(c.securityOperations().hasTablePermission(c.whoami(), tableName, TablePermission.READ));
- c.securityOperations().revokeTablePermission(c.whoami(), tableName, TablePermission.READ);
- assertFalse(c.securityOperations().hasTablePermission(c.whoami(), tableName, TablePermission.READ));
- c.securityOperations().grantTablePermission(c.whoami(), tableName, TablePermission.READ);
- assertTrue(c.securityOperations().hasTablePermission(c.whoami(), tableName, TablePermission.READ));
- c.tableOperations().delete(tableName);
-
- try {
- c.securityOperations().hasTablePermission(c.whoami(), tableName, TablePermission.READ);
- fail();
- } catch (Exception e) {
- if (!(e instanceof AccumuloSecurityException) || !((AccumuloSecurityException) e).getSecurityErrorCode().equals(SecurityErrorCode.TABLE_DOESNT_EXIST))
- throw new Exception("Has permission resulted in " + e.getClass().getName(), e);
- }
-
- try {
- c.securityOperations().grantTablePermission(c.whoami(), tableName, TablePermission.READ);
- fail();
- } catch (Exception e) {
- if (!(e instanceof AccumuloSecurityException) || !((AccumuloSecurityException) e).getSecurityErrorCode().equals(SecurityErrorCode.TABLE_DOESNT_EXIST))
- throw new Exception("Has permission resulted in " + e.getClass().getName(), e);
- }
-
- try {
- c.securityOperations().revokeTablePermission(c.whoami(), tableName, TablePermission.READ);
- fail();
- } catch (Exception e) {
- if (!(e instanceof AccumuloSecurityException) || !((AccumuloSecurityException) e).getSecurityErrorCode().equals(SecurityErrorCode.TABLE_DOESNT_EXIST))
- throw new Exception("Has permission resulted in " + e.getClass().getName(), e);
- }
-
- assertTrue(c.securityOperations().hasNamespacePermission(c.whoami(), namespace, NamespacePermission.READ));
- c.securityOperations().revokeNamespacePermission(c.whoami(), namespace, NamespacePermission.READ);
- assertFalse(c.securityOperations().hasNamespacePermission(c.whoami(), namespace, NamespacePermission.READ));
- c.securityOperations().grantNamespacePermission(c.whoami(), namespace, NamespacePermission.READ);
- assertTrue(c.securityOperations().hasNamespacePermission(c.whoami(), namespace, NamespacePermission.READ));
-
- c.namespaceOperations().delete(namespace);
-
- try {
- c.securityOperations().hasTablePermission(c.whoami(), tableName, TablePermission.READ);
- fail();
- } catch (Exception e) {
- if (!(e instanceof AccumuloSecurityException) || !((AccumuloSecurityException) e).getSecurityErrorCode().equals(SecurityErrorCode.TABLE_DOESNT_EXIST))
- throw new Exception("Has permission resulted in " + e.getClass().getName(), e);
- }
-
- try {
- c.securityOperations().grantTablePermission(c.whoami(), tableName, TablePermission.READ);
- fail();
- } catch (Exception e) {
- if (!(e instanceof AccumuloSecurityException) || !((AccumuloSecurityException) e).getSecurityErrorCode().equals(SecurityErrorCode.TABLE_DOESNT_EXIST))
- throw new Exception("Has permission resulted in " + e.getClass().getName(), e);
- }
-
- try {
- c.securityOperations().revokeTablePermission(c.whoami(), tableName, TablePermission.READ);
- fail();
- } catch (Exception e) {
- if (!(e instanceof AccumuloSecurityException) || !((AccumuloSecurityException) e).getSecurityErrorCode().equals(SecurityErrorCode.TABLE_DOESNT_EXIST))
- throw new Exception("Has permission resulted in " + e.getClass().getName(), e);
- }
-
- try {
- c.securityOperations().hasNamespacePermission(c.whoami(), namespace, NamespacePermission.READ);
- fail();
- } catch (Exception e) {
- if (!(e instanceof AccumuloSecurityException) || !((AccumuloSecurityException) e).getSecurityErrorCode().equals(SecurityErrorCode.NAMESPACE_DOESNT_EXIST))
- throw new Exception("Has permission resulted in " + e.getClass().getName(), e);
- }
-
- try {
- c.securityOperations().grantNamespacePermission(c.whoami(), namespace, NamespacePermission.READ);
- fail();
- } catch (Exception e) {
- if (!(e instanceof AccumuloSecurityException) || !((AccumuloSecurityException) e).getSecurityErrorCode().equals(SecurityErrorCode.NAMESPACE_DOESNT_EXIST))
- throw new Exception("Has permission resulted in " + e.getClass().getName(), e);
- }
-
- try {
- c.securityOperations().revokeNamespacePermission(c.whoami(), namespace, NamespacePermission.READ);
- fail();
- } catch (Exception e) {
- if (!(e instanceof AccumuloSecurityException) || !((AccumuloSecurityException) e).getSecurityErrorCode().equals(SecurityErrorCode.NAMESPACE_DOESNT_EXIST))
- throw new Exception("Has permission resulted in " + e.getClass().getName(), e);
- }
-
- }
-
- @Test
- public void verifyTableOperationsExceptions() throws Exception {
- String tableName = namespace + ".1";
- IteratorSetting setting = new IteratorSetting(200, VersioningIterator.class);
- Text a = new Text("a");
- Text z = new Text("z");
- TableOperations ops = c.tableOperations();
-
- // this one doesn't throw an exception, so don't fail; just check that it works
- assertFalse(ops.exists(tableName));
-
- // table operations that should throw an AccumuloException caused by NamespaceNotFoundException
- int numRun = 0;
- ACCUMULOEXCEPTIONS_NAMESPACENOTFOUND: for (int i = 0;; ++i)
- try {
- switch (i) {
- case 0:
- ops.create(tableName);
- fail();
- break;
- case 1:
- ops.create("a");
- ops.clone("a", tableName, true, Collections.<String,String> emptyMap(), Collections.<String> emptySet());
- fail();
- break;
- case 2:
- ops.importTable(tableName, System.getProperty("user.dir") + "/target");
- fail();
- break;
- default:
- // break out of infinite loop
- assertEquals(3, i); // check test integrity
- assertEquals(3, numRun); // check test integrity
- break ACCUMULOEXCEPTIONS_NAMESPACENOTFOUND;
- }
- } catch (Exception e) {
- numRun++;
- if (!(e instanceof AccumuloException) || !(e.getCause() instanceof NamespaceNotFoundException))
- throw new Exception("Case " + i + " resulted in " + e.getClass().getName(), e);
- }
-
- // table operations that should throw an AccumuloException caused by a TableNotFoundException caused by a NamespaceNotFoundException
- // these are here because we didn't declare TableNotFoundException in the API :(
- numRun = 0;
- ACCUMULOEXCEPTIONS_TABLENOTFOUND: for (int i = 0;; ++i)
- try {
- switch (i) {
- case 0:
- ops.removeConstraint(tableName, 0);
- fail();
- break;
- case 1:
- ops.removeProperty(tableName, "a");
- fail();
- break;
- case 2:
- ops.setProperty(tableName, "a", "b");
- fail();
- break;
- default:
- // break out of infinite loop
- assertEquals(3, i); // check test integrity
- assertEquals(3, numRun); // check test integrity
- break ACCUMULOEXCEPTIONS_TABLENOTFOUND;
- }
- } catch (Exception e) {
- numRun++;
- if (!(e instanceof AccumuloException) || !(e.getCause() instanceof TableNotFoundException)
- || !(e.getCause().getCause() instanceof NamespaceNotFoundException))
- throw new Exception("Case " + i + " resulted in " + e.getClass().getName(), e);
- }
-
- // table operations that should throw a TableNotFoundException caused by NamespaceNotFoundException
- numRun = 0;
- TABLENOTFOUNDEXCEPTIONS: for (int i = 0;; ++i)
- try {
- switch (i) {
- case 0:
- ops.addConstraint(tableName, NumericValueConstraint.class.getName());
- fail();
- break;
- case 1:
- ops.addSplits(tableName, new TreeSet<Text>());
- fail();
- break;
- case 2:
- ops.attachIterator(tableName, setting);
- fail();
- break;
- case 3:
- ops.cancelCompaction(tableName);
- fail();
- break;
- case 4:
- ops.checkIteratorConflicts(tableName, setting, EnumSet.allOf(IteratorScope.class));
- fail();
- break;
- case 5:
- ops.clearLocatorCache(tableName);
- fail();
- break;
- case 6:
- ops.clone(tableName, "2", true, Collections.<String,String> emptyMap(), Collections.<String> emptySet());
- fail();
- break;
- case 7:
- ops.compact(tableName, a, z, true, true);
- fail();
- break;
- case 8:
- ops.delete(tableName);
- fail();
- break;
- case 9:
- ops.deleteRows(tableName, a, z);
- fail();
- break;
- case 10:
- ops.splitRangeByTablets(tableName, new Range(), 10);
- fail();
- break;
- case 11:
- ops.exportTable(tableName, namespace + "_dir");
- fail();
- break;
- case 12:
- ops.flush(tableName, a, z, true);
- fail();
- break;
- case 13:
- ops.getDiskUsage(Collections.singleton(tableName));
- fail();
- break;
- case 14:
- ops.getIteratorSetting(tableName, "a", IteratorScope.scan);
- fail();
- break;
- case 15:
- ops.getLocalityGroups(tableName);
- fail();
- break;
- case 16:
- ops.getMaxRow(tableName, Authorizations.EMPTY, a, true, z, true);
- fail();
- break;
- case 17:
- ops.getProperties(tableName);
- fail();
- break;
- case 18:
- ops.importDirectory(tableName, "", "", false);
- fail();
- break;
- case 19:
- ops.testClassLoad(tableName, VersioningIterator.class.getName(), SortedKeyValueIterator.class.getName());
- fail();
- break;
- case 20:
- ops.listConstraints(tableName);
- fail();
- break;
- case 21:
- ops.listIterators(tableName);
- fail();
- break;
- case 22:
- ops.listSplits(tableName);
- fail();
- break;
- case 23:
- ops.merge(tableName, a, z);
- fail();
- break;
- case 24:
- ops.offline(tableName, true);
- fail();
- break;
- case 25:
- ops.online(tableName, true);
- fail();
- break;
- case 26:
- ops.removeIterator(tableName, "a", EnumSet.of(IteratorScope.scan));
- fail();
- break;
- case 27:
- ops.rename(tableName, tableName + "2");
- fail();
- break;
- case 28:
- ops.setLocalityGroups(tableName, Collections.<String,Set<Text>> emptyMap());
- fail();
- break;
- default:
- // break out of infinite loop
- assertEquals(29, i); // check test integrity
- assertEquals(29, numRun); // check test integrity
- break TABLENOTFOUNDEXCEPTIONS;
- }
- } catch (Exception e) {
- numRun++;
- if (!(e instanceof TableNotFoundException) || !(e.getCause() instanceof NamespaceNotFoundException))
- throw new Exception("Case " + i + " resulted in " + e.getClass().getName(), e);
- }
- }
-
- @Test
- public void verifyNamespaceOperationsExceptions() throws Exception {
- IteratorSetting setting = new IteratorSetting(200, VersioningIterator.class);
- NamespaceOperations ops = c.namespaceOperations();
-
- // this one doesn't throw an exception, so don't fail; just check that it works
- assertFalse(ops.exists(namespace));
-
- // namespace operations that should throw a NamespaceNotFoundException
- int numRun = 0;
- NAMESPACENOTFOUND: for (int i = 0;; ++i)
- try {
- switch (i) {
- case 0:
- ops.addConstraint(namespace, NumericValueConstraint.class.getName());
- fail();
- break;
- case 1:
- ops.attachIterator(namespace, setting);
- fail();
- break;
- case 2:
- ops.checkIteratorConflicts(namespace, setting, EnumSet.of(IteratorScope.scan));
- fail();
- break;
- case 3:
- ops.delete(namespace);
- fail();
- break;
- case 4:
- ops.getIteratorSetting(namespace, "thing", IteratorScope.scan);
- fail();
- break;
- case 5:
- ops.getProperties(namespace);
- fail();
- break;
- case 6:
- ops.listConstraints(namespace);
- fail();
- break;
- case 7:
- ops.listIterators(namespace);
- fail();
- break;
- case 8:
- ops.removeConstraint(namespace, 1);
- fail();
- break;
- case 9:
- ops.removeIterator(namespace, "thing", EnumSet.allOf(IteratorScope.class));
- fail();
- break;
- case 10:
- ops.removeProperty(namespace, "a");
- fail();
- break;
- case 11:
- ops.rename(namespace, namespace + "2");
- fail();
- break;
- case 12:
- ops.setProperty(namespace, "k", "v");
- fail();
- break;
- case 13:
- ops.testClassLoad(namespace, VersioningIterator.class.getName(), SortedKeyValueIterator.class.getName());
- fail();
- break;
- default:
- // break out of infinite loop
- assertEquals(14, i); // check test integrity
- assertEquals(14, numRun); // check test integrity
- break NAMESPACENOTFOUND;
- }
- } catch (Exception e) {
- numRun++;
- if (!(e instanceof NamespaceNotFoundException))
- throw new Exception("Case " + i + " resulted in " + e.getClass().getName(), e);
- }
-
- // namespace operations that should throw a NamespaceExistsException
- numRun = 0;
- NAMESPACEEXISTS: for (int i = 0;; ++i)
- try {
- switch (i) {
- case 0:
- ops.create(namespace + "0");
- ops.create(namespace + "0"); // should fail here
- fail();
- break;
- case 1:
- ops.create(namespace + i + "_1");
- ops.create(namespace + i + "_2");
- ops.rename(namespace + i + "_1", namespace + i + "_2"); // should fail here
- fail();
- break;
- case 2:
- ops.create(Namespaces.DEFAULT_NAMESPACE);
- fail();
- break;
- case 3:
- ops.create(Namespaces.ACCUMULO_NAMESPACE);
- fail();
- break;
- case 4:
- ops.create(namespace + i + "_1");
- ops.rename(namespace + i + "_1", Namespaces.DEFAULT_NAMESPACE); // should fail here
- fail();
- break;
- case 5:
- ops.create(namespace + i + "_1");
- ops.rename(namespace + i + "_1", Namespaces.ACCUMULO_NAMESPACE); // should fail here
- fail();
- break;
- default:
- // break out of infinite loop
- assertEquals(6, i); // check test integrity
- assertEquals(6, numRun); // check test integrity
- break NAMESPACEEXISTS;
- }
- } catch (Exception e) {
- numRun++;
- if (!(e instanceof NamespaceExistsException))
- throw new Exception("Case " + i + " resulted in " + e.getClass().getName(), e);
- }
- }
-
- private boolean checkTableHasProp(String t, String propKey, String propVal) {
- return checkHasProperty(t, propKey, propVal, true);
- }
-
- private boolean checkNamespaceHasProp(String n, String propKey, String propVal) {
- return checkHasProperty(n, propKey, propVal, false);
- }
-
- private boolean checkHasProperty(String name, String propKey, String propVal, boolean nameIsTable) {
- try {
- Iterable<Entry<String,String>> iterable = nameIsTable ? c.tableOperations().getProperties(name) : c.namespaceOperations().getProperties(name);
- for (Entry<String,String> e : iterable)
- if (propKey.equals(e.getKey()))
- return propVal.equals(e.getValue());
- return false;
- } catch (Exception e) {
- fail();
- return false;
- }
- }
-
- public static class SimpleFilter extends Filter {
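- // filters out every entry whose column family is "a"; attached above to verify namespace iterator inheritance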
- @Override
- public boolean accept(Key k, Value v) {
- return !k.getColumnFamily().toString().equals("a");
- }
- }
-
- private void expectPermissionDenied(AccumuloSecurityException sec) {
- assertEquals(SecurityErrorCode.class, sec.getSecurityErrorCode().getClass());
- switch (sec.getSecurityErrorCode()) {
- case PERMISSION_DENIED:
- break;
- default:
- fail();
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/RecoveryCompactionsAreFlushesIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/RecoveryCompactionsAreFlushesIT.java b/test/src/test/java/org/apache/accumulo/test/RecoveryCompactionsAreFlushesIT.java
deleted file mode 100644
index 60b3cf7..0000000
--- a/test/src/test/java/org/apache/accumulo/test/RecoveryCompactionsAreFlushesIT.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import java.util.Map.Entry;
-
-import org.apache.accumulo.cluster.ClusterControl;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.Assert;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-// ACCUMULO-3010
-public class RecoveryCompactionsAreFlushesIT extends AccumuloClusterHarness {
-
- @Override
- public int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setNumTservers(1);
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
- // use the raw local file system so that write-ahead logs sync and flush, which recovery requires
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- @Test
- public void test() throws Exception {
- // create a table
- String tableName = getUniqueNames(1)[0];
- Connector c = getConnector();
- c.tableOperations().create(tableName);
- c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "100");
- c.tableOperations().setProperty(tableName, Property.TABLE_FILE_MAX.getKey(), "3");
- // create 3 flush files
- BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
- Mutation m = new Mutation("a");
- m.put("b", "c", new Value("v".getBytes()));
- for (int i = 0; i < 3; i++) {
- bw.addMutation(m);
- bw.flush();
- c.tableOperations().flush(tableName, null, null, true);
- }
- // create an unsaved mutation
- bw.addMutation(m);
- bw.close();
-
- ClusterControl control = cluster.getClusterControl();
-
- // kill the tablet servers
- control.stopAllServers(ServerType.TABLET_SERVER);
-
- // recover
- control.startAllServers(ServerType.TABLET_SERVER);
-
- // ensure the table is readable
- Iterators.size(c.createScanner(tableName, Authorizations.EMPTY).iterator());
-
- // ensure that the recovery was not a merging minor compaction
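- // (a merging minor compaction would have produced a file whose name starts with "M")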
- Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
- for (Entry<Key,Value> entry : s) {
- String filename = entry.getKey().getColumnQualifier().toString();
- String[] parts = filename.split("/");
- Assert.assertFalse(parts[parts.length - 1].startsWith("M"));
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/RewriteTabletDirectoriesIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/RewriteTabletDirectoriesIT.java b/test/src/test/java/org/apache/accumulo/test/RewriteTabletDirectoriesIT.java
deleted file mode 100644
index b3f8959..0000000
--- a/test/src/test/java/org/apache/accumulo/test/RewriteTabletDirectoriesIT.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.BufferedOutputStream;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.util.Collections;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.init.Initialize;
-import org.apache.accumulo.server.util.Admin;
-import org.apache.accumulo.server.util.RandomizeVolumes;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-// ACCUMULO-3263
-public class RewriteTabletDirectoriesIT extends ConfigurableMacBase {
-
- @Override
- public int defaultTimeoutSeconds() {
- return 4 * 60;
- }
-
- private Path v1, v2;
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- File baseDir = cfg.getDir();
- File volDirBase = new File(baseDir, "volumes");
- File v1f = new File(volDirBase, "v1");
- File v2f = new File(volDirBase, "v2");
- v1 = new Path("file://" + v1f.getAbsolutePath());
- v2 = new Path("file://" + v2f.getAbsolutePath());
-
- // Use a VolumeChooser that should spread tablet directories across volumes more fairly
- cfg.setProperty(Property.GENERAL_VOLUME_CHOOSER, FairVolumeChooser.class.getName());
- // Start MAC with one local file system volume; a second is added partway through the test
- cfg.setProperty(Property.INSTANCE_VOLUMES, v1.toString());
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- super.configure(cfg, hadoopCoreSite);
- }
-
- @Test
- public void test() throws Exception {
- Connector c = getConnector();
- c.securityOperations().grantTablePermission(c.whoami(), MetadataTable.NAME, TablePermission.WRITE);
- final String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
-
- // Write some data to a table and add some splits
- BatchWriter bw = c.createBatchWriter(tableName, null);
- final SortedSet<Text> splits = new TreeSet<Text>();
- for (String split : "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",")) {
- splits.add(new Text(split));
- Mutation m = new Mutation(new Text(split));
- m.put(new byte[] {}, new byte[] {}, new byte[] {});
- bw.addMutation(m);
- }
- bw.close();
- c.tableOperations().addSplits(tableName, splits);
-
- BatchScanner scanner = c.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1);
- DIRECTORY_COLUMN.fetch(scanner);
- String tableId = c.tableOperations().tableIdMap().get(tableName);
- assertNotNull("TableID for " + tableName + " was null", tableId);
- scanner.setRanges(Collections.singletonList(TabletsSection.getRange(tableId)));
- // verify the directory entries are all on v1, make a few entries relative
- bw = c.createBatchWriter(MetadataTable.NAME, null);
- int count = 0;
- for (Entry<Key,Value> entry : scanner) {
- assertTrue("Expected " + entry.getValue() + " to contain " + v1, entry.getValue().toString().contains(v1.toString()));
- count++;
- if (count % 2 == 0) {
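- // rewrite the entry as a relative path ("/<dirname>") by dropping the volume prefix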
- String[] parts = entry.getValue().toString().split("/");
- Key key = entry.getKey();
- Mutation m = new Mutation(key.getRow());
- m.put(key.getColumnFamily(), key.getColumnQualifier(), new Value((Path.SEPARATOR + parts[parts.length - 1]).getBytes()));
- bw.addMutation(m);
- }
- }
- bw.close();
- assertEquals(splits.size() + 1, count);
-
- // This should fail: only one volume
- assertEquals(1, cluster.exec(RandomizeVolumes.class, "-z", cluster.getZooKeepers(), "-i", c.getInstance().getInstanceName(), "-t", tableName).waitFor());
-
- cluster.stop();
-
- // add the 2nd volume
- Configuration conf = new Configuration(false);
- conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
- conf.set(Property.INSTANCE_VOLUMES.getKey(), v1.toString() + "," + v2.toString());
- BufferedOutputStream fos = new BufferedOutputStream(new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
- conf.writeXml(fos);
- fos.close();
-
- // initialize volume
- assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").waitFor());
- cluster.start();
- c = getConnector();
-
- // change the directory entries
- assertEquals(0, cluster.exec(Admin.class, "randomizeVolumes", "-t", tableName).waitFor());
-
- // verify a more equal sharing
- int v1Count = 0, v2Count = 0;
- for (Entry<Key,Value> entry : scanner) {
- if (entry.getValue().toString().contains(v1.toString())) {
- v1Count++;
- }
- if (entry.getValue().toString().contains(v2.toString())) {
- v2Count++;
- }
- }
-
- log.info("Count for volume1: " + v1Count);
- log.info("Count for volume2: " + v2Count);
-
- assertEquals(splits.size() + 1, v1Count + v2Count);
- // a fair chooser will differ by less than count(volumes)
- assertTrue("Expected the number of files to differ between volumes by less than 10. " + v1Count + " " + v2Count, Math.abs(v1Count - v2Count) < 2);
- // verify we can read the old data
- count = 0;
- for (Entry<Key,Value> entry : c.createScanner(tableName, Authorizations.EMPTY)) {
- assertTrue("Found unexpected entry in table: " + entry, splits.contains(entry.getKey().getRow()));
- count++;
- }
- assertEquals(splits.size(), count);
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/ScanIteratorIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/ScanIteratorIT.java b/test/src/test/java/org/apache/accumulo/test/ScanIteratorIT.java
deleted file mode 100644
index 00ac235..0000000
--- a/test/src/test/java/org/apache/accumulo/test/ScanIteratorIT.java
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.Collections;
-import java.util.Map;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ScannerBase;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.functional.AuthsIterator;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ScanIteratorIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(ScanIteratorIT.class);
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setNumTservers(1);
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60;
- }
-
- private Connector connector;
- private String tableName;
- private String user;
- private boolean saslEnabled;
-
- @Before
- public void setup() throws Exception {
- connector = getConnector();
- tableName = getUniqueNames(1)[0];
-
- connector.tableOperations().create(tableName);
- ClientConfiguration clientConfig = cluster.getClientConfig();
- ClusterUser clusterUser = getUser(0);
- user = clusterUser.getPrincipal();
- PasswordToken userToken;
- if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
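- // under SASL/Kerberos the user has no password, so the account is created without a token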
- userToken = null;
- saslEnabled = true;
- } else {
- userToken = new PasswordToken(clusterUser.getPassword());
- saslEnabled = false;
- }
- if (connector.securityOperations().listLocalUsers().contains(user)) {
- log.info("Dropping {}", user);
- connector.securityOperations().dropLocalUser(user);
- }
- connector.securityOperations().createLocalUser(user, userToken);
- connector.securityOperations().grantTablePermission(user, tableName, TablePermission.READ);
- connector.securityOperations().grantTablePermission(user, tableName, TablePermission.WRITE);
- connector.securityOperations().changeUserAuthorizations(user, AuthsIterator.AUTHS);
- }
-
- @After
- public void tearDown() throws Exception {
- if (null != user) {
- if (saslEnabled) {
- ClusterUser rootUser = getAdminUser();
- UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
- }
- connector.securityOperations().dropLocalUser(user);
- }
- }
-
- @Test
- public void testAuthsPresentInIteratorEnvironment() throws Exception {
- runTest(AuthsIterator.AUTHS, false);
- }
-
- @Test
- public void testAuthsNotPresentInIteratorEnvironment() throws Exception {
- runTest(new Authorizations("B"), true);
- }
-
- @Test
- public void testEmptyAuthsInIteratorEnvironment() throws Exception {
- runTest(Authorizations.EMPTY, true);
- }
-
- private void runTest(ScannerBase scanner, Authorizations auths, boolean shouldFail) throws AccumuloSecurityException, AccumuloException,
- TableNotFoundException {
- int count = 0;
- for (Map.Entry<Key,Value> entry : scanner) {
- assertEquals(shouldFail ? AuthsIterator.FAIL : AuthsIterator.SUCCESS, entry.getKey().getRow().toString());
- count++;
- }
-
- assertEquals(1, count);
- }
-
- private void runTest(Authorizations auths, boolean shouldFail) throws Exception {
- ClusterUser clusterUser = getUser(0);
- Connector userC = getCluster().getConnector(clusterUser.getPrincipal(), clusterUser.getToken());
- writeTestMutation(userC);
-
- IteratorSetting setting = new IteratorSetting(10, AuthsIterator.class);
-
- Scanner scanner = userC.createScanner(tableName, auths);
- scanner.addScanIterator(setting);
-
- BatchScanner batchScanner = userC.createBatchScanner(tableName, auths, 1);
- batchScanner.setRanges(Collections.singleton(new Range("1")));
- batchScanner.addScanIterator(setting);
-
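- // run the same check through both scanner types; the iterator environment should expose the same auths either way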
- runTest(scanner, auths, shouldFail);
- runTest(batchScanner, auths, shouldFail);
-
- scanner.close();
- batchScanner.close();
- }
-
- private void writeTestMutation(Connector userC) throws TableNotFoundException, MutationsRejectedException {
- BatchWriter batchWriter = userC.createBatchWriter(tableName, new BatchWriterConfig());
- Mutation m = new Mutation("1");
- m.put(new Text("2"), new Text("3"), new Value("".getBytes()));
- batchWriter.addMutation(m);
- batchWriter.flush();
- batchWriter.close();
-
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/ShellConfigIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/ShellConfigIT.java b/test/src/test/java/org/apache/accumulo/test/ShellConfigIT.java
deleted file mode 100644
index 4f83668..0000000
--- a/test/src/test/java/org/apache/accumulo/test/ShellConfigIT.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.nio.charset.StandardCharsets;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.harness.conf.StandaloneAccumuloClusterConfiguration;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.test.ShellServerIT.TestShell;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-public class ShellConfigIT extends AccumuloClusterHarness {
- @Override
- public int defaultTimeoutSeconds() {
- return 30;
- }
-
- private String origPropValue;
-
- @Before
- public void checkProperty() throws Exception {
- Connector conn = getConnector();
- // TABLE_VOLUME_CHOOSER is a valid property that can be updated in ZK, whereas the crypto properties are not.
- // This lets us run this test more generically, rather than having to forcibly update some property in accumulo-site.xml
- origPropValue = conn.instanceOperations().getSystemConfiguration().get(Property.TABLE_VOLUME_CHOOSER.getKey());
- conn.instanceOperations().setProperty(Property.TABLE_VOLUME_CHOOSER.getKey(), FairVolumeChooser.class.getName());
- }
-
- @After
- public void resetProperty() throws Exception {
- if (null != origPropValue) {
- Connector conn = getConnector();
- conn.instanceOperations().setProperty(Property.TABLE_VOLUME_CHOOSER.getKey(), origPropValue);
- }
- }
-
- @Test
- public void experimentalPropTest() throws Exception {
- // ensure experimental props do not show up in config output unless set
-
- AuthenticationToken token = getAdminToken();
- File clientConfFile = null;
- switch (getClusterType()) {
- case MINI:
- MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) getCluster();
- clientConfFile = mac.getConfig().getClientConfFile();
- break;
- case STANDALONE:
- StandaloneAccumuloClusterConfiguration standaloneConf = (StandaloneAccumuloClusterConfiguration) getClusterConfiguration();
- clientConfFile = standaloneConf.getClientConfFile();
- break;
- default:
- Assert.fail("Unknown cluster type");
- }
-
- Assert.assertNotNull(clientConfFile);
-
- TestShell ts = null;
- if (token instanceof PasswordToken) {
- String passwd = new String(((PasswordToken) token).getPassword(), StandardCharsets.UTF_8);
- ts = new TestShell(getAdminPrincipal(), passwd, getCluster().getInstanceName(), getCluster().getZooKeepers(), clientConfFile);
- } else if (token instanceof KerberosToken) {
- ts = new TestShell(getAdminPrincipal(), null, getCluster().getInstanceName(), getCluster().getZooKeepers(), clientConfFile);
- } else {
- Assert.fail("Unknown token type");
- }
-
- assertTrue(Property.TABLE_VOLUME_CHOOSER.isExperimental());
- assertTrue(Property.CRYPTO_CIPHER_ALGORITHM_NAME.isExperimental());
-
- String configOutput = ts.exec("config");
-
- assertTrue(configOutput.contains(Property.TABLE_VOLUME_CHOOSER.getKey()));
- assertFalse(configOutput.contains(Property.CRYPTO_CIPHER_ALGORITHM_NAME.getKey()));
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/ScanRangeIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ScanRangeIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ScanRangeIT.java
new file mode 100644
index 0000000..bd7555e
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ScanRangeIT.java
@@ -0,0 +1,244 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.util.Map.Entry;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class ScanRangeIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ private static final int TS_LIMIT = 1;
+ private static final int CQ_LIMIT = 5;
+ private static final int CF_LIMIT = 5;
+ private static final int ROW_LIMIT = 100;
+
+ @Test
+ public void run() throws Exception {
+ Connector c = getConnector();
+ String[] tableNames = getUniqueNames(2);
+ String table1 = tableNames[0];
+ c.tableOperations().create(table1);
+ String table2 = tableNames[1];
+ c.tableOperations().create(table2);
+ TreeSet<Text> splitRows = new TreeSet<Text>();
+ int splits = 3;
+ for (int i = (ROW_LIMIT / splits); i < ROW_LIMIT; i += (ROW_LIMIT / splits))
+ splitRows.add(createRow(i));
+ c.tableOperations().addSplits(table2, splitRows);
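+ // table1 is left as a single tablet; table2 is pre-split into thirds so the same scans also cross tablet boundaries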
+
+ insertData(c, table1);
+ scanTable(c, table1);
+
+ insertData(c, table2);
+ scanTable(c, table2);
+ }
+
+ private void scanTable(Connector c, String table) throws Exception {
+ scanRange(c, table, new IntKey(0, 0, 0, 0), new IntKey(1, 0, 0, 0));
+
+ scanRange(c, table, new IntKey(0, 0, 0, 0), new IntKey(ROW_LIMIT - 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
+
+ scanRange(c, table, null, null);
+
+ for (int i = 0; i < ROW_LIMIT; i += (ROW_LIMIT / 3)) {
+ for (int j = 0; j < CF_LIMIT; j += (CF_LIMIT / 2)) {
+ for (int k = 1; k < CQ_LIMIT; k += (CQ_LIMIT / 2)) {
+ scanRange(c, table, null, new IntKey(i, j, k, 0));
+ scanRange(c, table, new IntKey(0, 0, 0, 0), new IntKey(i, j, k, 0));
+
+ scanRange(c, table, new IntKey(i, j, k, 0), new IntKey(ROW_LIMIT - 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
+
+ scanRange(c, table, new IntKey(i, j, k, 0), null);
+
+ }
+ }
+ }
+
+ for (int i = 0; i < ROW_LIMIT; i++) {
+ scanRange(c, table, new IntKey(i, 0, 0, 0), new IntKey(i, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
+
+ if (i > 0 && i < ROW_LIMIT - 1) {
+ scanRange(c, table, new IntKey(i - 1, 0, 0, 0), new IntKey(i + 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
+ }
+ }
+
+ }
+
+ private static class IntKey {
+ private int row;
+ private int cf;
+ private int cq;
+ private long ts;
+
+ IntKey(IntKey ik) {
+ this.row = ik.row;
+ this.cf = ik.cf;
+ this.cq = ik.cq;
+ this.ts = ik.ts;
+ }
+
+ IntKey(int row, int cf, int cq, long ts) {
+ this.row = row;
+ this.cf = cf;
+ this.cq = cq;
+ this.ts = ts;
+ }
+
+ Key createKey() {
+ Text trow = createRow(row);
+ Text tcf = createCF(cf);
+ Text tcq = createCQ(cq);
+
+ return new Key(trow, tcf, tcq, ts);
+ }
+
+ IntKey increment() {
+
+ IntKey ik = new IntKey(this);
+
+ ik.ts++;
+ if (ik.ts >= TS_LIMIT) {
+ ik.ts = 0;
+ ik.cq++;
+ if (ik.cq >= CQ_LIMIT) {
+ ik.cq = 0;
+ ik.cf++;
+ if (ik.cf >= CF_LIMIT) {
+ ik.cf = 0;
+ ik.row++;
+ }
+ }
+ }
+
+ return ik;
+ }
+
+ }
+
+ private void scanRange(Connector c, String table, IntKey ik1, IntKey ik2) throws Exception {
+ scanRange(c, table, ik1, false, ik2, false);
+ scanRange(c, table, ik1, false, ik2, true);
+ scanRange(c, table, ik1, true, ik2, false);
+ scanRange(c, table, ik1, true, ik2, true);
+ }
+
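+ // Scans the given range and verifies every key in order: the expected start key is derived from
+ // ik1 (advanced once if exclusive) and the expected end key from ik2 (advanced once if inclusive).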
+ private void scanRange(Connector c, String table, IntKey ik1, boolean inclusive1, IntKey ik2, boolean inclusive2) throws Exception {
+ Scanner scanner = c.createScanner(table, Authorizations.EMPTY);
+
+ Key key1 = null;
+ Key key2 = null;
+
+ IntKey expectedIntKey;
+ IntKey expectedEndIntKey;
+
+ if (ik1 != null) {
+ key1 = ik1.createKey();
+ expectedIntKey = ik1;
+
+ if (!inclusive1) {
+ expectedIntKey = expectedIntKey.increment();
+ }
+ } else {
+ expectedIntKey = new IntKey(0, 0, 0, 0);
+ }
+
+ if (ik2 != null) {
+ key2 = ik2.createKey();
+ expectedEndIntKey = ik2;
+
+ if (inclusive2) {
+ expectedEndIntKey = expectedEndIntKey.increment();
+ }
+ } else {
+ expectedEndIntKey = new IntKey(ROW_LIMIT, 0, 0, 0);
+ }
+
+ Range range = new Range(key1, inclusive1, key2, inclusive2);
+
+ scanner.setRange(range);
+
+ for (Entry<Key,Value> entry : scanner) {
+
+ Key expectedKey = expectedIntKey.createKey();
+ if (!expectedKey.equals(entry.getKey())) {
+ throw new Exception(" " + expectedKey + " != " + entry.getKey());
+ }
+
+ expectedIntKey = expectedIntKey.increment();
+ }
+
+ if (!expectedIntKey.createKey().equals(expectedEndIntKey.createKey())) {
+ throw new Exception(" " + expectedIntKey.createKey() + " != " + expectedEndIntKey.createKey());
+ }
+ }
+
+ private static Text createCF(int cf) {
+ Text tcf = new Text(String.format("cf_%03d", cf));
+ return tcf;
+ }
+
+ private static Text createCQ(int cq) {
+ Text tcq = new Text(String.format("cq_%03d", cq));
+ return tcq;
+ }
+
+ private static Text createRow(int row) {
+ Text trow = new Text(String.format("r_%06d", row));
+ return trow;
+ }
+
+ private void insertData(Connector c, String table) throws Exception {
+
+ BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
+
+ for (int i = 0; i < ROW_LIMIT; i++) {
+ Mutation m = new Mutation(createRow(i));
+
+ for (int j = 0; j < CF_LIMIT; j++) {
+ for (int k = 0; k < CQ_LIMIT; k++) {
+ for (int t = 0; t < TS_LIMIT; t++) {
+ m.put(createCF(j), createCQ(k), t, new Value(String.format("%06d_%03d_%03d_%03d", i, j, k, t).getBytes(UTF_8)));
+ }
+ }
+ }
+
+ bw.addMutation(m);
+ }
+
+ bw.close();
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java
new file mode 100644
index 0000000..0636056
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.InstanceOperations;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ScanSessionTimeOutIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(ScanSessionTimeOutIT.class);
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ Map<String,String> siteConfig = cfg.getSiteConfig();
+ siteConfig.put(Property.TSERV_SESSION_MAXIDLE.getKey(), "3");
+ cfg.setSiteConfig(siteConfig);
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ private String sessionIdle = null;
+
+ @Before
+ public void reduceSessionIdle() throws Exception {
+ InstanceOperations ops = getConnector().instanceOperations();
+ sessionIdle = ops.getSystemConfiguration().get(Property.TSERV_SESSION_MAXIDLE.getKey());
+ ops.setProperty(Property.TSERV_SESSION_MAXIDLE.getKey(), "3");
+ log.info("Waiting for existing session idle time to expire");
+ Thread.sleep(AccumuloConfiguration.getTimeInMillis(sessionIdle));
+ log.info("Finished waiting");
+ }
+
+ @After
+ public void resetSessionIdle() throws Exception {
+ if (null != sessionIdle) {
+ getConnector().instanceOperations().setProperty(Property.TSERV_SESSION_MAXIDLE.getKey(), sessionIdle);
+ }
+ }
+
+ @Test
+ public void run() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+
+ BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+
+ for (int i = 0; i < 100000; i++) {
+ Mutation m = new Mutation(new Text(String.format("%08d", i)));
+ for (int j = 0; j < 3; j++)
+ m.put(new Text("cf1"), new Text("cq" + j), new Value((i + "_" + j).getBytes(UTF_8)));
+
+ bw.addMutation(m);
+ }
+
+ bw.close();
+
+ Scanner scanner = c.createScanner(tableName, new Authorizations());
+ scanner.setBatchSize(1000);
+
+ Iterator<Entry<Key,Value>> iter = scanner.iterator();
+
+ verify(iter, 0, 200);
+
+ // sleep three times the session timeout
+ UtilWaitThread.sleep(9000);
+
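+ // the server-side scan session has expired by now; the client should transparently recreate it and resume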
+ verify(iter, 200, 100000);
+
+ }
+
+ private void verify(Iterator<Entry<Key,Value>> iter, int start, int stop) throws Exception {
+ for (int i = start; i < stop; i++) {
+
+ Text er = new Text(String.format("%08d", i));
+
+ for (int j = 0; j < 3; j++) {
+ Entry<Key,Value> entry = iter.next();
+
+ if (!entry.getKey().getRow().equals(er)) {
+ throw new Exception("row " + entry.getKey().getRow() + " != " + er);
+ }
+
+ if (!entry.getKey().getColumnFamily().equals(new Text("cf1"))) {
+ throw new Exception("cf " + entry.getKey().getColumnFamily() + " != cf1");
+ }
+
+ if (!entry.getKey().getColumnQualifier().equals(new Text("cq" + j))) {
+ throw new Exception("cq " + entry.getKey().getColumnQualifier() + " != cq" + j);
+ }
+
+ if (!entry.getValue().toString().equals("" + i + "_" + j)) {
+ throw new Exception("value " + entry.getValue() + " != " + i + "_" + j);
+ }
+
+ }
+ }
+
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/ScannerIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ScannerIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ScannerIT.java
new file mode 100644
index 0000000..340a58e
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ScannerIT.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.fate.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.common.base.Stopwatch;
+
+/**
+ *
+ */
+public class ScannerIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Test
+ public void testScannerReadaheadConfiguration() throws Exception {
+ final String table = getUniqueNames(1)[0];
+ Connector c = getConnector();
+ c.tableOperations().create(table);
+
+ BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
+
+ Mutation m = new Mutation("a");
+ for (int i = 0; i < 10; i++) {
+ m.put(Integer.toString(i), "", "");
+ }
+
+ bw.addMutation(m);
+ bw.close();
+
+ Scanner s = c.createScanner(table, new Authorizations());
+
+ IteratorSetting cfg = new IteratorSetting(100, SlowIterator.class);
+ // A batch size of one will end up calling seek() for each element with no calls to next()
+ SlowIterator.setSeekSleepTime(cfg, 100l);
+
+ s.addScanIterator(cfg);
+ // Never start readahead
+ s.setReadaheadThreshold(Long.MAX_VALUE);
+ s.setBatchSize(1);
+ s.setRange(new Range());
+
+ Stopwatch sw = new Stopwatch();
+ Iterator<Entry<Key,Value>> iterator = s.iterator();
+
+ sw.start();
+ while (iterator.hasNext()) {
+ sw.stop();
+
+ // While we "do work" in the client, we should be fetching the next result
+ UtilWaitThread.sleep(100l);
+ iterator.next();
+ sw.start();
+ }
+ sw.stop();
+
+ long millisWithWait = sw.elapsed(TimeUnit.MILLISECONDS);
+
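+ // Second pass: same scan, but with a readahead threshold of 0 so the client prefetches the
+ // next batch while we "do work", which should make this pass measurably faster.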
+ s = c.createScanner(table, new Authorizations());
+ s.addScanIterator(cfg);
+ s.setRange(new Range());
+ s.setBatchSize(1);
+ s.setReadaheadThreshold(0l);
+
+ sw = new Stopwatch();
+ iterator = s.iterator();
+
+ sw.start();
+ while (iterator.hasNext()) {
+ sw.stop();
+
+ // While we "do work" in the client, we should be fetching the next result
+ UtilWaitThread.sleep(100l);
+ iterator.next();
+ sw.start();
+ }
+ sw.stop();
+
+ long millisWithNoWait = sw.elapsed(TimeUnit.MILLISECONDS);
+
+ // The "no-wait" time should be much less than the "wait-time"
+ Assert.assertTrue("Expected less time to be taken with immediate readahead (" + millisWithNoWait + ") than without immediate readahead (" + millisWithWait
+ + ")", millisWithNoWait < millisWithWait);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
new file mode 100644
index 0000000..02b65f4
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Collections;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.Combiner;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class ServerSideErrorIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Test
+ public void run() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ IteratorSetting is = new IteratorSetting(5, "Bad Aggregator", BadCombiner.class);
+ Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("acf")));
+ c.tableOperations().attachIterator(tableName, is);
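+ // BadCombiner is expected to throw when it combines values, so any scan over the "acf" family should fail on the server side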
+
+ BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+
+ Mutation m = new Mutation(new Text("r1"));
+ m.put(new Text("acf"), new Text("foo"), new Value(new byte[] {'1'}));
+
+ bw.addMutation(m);
+
+ bw.close();
+
+ // try to scan table
+ Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
+
+ boolean caught = false;
+ try {
+ for (Entry<Key,Value> entry : scanner) {
+ entry.getKey();
+ }
+ } catch (Exception e) {
+ caught = true;
+ }
+
+ if (!caught)
+ throw new Exception("Scan did not fail");
+
+ // try to batch scan the table
+ BatchScanner bs = c.createBatchScanner(tableName, Authorizations.EMPTY, 2);
+ bs.setRanges(Collections.singleton(new Range()));
+
+ caught = false;
+ try {
+ for (Entry<Key,Value> entry : bs) {
+ entry.getKey();
+ }
+ } catch (Exception e) {
+ caught = true;
+ } finally {
+ bs.close();
+ }
+
+ if (!caught)
+ throw new Exception("batch scan did not fail");
+
+ // remove the bad agg so accumulo can shutdown
+ TableOperations to = c.tableOperations();
+ for (Entry<String,String> e : to.getProperties(tableName)) {
+ to.removeProperty(tableName, e.getKey());
+ }
+
+ UtilWaitThread.sleep(500);
+
+ // should be able to scan now
+ scanner = c.createScanner(tableName, Authorizations.EMPTY);
+ for (Entry<Key,Value> entry : scanner) {
+ entry.getKey();
+ }
+
+ // set a non-existent iterator; this should cause the scan to fail on the server side
+ scanner.addScanIterator(new IteratorSetting(100, "bogus", "com.bogus.iterator"));
+
+ caught = false;
+ try {
+ for (Entry<Key,Value> entry : scanner) {
+ // should error
+ entry.getKey();
+ }
+ } catch (Exception e) {
+ caught = true;
+ }
+
+ if (!caught)
+ throw new Exception("Scan did not fail");
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/SessionDurabilityIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SessionDurabilityIT.java b/test/src/main/java/org/apache/accumulo/test/functional/SessionDurabilityIT.java
new file mode 100644
index 0000000..36bdd7a
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SessionDurabilityIT.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.ConditionalWriter;
+import org.apache.accumulo.core.client.ConditionalWriter.Status;
+import org.apache.accumulo.core.client.ConditionalWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Durability;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Condition;
+import org.apache.accumulo.core.data.ConditionalMutation;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.minicluster.impl.ProcessReference;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class SessionDurabilityIT extends ConfigurableMacBase {
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setNumTservers(1);
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
+ }
+
+ @Test(timeout = 3 * 60 * 1000)
+ public void nondurableTableHasDurableWrites() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ // table default has no durability
+ c.tableOperations().create(tableName);
+ c.tableOperations().setProperty(tableName, Property.TABLE_DURABILITY.getKey(), "none");
+ // send durable writes
+ BatchWriterConfig cfg = new BatchWriterConfig();
+ cfg.setDurability(Durability.SYNC);
+ writeSome(tableName, 10, cfg);
+ assertEquals(10, count(tableName));
+ // verify writes survive restart
+ restartTServer();
+ assertEquals(10, count(tableName));
+ }
+
+ @Test(timeout = 3 * 60 * 1000)
+ public void durableTableLosesNonDurableWrites() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ // table default is durable writes
+ c.tableOperations().create(tableName);
+ c.tableOperations().setProperty(tableName, Property.TABLE_DURABILITY.getKey(), "sync");
+ // write with no durability
+ BatchWriterConfig cfg = new BatchWriterConfig();
+ cfg.setDurability(Durability.NONE);
+ writeSome(tableName, 10, cfg);
+ // verify writes are lost on restart
+ restartTServer();
+ assertTrue(10 > count(tableName));
+ }
+
+ private int count(String tableName) throws Exception {
+ return Iterators.size(getConnector().createScanner(tableName, Authorizations.EMPTY).iterator());
+ }
+
+ private void writeSome(String tableName, int n, BatchWriterConfig cfg) throws Exception {
+ Connector c = getConnector();
+ BatchWriter bw = c.createBatchWriter(tableName, cfg);
+ for (int i = 0; i < n; i++) {
+ Mutation m = new Mutation(i + "");
+ m.put("", "", "");
+ bw.addMutation(m);
+ }
+ bw.close();
+ }
+
+ @Test(timeout = 3 * 60 * 1000)
+ public void testConditionDurability() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ // table default is durable writes
+ c.tableOperations().create(tableName);
+ c.tableOperations().setProperty(tableName, Property.TABLE_DURABILITY.getKey(), "sync");
+ // write without durability
+ ConditionalWriterConfig cfg = new ConditionalWriterConfig();
+ cfg.setDurability(Durability.NONE);
+ conditionWriteSome(tableName, 10, cfg);
+ // everything in there?
+ assertEquals(10, count(tableName));
+ // restart the server and verify the updates are lost
+ restartTServer();
+ assertEquals(0, count(tableName));
+ }
+
+ @Test(timeout = 3 * 60 * 1000)
+ public void testConditionDurability2() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ // table default has no durability
+ c.tableOperations().create(tableName);
+ c.tableOperations().setProperty(tableName, Property.TABLE_DURABILITY.getKey(), "none");
+ // write with durability
+ ConditionalWriterConfig cfg = new ConditionalWriterConfig();
+ cfg.setDurability(Durability.SYNC);
+ conditionWriteSome(tableName, 10, cfg);
+ // everything in there?
+ assertEquals(10, count(tableName));
+ // restart the server and verify the updates are still there
+ restartTServer();
+ assertEquals(10, count(tableName));
+ }
+
+ private void conditionWriteSome(String tableName, int n, ConditionalWriterConfig cfg) throws Exception {
+ Connector c = getConnector();
+ ConditionalWriter cw = c.createConditionalWriter(tableName, cfg);
+ for (int i = 0; i < n; i++) {
+ ConditionalMutation m = new ConditionalMutation((CharSequence) (i + ""), new Condition("", ""));
+ m.put("", "", "X");
+ assertEquals(Status.ACCEPTED, cw.write(m).getStatus());
+ }
+ }
+
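+ // kill every tablet server and restart the cluster; only mutations persisted at the configured durability should survive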
+ private void restartTServer() throws Exception {
+ for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
+ cluster.killProcess(ServerType.TABLET_SERVER, proc);
+ }
+ cluster.start();
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/ShutdownIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ShutdownIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ShutdownIT.java
new file mode 100644
index 0000000..f27ee02
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ShutdownIT.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.server.util.Admin;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.TestRandomDeletes;
+import org.apache.accumulo.test.VerifyIngest;
+import org.junit.Test;
+
+public class ShutdownIT extends ConfigurableMacBase {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Test
+ public void shutdownDuringIngest() throws Exception {
+ Process ingest = cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD,
+ "--createTable");
+ UtilWaitThread.sleep(100);
+ assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+ ingest.destroy();
+ }
+
+ @Test
+ public void shutdownDuringQuery() throws Exception {
+ assertEquals(0,
+ cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "--createTable")
+ .waitFor());
+ Process verify = cluster.exec(VerifyIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD);
+ UtilWaitThread.sleep(100);
+ assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+ verify.destroy();
+ }
+
+ @Test
+ public void shutdownDuringDelete() throws Exception {
+ assertEquals(0,
+ cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "--createTable")
+ .waitFor());
+ Process deleter = cluster.exec(TestRandomDeletes.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD);
+ UtilWaitThread.sleep(100);
+ assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+ deleter.destroy();
+ }
+
+ @Test
+ public void shutdownDuringDeleteTable() throws Exception {
+ final Connector c = getConnector();
+ for (int i = 0; i < 10; i++) {
+ c.tableOperations().create("table" + i);
+ }
+ final AtomicReference<Exception> ref = new AtomicReference<Exception>();
+ Thread async = new Thread() {
+ @Override
+ public void run() {
+ try {
+ for (int i = 0; i < 10; i++)
+ c.tableOperations().delete("table" + i);
+ } catch (Exception ex) {
+ ref.set(ex);
+ }
+ }
+ };
+ async.start();
+ UtilWaitThread.sleep(100);
+ assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+ if (ref.get() != null)
+ throw ref.get();
+ }
+
+ @Test
+ public void stopDuringStart() throws Exception {
+ assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+ }
+
+ @Test
+ public void adminStop() throws Exception {
+ runAdminStopTest(getConnector(), cluster);
+ }
+
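+ // "admin stop <tserver>" should stop only the named tablet server, leaving the remaining one online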
+ static void runAdminStopTest(Connector c, MiniAccumuloClusterImpl cluster) throws InterruptedException, IOException {
+ assertEquals(0,
+ cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "--createTable")
+ .waitFor());
+ List<String> tabletServers = c.instanceOperations().getTabletServers();
+ assertEquals(2, tabletServers.size());
+ String doomed = tabletServers.get(0);
+ assertEquals(0, cluster.exec(Admin.class, "stop", doomed).waitFor());
+ tabletServers = c.instanceOperations().getTabletServers();
+ assertEquals(1, tabletServers.size());
+ assertFalse(tabletServers.get(0).equals(doomed));
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/SimpleBalancerFairnessIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SimpleBalancerFairnessIT.java b/test/src/main/java/org/apache/accumulo/test/functional/SimpleBalancerFairnessIT.java
new file mode 100644
index 0000000..3fcbcfb
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SimpleBalancerFairnessIT.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.impl.ClientContext;
+import org.apache.accumulo.core.client.impl.Credentials;
+import org.apache.accumulo.core.client.impl.MasterClient;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.master.thrift.MasterClientService;
+import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
+import org.apache.accumulo.core.master.thrift.TableInfo;
+import org.apache.accumulo.core.master.thrift.TabletServerStatus;
+import org.apache.accumulo.core.trace.Tracer;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.MemoryUnit;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class SimpleBalancerFairnessIT extends ConfigurableMacBase {
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setProperty(Property.TSERV_MAXMEM, "10K");
+ cfg.setProperty(Property.TSERV_MAJC_DELAY, "0");
+ cfg.setMemory(ServerType.TABLET_SERVER, cfg.getMemory(ServerType.TABLET_SERVER) * 3, MemoryUnit.BYTE);
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 10 * 60;
+ }
+
+ @Test
+ public void simpleBalancerFairness() throws Exception {
+ Connector c = getConnector();
+ c.tableOperations().create("test_ingest");
+ c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
+ c.tableOperations().create("unused");
+ TreeSet<Text> splits = TestIngest.getSplitPoints(0, 10000000, 500);
+ log.info("Creating " + splits.size() + " splits");
+ c.tableOperations().addSplits("unused", splits);
+ List<String> tservers = c.instanceOperations().getTabletServers();
+ TestIngest.Opts opts = new TestIngest.Opts();
+ opts.rows = 50000;
+ opts.setPrincipal("root");
+ TestIngest.ingest(c, opts, new BatchWriterOpts());
+ c.tableOperations().flush("test_ingest", null, null, false);
+ UtilWaitThread.sleep(45 * 1000);
+ Credentials creds = new Credentials("root", new PasswordToken(ROOT_PASSWORD));
+ ClientContext context = new ClientContext(c.getInstance(), creds, getClientConfig());
+
+ MasterMonitorInfo stats = null;
+ int unassignedTablets = 1;
+ for (int i = 0; unassignedTablets > 0 && i < 10; i++) {
+ MasterClientService.Iface client = null;
+ try {
+ client = MasterClient.getConnectionWithRetry(context);
+ stats = client.getMasterStats(Tracer.traceInfo(), creds.toThrift(c.getInstance()));
+ } finally {
+ if (client != null)
+ MasterClient.close(client);
+ }
+ unassignedTablets = stats.getUnassignedTablets();
+ if (unassignedTablets > 0) {
+ log.info("Found " + unassignedTablets + " unassigned tablets, sleeping 3 seconds for tablet assignment");
+ Thread.sleep(3000);
+ }
+ }
+
+ assertEquals("Unassigned tablets were not assigned within 30 seconds", 0, unassignedTablets);
+
+ // Compute online tablets per tserver
+ List<Integer> counts = new ArrayList<Integer>();
+ for (TabletServerStatus server : stats.tServerInfo) {
+ int count = 0;
+ for (TableInfo table : server.tableMap.values()) {
+ count += table.onlineTablets;
+ }
+ counts.add(count);
+ }
+ assertTrue("Expected to have at least two TabletServers", counts.size() > 1);
+ for (int i = 1; i < counts.size(); i++) {
+ int diff = Math.abs(counts.get(0) - counts.get(i));
+ assertTrue("Expected difference in tablets to be less than or equal to " + counts.size() + " but was " + diff + ". Counts " + counts,
+ diff <= tservers.size());
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java b/test/src/main/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java
new file mode 100644
index 0000000..8cece0b
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+/**
+ * This test recreates issue ACCUMULO-516. Until that issue is fixed, this test should time out.
+ */
+public class SparseColumnFamilyIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Test
+ public void sparceColumnFamily() throws Exception {
+ String scftt = getUniqueNames(1)[0];
+ Connector c = getConnector();
+ c.tableOperations().create(scftt);
+
+ BatchWriter bw = c.createBatchWriter(scftt, new BatchWriterConfig());
+
+ // create file in the tablet that has mostly column family 0, with a few entries for column family 1
+
+ bw.addMutation(nm(0, 1, 0));
+ for (int i = 1; i < 99999; i++) {
+ bw.addMutation(nm(i * 2, 0, i));
+ }
+ bw.addMutation(nm(99999 * 2, 1, 99999));
+ bw.flush();
+
+ c.tableOperations().flush(scftt, null, null, true);
+
+ // create a file that has column family 1 and 0 interleaved
+ for (int i = 0; i < 100000; i++) {
+ bw.addMutation(nm(i * 2 + 1, i % 2 == 0 ? 0 : 1, i));
+ }
+ bw.close();
+
+ c.tableOperations().flush(scftt, null, null, true);
+
+ Scanner scanner = c.createScanner(scftt, Authorizations.EMPTY);
+
+ for (int i = 0; i < 200; i++) {
+
+ // every time we search for column family 1, it will scan the entire file
+ // that has mostly column family 0 until the bug is fixed
+ scanner.setRange(new Range(String.format("%06d", i), null));
+ scanner.clearColumns();
+ scanner.setBatchSize(3);
+ scanner.fetchColumnFamily(new Text(String.format("%03d", 1)));
+
+ Iterator<Entry<Key,Value>> iter = scanner.iterator();
+ if (iter.hasNext()) {
+ Entry<Key,Value> entry = iter.next();
+ if (!"001".equals(entry.getKey().getColumnFamilyData().toString())) {
+ throw new Exception();
+ }
+ }
+ }
+ }
+
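+ // builds a mutation keyed by zero-padded row and column family strings so keys sort numerically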
+ private Mutation nm(int row, int cf, int val) {
+ Mutation m = new Mutation(String.format("%06d", row));
+ m.put(String.format("%03d", cf), "", "" + val);
+ return m;
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/SplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SplitIT.java b/test/src/main/java/org/apache/accumulo/test/functional/SplitIT.java
new file mode 100644
index 0000000..49cd2aa
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SplitIT.java
@@ -0,0 +1,223 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.InstanceOperations;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.util.CheckForMetadataProblems;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.After;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Charsets;
+
+public class SplitIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(SplitIT.class);
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setProperty(Property.TSERV_MAXMEM, "5K");
+ cfg.setProperty(Property.TSERV_MAJC_DELAY, "100ms");
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 4 * 60;
+ }
+
+ private String tservMaxMem, tservMajcDelay;
+
+ @Before
+ public void alterConfig() throws Exception {
+ Assume.assumeTrue(ClusterType.MINI == getClusterType());
+
+ InstanceOperations iops = getConnector().instanceOperations();
+ Map<String,String> config = iops.getSystemConfiguration();
+ tservMaxMem = config.get(Property.TSERV_MAXMEM.getKey());
+ tservMajcDelay = config.get(Property.TSERV_MAJC_DELAY.getKey());
+
+ if (!tservMajcDelay.equals("100ms")) {
+ iops.setProperty(Property.TSERV_MAJC_DELAY.getKey(), "100ms");
+ }
+
+ // Property.TSERV_MAXMEM can't be altered on a running server
+ boolean restarted = false;
+ if (!tservMaxMem.equals("5K")) {
+ iops.setProperty(Property.TSERV_MAXMEM.getKey(), "5K");
+ getCluster().getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
+ getCluster().getClusterControl().startAllServers(ServerType.TABLET_SERVER);
+ restarted = true;
+ }
+
+ // If we restarted the tservers, we don't need to re-wait for the majc delay
+ if (!restarted) {
+ long millis = AccumuloConfiguration.getTimeInMillis(tservMajcDelay);
+ log.info("Waiting for majc delay period: {}ms", millis);
+ Thread.sleep(millis);
+ log.info("Finished waiting for majc delay period");
+ }
+ }
+
+ @After
+ public void resetConfig() throws Exception {
+ if (null != tservMaxMem) {
+ log.info("Resetting {}={}", Property.TSERV_MAXMEM.getKey(), tservMaxMem);
+ getConnector().instanceOperations().setProperty(Property.TSERV_MAXMEM.getKey(), tservMaxMem);
+ tservMaxMem = null;
+ getCluster().getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
+ getCluster().getClusterControl().startAllServers(ServerType.TABLET_SERVER);
+ }
+ if (null != tservMajcDelay) {
+ log.info("Resetting {}={}", Property.TSERV_MAJC_DELAY.getKey(), tservMajcDelay);
+ getConnector().instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), tservMajcDelay);
+ tservMajcDelay = null;
+ }
+ }
+
+ @Test
+ public void tabletShouldSplit() throws Exception {
+ Connector c = getConnector();
+ String table = getUniqueNames(1)[0];
+ c.tableOperations().create(table);
+ c.tableOperations().setProperty(table, Property.TABLE_SPLIT_THRESHOLD.getKey(), "256K");
+ c.tableOperations().setProperty(table, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1K");
+ TestIngest.Opts opts = new TestIngest.Opts();
+ VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+ opts.rows = 100000;
+ opts.setTableName(table);
+
+ ClientConfiguration clientConfig = cluster.getClientConfig();
+ if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ opts.updateKerberosCredentials(clientConfig);
+ vopts.updateKerberosCredentials(clientConfig);
+ } else {
+ opts.setPrincipal(getAdminPrincipal());
+ vopts.setPrincipal(getAdminPrincipal());
+ }
+
+ TestIngest.ingest(c, opts, new BatchWriterOpts());
+ vopts.rows = opts.rows;
+ vopts.setTableName(table);
+ VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+ while (c.tableOperations().listSplits(table).size() < 10) {
+ UtilWaitThread.sleep(15 * 1000);
+ }
+ String id = c.tableOperations().tableIdMap().get(table);
+ Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ KeyExtent extent = new KeyExtent(new Text(id), null, null);
+ s.setRange(extent.toMetadataRange());
+ MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(s);
+ int count = 0;
+ int shortened = 0;
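+ // TestIngest rows are 14 characters (row_ plus 10 digits); split points chosen by the tablet
+ // server should be shortened prefixes, so at least some end rows should be shorter than that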
+ for (Entry<Key,Value> entry : s) {
+ extent = new KeyExtent(entry.getKey().getRow(), entry.getValue());
+ if (extent.getEndRow() != null && extent.getEndRow().toString().length() < 14)
+ shortened++;
+ count++;
+ }
+
+ assertTrue("Shortened should be greater than zero: " + shortened, shortened > 0);
+ assertTrue("Count should be cgreater than 10: " + count, count > 10);
+
+ String[] args;
+ if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ ClusterUser rootUser = getAdminUser();
+ args = new String[] {"-i", cluster.getInstanceName(), "-u", rootUser.getPrincipal(), "--keytab", rootUser.getKeytab().getAbsolutePath(), "-z",
+ cluster.getZooKeepers()};
+ } else {
+ PasswordToken token = (PasswordToken) getAdminToken();
+ args = new String[] {"-i", cluster.getInstanceName(), "-u", "root", "-p", new String(token.getPassword(), Charsets.UTF_8), "-z", cluster.getZooKeepers()};
+ }
+
+ assertEquals(0, getCluster().getClusterControl().exec(CheckForMetadataProblems.class, args));
+ }
+
+ @Test
+ public void interleaveSplit() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
+ c.tableOperations().setProperty(tableName, Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none");
+ UtilWaitThread.sleep(5 * 1000);
+ ReadWriteIT.interleaveTest(c, tableName);
+ UtilWaitThread.sleep(5 * 1000);
+ int numSplits = c.tableOperations().listSplits(tableName).size();
+ while (numSplits <= 20) {
+ log.info("Waiting for splits to happen");
+ Thread.sleep(2000);
+ numSplits = c.tableOperations().listSplits(tableName).size();
+ }
+ assertTrue("Expected at least 20 splits, saw " + numSplits, numSplits > 20);
+ }
+
+ @Test
+ public void deleteSplit() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
+ ClientConfiguration clientConfig = getCluster().getClientConfig();
+ String password = null, keytab = null;
+ if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ keytab = getAdminUser().getKeytab().getAbsolutePath();
+ } else {
+ password = new String(((PasswordToken) getAdminToken()).getPassword(), Charsets.UTF_8);
+ }
+ DeleteIT.deleteTest(c, getCluster(), getAdminPrincipal(), password, tableName, keytab);
+ c.tableOperations().flush(tableName, null, null, true);
+ for (int i = 0; i < 5; i++) {
+ UtilWaitThread.sleep(10 * 1000);
+ if (c.tableOperations().listSplits(tableName).size() > 20)
+ break;
+ }
+ assertTrue(c.tableOperations().listSplits(tableName).size() > 20);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java b/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
new file mode 100644
index 0000000..4d13e2a
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
@@ -0,0 +1,279 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.Assert.assertEquals;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.SortedMap;
+import java.util.TreeMap;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.impl.ScannerImpl;
+import org.apache.accumulo.core.client.impl.Writer;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.file.rfile.RFile;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.ColumnFQ;
+import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
+import org.apache.accumulo.fate.zookeeper.ZooLock.LockLossReason;
+import org.apache.accumulo.fate.zookeeper.ZooLock.LockWatcher;
+import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
+import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.conf.ServerConfigurationFactory;
+import org.apache.accumulo.server.fs.FileRef;
+import org.apache.accumulo.server.master.state.Assignment;
+import org.apache.accumulo.server.master.state.TServerInstance;
+import org.apache.accumulo.server.tablets.TabletTime;
+import org.apache.accumulo.server.util.FileUtil;
+import org.apache.accumulo.server.util.MasterMetadataUtil;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.accumulo.server.zookeeper.TransactionWatcher;
+import org.apache.accumulo.server.zookeeper.ZooLock;
+import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.accumulo.tserver.TabletServer;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+import com.google.common.collect.Multimap;
+
+public class SplitRecoveryIT extends ConfigurableMacBase {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ private KeyExtent nke(String table, String endRow, String prevEndRow) {
+ return new KeyExtent(new Text(table), endRow == null ? null : new Text(endRow), prevEndRow == null ? null : new Text(prevEndRow));
+ }
+
+ private void run() throws Exception {
+ Instance inst = HdfsZooInstance.getInstance();
+ AccumuloServerContext c = new AccumuloServerContext(new ServerConfigurationFactory(inst));
+ String zPath = ZooUtil.getRoot(inst) + "/testLock";
+ IZooReaderWriter zoo = ZooReaderWriter.getInstance();
+ zoo.putPersistentData(zPath, new byte[0], NodeExistsPolicy.OVERWRITE);
+ ZooLock zl = new ZooLock(zPath);
+ boolean gotLock = zl.tryLock(new LockWatcher() {
+
+ @Override
+ public void lostLock(LockLossReason reason) {
+ System.exit(-1);
+
+ }
+
+ @Override
+ public void unableToMonitorLockNode(Throwable e) {
+ System.exit(-1);
+ }
+ }, "foo".getBytes(UTF_8));
+
+ if (!gotLock) {
+ System.err.println("Failed to get lock " + zPath);
+ }
+
+ // run test for a table with one tablet
+ runSplitRecoveryTest(c, 0, "sp", 0, zl, nke("foo0", null, null));
+ runSplitRecoveryTest(c, 1, "sp", 0, zl, nke("foo1", null, null));
+
+ // run test for tables with two tablets, run test on first and last tablet
+ runSplitRecoveryTest(c, 0, "k", 0, zl, nke("foo2", "m", null), nke("foo2", null, "m"));
+ runSplitRecoveryTest(c, 1, "k", 0, zl, nke("foo3", "m", null), nke("foo3", null, "m"));
+ runSplitRecoveryTest(c, 0, "o", 1, zl, nke("foo4", "m", null), nke("foo4", null, "m"));
+ runSplitRecoveryTest(c, 1, "o", 1, zl, nke("foo5", "m", null), nke("foo5", null, "m"));
+
+ // run test for table w/ three tablets, run test on middle tablet
+ runSplitRecoveryTest(c, 0, "o", 1, zl, nke("foo6", "m", null), nke("foo6", "r", "m"), nke("foo6", null, "r"));
+ runSplitRecoveryTest(c, 1, "o", 1, zl, nke("foo7", "m", null), nke("foo7", "r", "m"), nke("foo7", null, "r"));
+
+ // run test for table w/ three tablets, run test on first
+ runSplitRecoveryTest(c, 0, "g", 0, zl, nke("foo8", "m", null), nke("foo8", "r", "m"), nke("foo8", null, "r"));
+ runSplitRecoveryTest(c, 1, "g", 0, zl, nke("foo9", "m", null), nke("foo9", "r", "m"), nke("foo9", null, "r"));
+
+ // run test for table w/ three tablets, run test on last tablet
+ runSplitRecoveryTest(c, 0, "w", 2, zl, nke("fooa", "m", null), nke("fooa", "r", "m"), nke("fooa", null, "r"));
+ runSplitRecoveryTest(c, 1, "w", 2, zl, nke("foob", "m", null), nke("foob", "r", "m"), nke("foob", null, "r"));
+ }
+
+ private void runSplitRecoveryTest(AccumuloServerContext context, int failPoint, String mr, int extentToSplit, ZooLock zl, KeyExtent... extents)
+ throws Exception {
+
+ Text midRow = new Text(mr);
+
+ SortedMap<FileRef,DataFileValue> splitMapFiles = null;
+
+ for (int i = 0; i < extents.length; i++) {
+ KeyExtent extent = extents[i];
+
+ String tdir = ServerConstants.getTablesDirs()[0] + "/" + extent.getTableId().toString() + "/dir_" + i;
+ MetadataTableUtil.addTablet(extent, tdir, context, TabletTime.LOGICAL_TIME_ID, zl);
+ SortedMap<FileRef,DataFileValue> mapFiles = new TreeMap<FileRef,DataFileValue>();
+ mapFiles.put(new FileRef(tdir + "/" + RFile.EXTENSION + "_000_000"), new DataFileValue(1000017 + i, 10000 + i));
+
+ if (i == extentToSplit) {
+ splitMapFiles = mapFiles;
+ }
+ int tid = 0;
+ TransactionWatcher.ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
+ MetadataTableUtil.updateTabletDataFile(tid, extent, mapFiles, "L0", context, zl);
+ }
+
+ KeyExtent extent = extents[extentToSplit];
+
+ KeyExtent high = new KeyExtent(extent.getTableId(), extent.getEndRow(), midRow);
+ KeyExtent low = new KeyExtent(extent.getTableId(), midRow, extent.getPrevEndRow());
+
+ splitPartiallyAndRecover(context, extent, high, low, .4, splitMapFiles, midRow, "localhost:1234", failPoint, zl);
+ }
+
+ private void splitPartiallyAndRecover(AccumuloServerContext context, KeyExtent extent, KeyExtent high, KeyExtent low, double splitRatio,
+ SortedMap<FileRef,DataFileValue> mapFiles, Text midRow, String location, int steps, ZooLock zl) throws Exception {
+
+ SortedMap<FileRef,DataFileValue> lowDatafileSizes = new TreeMap<FileRef,DataFileValue>();
+ SortedMap<FileRef,DataFileValue> highDatafileSizes = new TreeMap<FileRef,DataFileValue>();
+ List<FileRef> highDatafilesToRemove = new ArrayList<FileRef>();
+
+ MetadataTableUtil.splitDatafiles(extent.getTableId(), midRow, splitRatio, new HashMap<FileRef,FileUtil.FileInfo>(), mapFiles, lowDatafileSizes,
+ highDatafileSizes, highDatafilesToRemove);
+
+ MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, context, zl);
+ TServerInstance instance = new TServerInstance(location, zl.getSessionId());
+ Writer writer = MetadataTableUtil.getMetadataTable(context);
+ Assignment assignment = new Assignment(high, instance);
+ Mutation m = new Mutation(assignment.tablet.getMetadataEntry());
+ assignment.server.putFutureLocation(m);
+ writer.update(m);
+
+ if (steps >= 1) {
+ Multimap<Long,FileRef> bulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, extent);
+ MasterMetadataUtil.addNewTablet(context, low, "/lowDir", instance, lowDatafileSizes, bulkFiles, TabletTime.LOGICAL_TIME_ID + "0", -1l, -1l, zl);
+ }
+ if (steps >= 2) {
+ MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, context, zl);
+ }
+
+ TabletServer.verifyTabletInformation(context, high, instance, null, "127.0.0.1:0", zl);
+
+ if (steps >= 1) {
+ ensureTabletHasNoUnexpectedMetadataEntries(context, low, lowDatafileSizes);
+ ensureTabletHasNoUnexpectedMetadataEntries(context, high, highDatafileSizes);
+
+ Multimap<Long,FileRef> lowBulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, low);
+ Multimap<Long,FileRef> highBulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, high);
+
+ if (!lowBulkFiles.equals(highBulkFiles)) {
+ throw new Exception(" " + lowBulkFiles + " != " + highBulkFiles + " " + low + " " + high);
+ }
+
+ if (lowBulkFiles.size() == 0) {
+ throw new Exception(" no bulk files " + low);
+ }
+ } else {
+ ensureTabletHasNoUnexpectedMetadataEntries(context, extent, mapFiles);
+ }
+ }
+
+ private void ensureTabletHasNoUnexpectedMetadataEntries(AccumuloServerContext context, KeyExtent extent, SortedMap<FileRef,DataFileValue> expectedMapFiles)
+ throws Exception {
+ Scanner scanner = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
+ scanner.setRange(extent.toMetadataRange());
+
+ HashSet<ColumnFQ> expectedColumns = new HashSet<ColumnFQ>();
+ expectedColumns.add(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN);
+ expectedColumns.add(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN);
+ expectedColumns.add(TabletsSection.ServerColumnFamily.TIME_COLUMN);
+ expectedColumns.add(TabletsSection.ServerColumnFamily.LOCK_COLUMN);
+
+ HashSet<Text> expectedColumnFamilies = new HashSet<Text>();
+ expectedColumnFamilies.add(DataFileColumnFamily.NAME);
+ expectedColumnFamilies.add(TabletsSection.FutureLocationColumnFamily.NAME);
+ expectedColumnFamilies.add(TabletsSection.CurrentLocationColumnFamily.NAME);
+ expectedColumnFamilies.add(TabletsSection.LastLocationColumnFamily.NAME);
+ expectedColumnFamilies.add(TabletsSection.BulkFileColumnFamily.NAME);
+
+ Iterator<Entry<Key,Value>> iter = scanner.iterator();
+ while (iter.hasNext()) {
+ Key key = iter.next().getKey();
+
+ if (!key.getRow().equals(extent.getMetadataEntry())) {
+ throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME + " entry " + key);
+ }
+
+ if (expectedColumnFamilies.contains(key.getColumnFamily())) {
+ continue;
+ }
+
+ if (expectedColumns.remove(new ColumnFQ(key))) {
+ continue;
+ }
+
+ throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME + " entry " + key);
+ }
+ System.out.println("expectedColumns " + expectedColumns);
+ if (expectedColumns.size() > 0) {
+ throw new Exception("Not all expected columns seen " + extent + " " + expectedColumns);
+ }
+
+ SortedMap<FileRef,DataFileValue> fixedMapFiles = MetadataTableUtil.getDataFileSizes(extent, context);
+ verifySame(expectedMapFiles, fixedMapFiles);
+ }
+
+ private void verifySame(SortedMap<FileRef,DataFileValue> datafileSizes, SortedMap<FileRef,DataFileValue> fixedDatafileSizes) throws Exception {
+
+ if (!datafileSizes.keySet().containsAll(fixedDatafileSizes.keySet()) || !fixedDatafileSizes.keySet().containsAll(datafileSizes.keySet())) {
+ throw new Exception("Key sets not the same " + datafileSizes.keySet() + " != " + fixedDatafileSizes.keySet());
+ }
+
+ for (Entry<FileRef,DataFileValue> entry : datafileSizes.entrySet()) {
+ DataFileValue dfv = entry.getValue();
+ DataFileValue otherDfv = fixedDatafileSizes.get(entry.getKey());
+
+ if (!dfv.equals(otherDfv)) {
+ throw new Exception(entry.getKey() + " dfv not equal " + dfv + " " + otherDfv);
+ }
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ new SplitRecoveryIT().run();
+ }
+
+ @Test
+ public void test() throws Exception {
+ assertEquals(0, exec(SplitRecoveryIT.class).waitFor());
+ }
+
+}
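The steps parameter above halts the split after zero, one, or two of the metadata updates, then lets TabletServer.verifyTabletInformation() drive recovery and checks the resulting metadata. The other input the test feeds in comes from splitDatafiles(), which apportions each file's (size, entries) estimate between the low and high tablets by the split ratio. A rough model of that idea only, not the actual MetadataTableUtil logic:

    // Rough sketch: a file estimated at `size` bytes and `entries` keys is
    // credited ratio * size to the low tablet and the remainder to the high
    // tablet. Returns {lowSize, lowEntries, highSize, highEntries}.
    static long[] apportion(long size, long entries, double ratio) {
      long lowSize = (long) Math.floor(size * ratio);
      long lowEntries = (long) Math.floor(entries * ratio);
      return new long[] {lowSize, lowEntries, size - lowSize, entries - lowEntries};
    }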
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/SslIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SslIT.java b/test/src/main/java/org/apache/accumulo/test/functional/SslIT.java
new file mode 100644
index 0000000..13248d0
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SslIT.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.Test;
+
+/**
+ * Runs a selection of ITs with SSL turned on, covering a range of different connection scenarios. Note that you can run *all* the ITs against SSL-enabled
+ * mini clusters with `mvn verify -DuseSslForIT`.
+ *
+ */
+public class SslIT extends ConfigurableMacBase {
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 6 * 60;
+ }
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ super.configure(cfg, hadoopCoreSite);
+ configureForSsl(cfg, getSslDir(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName())));
+ }
+
+ @Test
+ public void binary() throws AccumuloException, AccumuloSecurityException, Exception {
+ String tableName = getUniqueNames(1)[0];
+ getConnector().tableOperations().create(tableName);
+ BinaryIT.runTest(getConnector(), tableName);
+ }
+
+ @Test
+ public void concurrency() throws Exception {
+ ConcurrencyIT.runTest(getConnector(), getUniqueNames(1)[0]);
+ }
+
+ @Test
+ public void adminStop() throws Exception {
+ ShutdownIT.runAdminStopTest(getConnector(), getCluster());
+ }
+
+ @Test
+ public void bulk() throws Exception {
+ BulkIT.runTest(getConnector(), FileSystem.getLocal(new Configuration(false)), new Path(getCluster().getConfig().getDir().getAbsolutePath(), "tmp"), "root",
+ getUniqueNames(1)[0], this.getClass().getName(), testName.getMethodName());
+ }
+
+ @Test
+ public void mapReduce() throws Exception {
+ MapReduceIT.runTest(getConnector(), getCluster());
+ }
+
+}
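Outside the test harness, an SSL client connection is driven by ClientConfiguration. A minimal sketch; the instance name, ZooKeeper host, credentials, and truststore path are all placeholder values, only the API calls are real, and the snippet assumes it runs inside a method that declares throws Exception:

    // Placeholder values throughout; uses org.apache.accumulo.core.client.*
    // and org.apache.accumulo.core.client.security.tokens.PasswordToken.
    ClientConfiguration conf = ClientConfiguration.loadDefault()
        .withInstance("myInstance")
        .withZkHosts("zkhost:2181")
        .withSsl(true)
        .withTruststore("/path/to/truststore.jks");
    Instance inst = new ZooKeeperInstance(conf);
    Connector conn = inst.getConnector("root", new PasswordToken("secret"));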
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/SslWithClientAuthIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SslWithClientAuthIT.java b/test/src/main/java/org/apache/accumulo/test/functional/SslWithClientAuthIT.java
new file mode 100644
index 0000000..bb00b19
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SslWithClientAuthIT.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Map;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+/**
+ * Run all the same tests as SslIT, but with client auth turned on.
+ *
+ * All the methods are overridden just to make it easier to run individual tests from an IDE.
+ *
+ */
+public class SslWithClientAuthIT extends SslIT {
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ super.configure(cfg, hadoopCoreSite);
+ Map<String,String> site = cfg.getSiteConfig();
+ site.put(Property.INSTANCE_RPC_SSL_CLIENT_AUTH.getKey(), "true");
+ cfg.setSiteConfig(site);
+ }
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 8 * 60;
+ }
+
+ @Override
+ @Test
+ public void binary() throws AccumuloException, AccumuloSecurityException, Exception {
+ super.binary();
+ }
+
+ @Override
+ @Test
+ public void concurrency() throws Exception {
+ super.concurrency();
+ }
+
+ @Override
+ @Test
+ public void adminStop() throws Exception {
+ super.adminStop();
+ }
+
+ @Override
+ @Test
+ public void bulk() throws Exception {
+ super.bulk();
+ }
+
+ @Override
+ @Test
+ public void mapReduce() throws Exception {
+ super.mapReduce();
+ }
+}
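With INSTANCE_RPC_SSL_CLIENT_AUTH set to true as above, the servers also verify the client's certificate, so a client needs a keystore in addition to the truststore. A sketch under the same assumptions as the SslIT note; both paths are placeholders:

    // Client auth adds a keystore on top of the truststore used for SSL.
    ClientConfiguration conf = ClientConfiguration.loadDefault()
        .withSsl(true)
        .withTruststore("/path/to/truststore.jks")
        .withKeystore("/path/to/client-keystore.jks"); // placeholder path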
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/StartIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/StartIT.java b/test/src/main/java/org/apache/accumulo/test/functional/StartIT.java
new file mode 100644
index 0000000..57a8a6f
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/StartIT.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+
+import org.apache.accumulo.cluster.ClusterControl;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.start.TestMain;
+import org.junit.Test;
+
+public class StartIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 30;
+ }
+
+ @Test
+ public void test() throws Exception {
+ ClusterControl control = getCluster().getClusterControl();
+
+ assertNotEquals(0, control.exec(TestMain.class, new String[] {"exception"}));
+ assertEquals(0, control.exec(TestMain.class, new String[] {"success"}));
+ assertNotEquals(0, control.exec(TestMain.class, new String[0]));
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/TableIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/TableIT.java b/test/src/main/java/org/apache/accumulo/test/functional/TableIT.java
new file mode 100644
index 0000000..a4678a7
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/TableIT.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.FileNotFoundException;
+
+import org.apache.accumulo.cluster.AccumuloCluster;
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.hamcrest.CoreMatchers;
+import org.junit.Assume;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class TableIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Test
+ public void test() throws Exception {
+ Assume.assumeThat(getClusterType(), CoreMatchers.is(ClusterType.MINI));
+
+ AccumuloCluster cluster = getCluster();
+ MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
+ String rootPath = mac.getConfig().getDir().getAbsolutePath();
+
+ Connector c = getConnector();
+ TableOperations to = c.tableOperations();
+ String tableName = getUniqueNames(1)[0];
+ to.create(tableName);
+
+ TestIngest.Opts opts = new TestIngest.Opts();
+ VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+ ClientConfiguration clientConfig = getCluster().getClientConfig();
+ if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ opts.updateKerberosCredentials(clientConfig);
+ vopts.updateKerberosCredentials(clientConfig);
+ } else {
+ opts.setPrincipal(getAdminPrincipal());
+ vopts.setPrincipal(getAdminPrincipal());
+ }
+
+ opts.setTableName(tableName);
+ TestIngest.ingest(c, opts, new BatchWriterOpts());
+ to.flush(tableName, null, null, true);
+ vopts.setTableName(tableName);
+ VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+ String id = to.tableIdMap().get(tableName);
+ Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(new KeyExtent(new Text(id), null, null).toMetadataRange());
+ s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+ assertTrue(Iterators.size(s.iterator()) > 0);
+
+ FileSystem fs = getCluster().getFileSystem();
+ assertTrue(fs.listStatus(new Path(rootPath + "/accumulo/tables/" + id)).length > 0);
+ to.delete(tableName);
+ assertEquals(0, Iterators.size(s.iterator()));
+ try {
+ assertEquals(0, fs.listStatus(new Path(rootPath + "/accumulo/tables/" + id)).length);
+ } catch (FileNotFoundException ex) {
+ // that's fine, too
+ }
+ assertNull(to.tableIdMap().get(tableName));
+ to.create(tableName);
+ TestIngest.ingest(c, opts, new BatchWriterOpts());
+ VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+ to.delete(tableName);
+ }
+
+}
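The test above checks a table through three views: the client API (tableIdMap), the metadata table, and HDFS. Distilled, the HDFS-side check after delete() looks like the sketch below, assuming fs, rootPath, and id are in scope as in the test and the surrounding method declares throws Exception:

    // After TableOperations.delete(), the table's directory should be empty
    // or gone entirely; FileNotFoundException is the "gone" case.
    Path tableDir = new Path(rootPath + "/accumulo/tables/" + id);
    try {
      assertEquals(0, fs.listStatus(tableDir).length);
    } catch (FileNotFoundException expected) {
      // directory already removed, also acceptable
    }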
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/TabletIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/TabletIT.java b/test/src/main/java/org/apache/accumulo/test/functional/TabletIT.java
new file mode 100644
index 0000000..d2b1416
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/TabletIT.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.Assert.assertEquals;
+
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.MemoryUnit;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class TabletIT extends AccumuloClusterHarness {
+
+ private static final int N = 1000;
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ Map<String,String> siteConfig = cfg.getSiteConfig();
+ siteConfig.put(Property.TSERV_MAXMEM.getKey(), "128M");
+ cfg.setDefaultMemory(256, MemoryUnit.MEGABYTE);
+ cfg.setSiteConfig(siteConfig);
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Test
+ public void createTableTest() throws Exception {
+ String tableName = getUniqueNames(1)[0];
+ createTableTest(tableName, false);
+ createTableTest(tableName, true);
+ }
+
+ public void createTableTest(String tableName, boolean readOnly) throws Exception {
+ // create the test table within accumulo
+ Connector connector = getConnector();
+
+ if (!readOnly) {
+ TreeSet<Text> keys = new TreeSet<Text>();
+ for (int i = N / 100; i < N; i += N / 100) {
+ keys.add(new Text(String.format("%05d", i)));
+ }
+
+ // presplit
+ connector.tableOperations().create(tableName);
+ connector.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "200");
+ connector.tableOperations().addSplits(tableName, keys);
+ BatchWriter b = connector.createBatchWriter(tableName, new BatchWriterConfig());
+
+ // populate
+ for (int i = 0; i < N; i++) {
+ Mutation m = new Mutation(new Text(String.format("%05d", i)));
+ m.put(new Text("col" + Integer.toString((i % 3) + 1)), new Text("qual"), new Value("junk".getBytes(UTF_8)));
+ b.addMutation(m);
+ }
+ b.close();
+ }
+
+ Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
+ int count = 0;
+ for (Entry<Key,Value> elt : scanner) {
+ String expected = String.format("%05d", count);
+ assertEquals(expected, elt.getKey().getRow().toString());
+ count++;
+ }
+ assertEquals(N, count);
+ }
+
+}
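createTableTest presplits at every hundredth of the keyspace before loading. Distilled, the presplit step is just a sorted set of row boundaries handed to addSplits(); a sketch with N and the row format taken from the test:

    // Presplit at every N/100th row; rows are zero-padded to five digits.
    SortedSet<Text> splits = new TreeSet<Text>();
    for (int i = N / 100; i < N; i += N / 100)
      splits.add(new Text(String.format("%05d", i)));
    connector.tableOperations().addSplits(tableName, splits);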
[08/43] accumulo git commit: ACCUMULO-3871 move ITs into distro jar,
stop building test jar
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/SimpleBalancerFairnessIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SimpleBalancerFairnessIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SimpleBalancerFairnessIT.java
deleted file mode 100644
index 3fcbcfb..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/SimpleBalancerFairnessIT.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.impl.ClientContext;
-import org.apache.accumulo.core.client.impl.Credentials;
-import org.apache.accumulo.core.client.impl.MasterClient;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.master.thrift.MasterClientService;
-import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
-import org.apache.accumulo.core.master.thrift.TableInfo;
-import org.apache.accumulo.core.master.thrift.TabletServerStatus;
-import org.apache.accumulo.core.trace.Tracer;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.MemoryUnit;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class SimpleBalancerFairnessIT extends ConfigurableMacBase {
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setProperty(Property.TSERV_MAXMEM, "10K");
- cfg.setProperty(Property.TSERV_MAJC_DELAY, "0");
- cfg.setMemory(ServerType.TABLET_SERVER, cfg.getMemory(ServerType.TABLET_SERVER) * 3, MemoryUnit.BYTE);
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 10 * 60;
- }
-
- @Test
- public void simpleBalancerFairness() throws Exception {
- Connector c = getConnector();
- c.tableOperations().create("test_ingest");
- c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
- c.tableOperations().create("unused");
- TreeSet<Text> splits = TestIngest.getSplitPoints(0, 10000000, 500);
- log.info("Creating " + splits.size() + " splits");
- c.tableOperations().addSplits("unused", splits);
- List<String> tservers = c.instanceOperations().getTabletServers();
- TestIngest.Opts opts = new TestIngest.Opts();
- opts.rows = 50000;
- opts.setPrincipal("root");
- TestIngest.ingest(c, opts, new BatchWriterOpts());
- c.tableOperations().flush("test_ingest", null, null, false);
- UtilWaitThread.sleep(45 * 1000);
- Credentials creds = new Credentials("root", new PasswordToken(ROOT_PASSWORD));
- ClientContext context = new ClientContext(c.getInstance(), creds, getClientConfig());
-
- MasterMonitorInfo stats = null;
- int unassignedTablets = 1;
- for (int i = 0; unassignedTablets > 0 && i < 10; i++) {
- MasterClientService.Iface client = null;
- try {
- client = MasterClient.getConnectionWithRetry(context);
- stats = client.getMasterStats(Tracer.traceInfo(), creds.toThrift(c.getInstance()));
- } finally {
- if (client != null)
- MasterClient.close(client);
- }
- unassignedTablets = stats.getUnassignedTablets();
- if (unassignedTablets > 0) {
- log.info("Found " + unassignedTablets + " unassigned tablets, sleeping 3 seconds for tablet assignment");
- Thread.sleep(3000);
- }
- }
-
- assertEquals("Unassigned tablets were not assigned within 30 seconds", 0, unassignedTablets);
-
- // Compute online tablets per tserver
- List<Integer> counts = new ArrayList<Integer>();
- for (TabletServerStatus server : stats.tServerInfo) {
- int count = 0;
- for (TableInfo table : server.tableMap.values()) {
- count += table.onlineTablets;
- }
- counts.add(count);
- }
- assertTrue("Expected to have at least two TabletServers", counts.size() > 1);
- for (int i = 1; i < counts.size(); i++) {
- int diff = Math.abs(counts.get(0) - counts.get(i));
- assertTrue("Expected difference in tablets to be less than or equal to " + counts.size() + " but was " + diff + ". Counts " + counts,
- diff <= tservers.size());
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java
deleted file mode 100644
index 8cece0b..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Iterator;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-/**
- * This test recreates issue ACCUMULO-516. Until that issue is fixed this test should time out.
- */
-public class SparseColumnFamilyIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Test
- public void sparceColumnFamily() throws Exception {
- String scftt = getUniqueNames(1)[0];
- Connector c = getConnector();
- c.tableOperations().create(scftt);
-
- BatchWriter bw = c.createBatchWriter(scftt, new BatchWriterConfig());
-
- // create file in the tablet that has mostly column family 0, with a few entries for column family 1
-
- bw.addMutation(nm(0, 1, 0));
- for (int i = 1; i < 99999; i++) {
- bw.addMutation(nm(i * 2, 0, i));
- }
- bw.addMutation(nm(99999 * 2, 1, 99999));
- bw.flush();
-
- c.tableOperations().flush(scftt, null, null, true);
-
- // create a file that has column family 1 and 0 interleaved
- for (int i = 0; i < 100000; i++) {
- bw.addMutation(nm(i * 2 + 1, i % 2 == 0 ? 0 : 1, i));
- }
- bw.close();
-
- c.tableOperations().flush(scftt, null, null, true);
-
- Scanner scanner = c.createScanner(scftt, Authorizations.EMPTY);
-
- for (int i = 0; i < 200; i++) {
-
- // every time we search for column family 1, it will scan the entire file
- // that has mostly column family 0 until the bug is fixed
- scanner.setRange(new Range(String.format("%06d", i), null));
- scanner.clearColumns();
- scanner.setBatchSize(3);
- scanner.fetchColumnFamily(new Text(String.format("%03d", 1)));
-
- Iterator<Entry<Key,Value>> iter = scanner.iterator();
- if (iter.hasNext()) {
- Entry<Key,Value> entry = iter.next();
- if (!"001".equals(entry.getKey().getColumnFamilyData().toString())) {
- throw new Exception();
- }
- }
- }
- }
-
- private Mutation nm(int row, int cf, int val) {
- Mutation m = new Mutation(String.format("%06d", row));
- m.put(String.format("%03d", cf), "", "" + val);
- return m;
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
deleted file mode 100644
index 49cd2aa..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.InstanceOperations;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.util.CheckForMetadataProblems;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.After;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Charsets;
-
-public class SplitIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(SplitIT.class);
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setProperty(Property.TSERV_MAXMEM, "5K");
- cfg.setProperty(Property.TSERV_MAJC_DELAY, "100ms");
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 4 * 60;
- }
-
- private String tservMaxMem, tservMajcDelay;
-
- @Before
- public void alterConfig() throws Exception {
- Assume.assumeTrue(ClusterType.MINI == getClusterType());
-
- InstanceOperations iops = getConnector().instanceOperations();
- Map<String,String> config = iops.getSystemConfiguration();
- tservMaxMem = config.get(Property.TSERV_MAXMEM.getKey());
- tservMajcDelay = config.get(Property.TSERV_MAJC_DELAY.getKey());
-
- if (!tservMajcDelay.equals("100ms")) {
- iops.setProperty(Property.TSERV_MAJC_DELAY.getKey(), "100ms");
- }
-
- // Property.TSERV_MAXMEM can't be altered on a running server
- boolean restarted = false;
- if (!tservMaxMem.equals("5K")) {
- iops.setProperty(Property.TSERV_MAXMEM.getKey(), "5K");
- getCluster().getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
- getCluster().getClusterControl().startAllServers(ServerType.TABLET_SERVER);
- restarted = true;
- }
-
- // If we restarted the tservers, we don't need to re-wait for the majc delay
- if (!restarted) {
- long millis = AccumuloConfiguration.getTimeInMillis(tservMajcDelay);
- log.info("Waiting for majc delay period: {}ms", millis);
- Thread.sleep(millis);
- log.info("Finished waiting for majc delay period");
- }
- }
-
- @After
- public void resetConfig() throws Exception {
- if (null != tservMaxMem) {
- log.info("Resetting {}={}", Property.TSERV_MAXMEM.getKey(), tservMaxMem);
- getConnector().instanceOperations().setProperty(Property.TSERV_MAXMEM.getKey(), tservMaxMem);
- tservMaxMem = null;
- getCluster().getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
- getCluster().getClusterControl().startAllServers(ServerType.TABLET_SERVER);
- }
- if (null != tservMajcDelay) {
- log.info("Resetting {}={}", Property.TSERV_MAJC_DELAY.getKey(), tservMajcDelay);
- getConnector().instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), tservMajcDelay);
- tservMajcDelay = null;
- }
- }
-
- @Test
- public void tabletShouldSplit() throws Exception {
- Connector c = getConnector();
- String table = getUniqueNames(1)[0];
- c.tableOperations().create(table);
- c.tableOperations().setProperty(table, Property.TABLE_SPLIT_THRESHOLD.getKey(), "256K");
- c.tableOperations().setProperty(table, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1K");
- TestIngest.Opts opts = new TestIngest.Opts();
- VerifyIngest.Opts vopts = new VerifyIngest.Opts();
- opts.rows = 100000;
- opts.setTableName(table);
-
- ClientConfiguration clientConfig = cluster.getClientConfig();
- if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- opts.updateKerberosCredentials(clientConfig);
- vopts.updateKerberosCredentials(clientConfig);
- } else {
- opts.setPrincipal(getAdminPrincipal());
- vopts.setPrincipal(getAdminPrincipal());
- }
-
- TestIngest.ingest(c, opts, new BatchWriterOpts());
- vopts.rows = opts.rows;
- vopts.setTableName(table);
- VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
- while (c.tableOperations().listSplits(table).size() < 10) {
- UtilWaitThread.sleep(15 * 1000);
- }
- String id = c.tableOperations().tableIdMap().get(table);
- Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- KeyExtent extent = new KeyExtent(new Text(id), null, null);
- s.setRange(extent.toMetadataRange());
- MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(s);
- int count = 0;
- int shortened = 0;
- for (Entry<Key,Value> entry : s) {
- extent = new KeyExtent(entry.getKey().getRow(), entry.getValue());
- if (extent.getEndRow() != null && extent.getEndRow().toString().length() < 14)
- shortened++;
- count++;
- }
-
- assertTrue("Shortened should be greater than zero: " + shortened, shortened > 0);
- assertTrue("Count should be cgreater than 10: " + count, count > 10);
-
- String[] args;
- if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- ClusterUser rootUser = getAdminUser();
- args = new String[] {"-i", cluster.getInstanceName(), "-u", rootUser.getPrincipal(), "--keytab", rootUser.getKeytab().getAbsolutePath(), "-z",
- cluster.getZooKeepers()};
- } else {
- PasswordToken token = (PasswordToken) getAdminToken();
- args = new String[] {"-i", cluster.getInstanceName(), "-u", "root", "-p", new String(token.getPassword(), Charsets.UTF_8), "-z", cluster.getZooKeepers()};
- }
-
- assertEquals(0, getCluster().getClusterControl().exec(CheckForMetadataProblems.class, args));
- }
-
- @Test
- public void interleaveSplit() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
- c.tableOperations().setProperty(tableName, Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none");
- UtilWaitThread.sleep(5 * 1000);
- ReadWriteIT.interleaveTest(c, tableName);
- UtilWaitThread.sleep(5 * 1000);
- int numSplits = c.tableOperations().listSplits(tableName).size();
- while (numSplits <= 20) {
- log.info("Waiting for splits to happen");
- Thread.sleep(2000);
- numSplits = c.tableOperations().listSplits(tableName).size();
- }
- assertTrue("Expected at least 20 splits, saw " + numSplits, numSplits > 20);
- }
-
- @Test
- public void deleteSplit() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
- ClientConfiguration clientConfig = getCluster().getClientConfig();
- String password = null, keytab = null;
- if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- keytab = getAdminUser().getKeytab().getAbsolutePath();
- } else {
- password = new String(((PasswordToken) getAdminToken()).getPassword(), Charsets.UTF_8);
- }
- DeleteIT.deleteTest(c, getCluster(), getAdminPrincipal(), password, tableName, keytab);
- c.tableOperations().flush(tableName, null, null, true);
- for (int i = 0; i < 5; i++) {
- UtilWaitThread.sleep(10 * 1000);
- if (c.tableOperations().listSplits(tableName).size() > 20)
- break;
- }
- assertTrue(c.tableOperations().listSplits(tableName).size() > 20);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
deleted file mode 100644
index 4d13e2a..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.SortedMap;
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.impl.ScannerImpl;
-import org.apache.accumulo.core.client.impl.Writer;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.file.rfile.RFile;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.DataFileValue;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.ColumnFQ;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
-import org.apache.accumulo.fate.zookeeper.ZooLock.LockLossReason;
-import org.apache.accumulo.fate.zookeeper.ZooLock.LockWatcher;
-import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
-import org.apache.accumulo.server.AccumuloServerContext;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
-import org.apache.accumulo.server.fs.FileRef;
-import org.apache.accumulo.server.master.state.Assignment;
-import org.apache.accumulo.server.master.state.TServerInstance;
-import org.apache.accumulo.server.tablets.TabletTime;
-import org.apache.accumulo.server.util.FileUtil;
-import org.apache.accumulo.server.util.MasterMetadataUtil;
-import org.apache.accumulo.server.util.MetadataTableUtil;
-import org.apache.accumulo.server.zookeeper.TransactionWatcher;
-import org.apache.accumulo.server.zookeeper.ZooLock;
-import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.tserver.TabletServer;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-import com.google.common.collect.Multimap;
-
-public class SplitRecoveryIT extends ConfigurableMacBase {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60;
- }
-
- private KeyExtent nke(String table, String endRow, String prevEndRow) {
- return new KeyExtent(new Text(table), endRow == null ? null : new Text(endRow), prevEndRow == null ? null : new Text(prevEndRow));
- }
-
- private void run() throws Exception {
- Instance inst = HdfsZooInstance.getInstance();
- AccumuloServerContext c = new AccumuloServerContext(new ServerConfigurationFactory(inst));
- String zPath = ZooUtil.getRoot(inst) + "/testLock";
- IZooReaderWriter zoo = ZooReaderWriter.getInstance();
- zoo.putPersistentData(zPath, new byte[0], NodeExistsPolicy.OVERWRITE);
- ZooLock zl = new ZooLock(zPath);
- boolean gotLock = zl.tryLock(new LockWatcher() {
-
- @Override
- public void lostLock(LockLossReason reason) {
- System.exit(-1);
-
- }
-
- @Override
- public void unableToMonitorLockNode(Throwable e) {
- System.exit(-1);
- }
- }, "foo".getBytes(UTF_8));
-
- if (!gotLock) {
- System.err.println("Failed to get lock " + zPath);
- }
-
- // run test for a table with one tablet
- runSplitRecoveryTest(c, 0, "sp", 0, zl, nke("foo0", null, null));
- runSplitRecoveryTest(c, 1, "sp", 0, zl, nke("foo1", null, null));
-
- // run test for tables with two tablets, run test on first and last tablet
- runSplitRecoveryTest(c, 0, "k", 0, zl, nke("foo2", "m", null), nke("foo2", null, "m"));
- runSplitRecoveryTest(c, 1, "k", 0, zl, nke("foo3", "m", null), nke("foo3", null, "m"));
- runSplitRecoveryTest(c, 0, "o", 1, zl, nke("foo4", "m", null), nke("foo4", null, "m"));
- runSplitRecoveryTest(c, 1, "o", 1, zl, nke("foo5", "m", null), nke("foo5", null, "m"));
-
- // run test for table w/ three tablets, run test on middle tablet
- runSplitRecoveryTest(c, 0, "o", 1, zl, nke("foo6", "m", null), nke("foo6", "r", "m"), nke("foo6", null, "r"));
- runSplitRecoveryTest(c, 1, "o", 1, zl, nke("foo7", "m", null), nke("foo7", "r", "m"), nke("foo7", null, "r"));
-
- // run test for table w/ three tablets, run test on first
- runSplitRecoveryTest(c, 0, "g", 0, zl, nke("foo8", "m", null), nke("foo8", "r", "m"), nke("foo8", null, "r"));
- runSplitRecoveryTest(c, 1, "g", 0, zl, nke("foo9", "m", null), nke("foo9", "r", "m"), nke("foo9", null, "r"));
-
- // run test for table w/ three tablets, run test on last tablet
- runSplitRecoveryTest(c, 0, "w", 2, zl, nke("fooa", "m", null), nke("fooa", "r", "m"), nke("fooa", null, "r"));
- runSplitRecoveryTest(c, 1, "w", 2, zl, nke("foob", "m", null), nke("foob", "r", "m"), nke("foob", null, "r"));
- }
-
- private void runSplitRecoveryTest(AccumuloServerContext context, int failPoint, String mr, int extentToSplit, ZooLock zl, KeyExtent... extents)
- throws Exception {
-
- Text midRow = new Text(mr);
-
- SortedMap<FileRef,DataFileValue> splitMapFiles = null;
-
- for (int i = 0; i < extents.length; i++) {
- KeyExtent extent = extents[i];
-
- String tdir = ServerConstants.getTablesDirs()[0] + "/" + extent.getTableId().toString() + "/dir_" + i;
- MetadataTableUtil.addTablet(extent, tdir, context, TabletTime.LOGICAL_TIME_ID, zl);
- SortedMap<FileRef,DataFileValue> mapFiles = new TreeMap<FileRef,DataFileValue>();
- mapFiles.put(new FileRef(tdir + "/" + RFile.EXTENSION + "_000_000"), new DataFileValue(1000017 + i, 10000 + i));
-
- if (i == extentToSplit) {
- splitMapFiles = mapFiles;
- }
- int tid = 0;
- TransactionWatcher.ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
- MetadataTableUtil.updateTabletDataFile(tid, extent, mapFiles, "L0", context, zl);
- }
-
- KeyExtent extent = extents[extentToSplit];
-
- KeyExtent high = new KeyExtent(extent.getTableId(), extent.getEndRow(), midRow);
- KeyExtent low = new KeyExtent(extent.getTableId(), midRow, extent.getPrevEndRow());
-
- splitPartiallyAndRecover(context, extent, high, low, .4, splitMapFiles, midRow, "localhost:1234", failPoint, zl);
- }
-
- private void splitPartiallyAndRecover(AccumuloServerContext context, KeyExtent extent, KeyExtent high, KeyExtent low, double splitRatio,
- SortedMap<FileRef,DataFileValue> mapFiles, Text midRow, String location, int steps, ZooLock zl) throws Exception {
-
- SortedMap<FileRef,DataFileValue> lowDatafileSizes = new TreeMap<FileRef,DataFileValue>();
- SortedMap<FileRef,DataFileValue> highDatafileSizes = new TreeMap<FileRef,DataFileValue>();
- List<FileRef> highDatafilesToRemove = new ArrayList<FileRef>();
-
- MetadataTableUtil.splitDatafiles(extent.getTableId(), midRow, splitRatio, new HashMap<FileRef,FileUtil.FileInfo>(), mapFiles, lowDatafileSizes,
- highDatafileSizes, highDatafilesToRemove);
-
- MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, context, zl);
- TServerInstance instance = new TServerInstance(location, zl.getSessionId());
- Writer writer = MetadataTableUtil.getMetadataTable(context);
- Assignment assignment = new Assignment(high, instance);
- Mutation m = new Mutation(assignment.tablet.getMetadataEntry());
- assignment.server.putFutureLocation(m);
- writer.update(m);
-
- if (steps >= 1) {
- Multimap<Long,FileRef> bulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, extent);
- MasterMetadataUtil.addNewTablet(context, low, "/lowDir", instance, lowDatafileSizes, bulkFiles, TabletTime.LOGICAL_TIME_ID + "0", -1l, -1l, zl);
- }
- if (steps >= 2) {
- MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, context, zl);
- }
-
- TabletServer.verifyTabletInformation(context, high, instance, null, "127.0.0.1:0", zl);
-
- if (steps >= 1) {
- ensureTabletHasNoUnexpectedMetadataEntries(context, low, lowDatafileSizes);
- ensureTabletHasNoUnexpectedMetadataEntries(context, high, highDatafileSizes);
-
- Multimap<Long,FileRef> lowBulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, low);
- Multimap<Long,FileRef> highBulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, high);
-
- if (!lowBulkFiles.equals(highBulkFiles)) {
- throw new Exception(" " + lowBulkFiles + " != " + highBulkFiles + " " + low + " " + high);
- }
-
- if (lowBulkFiles.size() == 0) {
- throw new Exception(" no bulk files " + low);
- }
- } else {
- ensureTabletHasNoUnexpectedMetadataEntries(context, extent, mapFiles);
- }
- }
-
- private void ensureTabletHasNoUnexpectedMetadataEntries(AccumuloServerContext context, KeyExtent extent, SortedMap<FileRef,DataFileValue> expectedMapFiles)
- throws Exception {
- Scanner scanner = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
- scanner.setRange(extent.toMetadataRange());
-
- HashSet<ColumnFQ> expectedColumns = new HashSet<ColumnFQ>();
- expectedColumns.add(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN);
- expectedColumns.add(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN);
- expectedColumns.add(TabletsSection.ServerColumnFamily.TIME_COLUMN);
- expectedColumns.add(TabletsSection.ServerColumnFamily.LOCK_COLUMN);
-
- HashSet<Text> expectedColumnFamilies = new HashSet<Text>();
- expectedColumnFamilies.add(DataFileColumnFamily.NAME);
- expectedColumnFamilies.add(TabletsSection.FutureLocationColumnFamily.NAME);
- expectedColumnFamilies.add(TabletsSection.CurrentLocationColumnFamily.NAME);
- expectedColumnFamilies.add(TabletsSection.LastLocationColumnFamily.NAME);
- expectedColumnFamilies.add(TabletsSection.BulkFileColumnFamily.NAME);
-
- Iterator<Entry<Key,Value>> iter = scanner.iterator();
- while (iter.hasNext()) {
- Key key = iter.next().getKey();
-
- if (!key.getRow().equals(extent.getMetadataEntry())) {
- throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME + " entry " + key);
- }
-
- if (expectedColumnFamilies.contains(key.getColumnFamily())) {
- continue;
- }
-
- if (expectedColumns.remove(new ColumnFQ(key))) {
- continue;
- }
-
- throw new Exception("Tablet " + extent + " contained unexpected " + MetadataTable.NAME + " entry " + key);
- }
-    System.out.println("expectedColumns " + expectedColumns);
-    if (!expectedColumns.isEmpty()) {
- throw new Exception("Not all expected columns seen " + extent + " " + expectedColumns);
- }
-
- SortedMap<FileRef,DataFileValue> fixedMapFiles = MetadataTableUtil.getDataFileSizes(extent, context);
- verifySame(expectedMapFiles, fixedMapFiles);
- }
-
- private void verifySame(SortedMap<FileRef,DataFileValue> datafileSizes, SortedMap<FileRef,DataFileValue> fixedDatafileSizes) throws Exception {
-
- if (!datafileSizes.keySet().containsAll(fixedDatafileSizes.keySet()) || !fixedDatafileSizes.keySet().containsAll(datafileSizes.keySet())) {
- throw new Exception("Key sets not the same " + datafileSizes.keySet() + " != " + fixedDatafileSizes.keySet());
- }
-
- for (Entry<FileRef,DataFileValue> entry : datafileSizes.entrySet()) {
- DataFileValue dfv = entry.getValue();
- DataFileValue otherDfv = fixedDatafileSizes.get(entry.getKey());
-
- if (!dfv.equals(otherDfv)) {
- throw new Exception(entry.getKey() + " dfv not equal " + dfv + " " + otherDfv);
- }
- }
- }
-
- public static void main(String[] args) throws Exception {
- new SplitRecoveryIT().run();
- }
-
- @Test
- public void test() throws Exception {
- assertEquals(0, exec(SplitRecoveryIT.class).waitFor());
- }
-
-}
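
For reference, the fail-point loop above exercises every intermediate metadata state of a
split: runSplitRecoveryTest stops the operation after `failPoint` steps, then expects the
recovery path (TabletServer.verifyTabletInformation plus the remaining steps) to drive the
tablet to the same final state. A minimal standalone sketch of that pattern, with
illustrative names that are not part of this patch:

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical sketch: an operation of three idempotent steps is interrupted
    // after every possible prefix, and a recovery pass completes it each time.
    public class FailPointSketch {
      static final String[] STEPS = {"splitTablet", "addNewTablet", "finishSplit"};

      static void runSteps(List<String> metadata, int upTo) {
        for (int i = 0; i < upTo; i++) {
          if (!metadata.contains(STEPS[i])) { // idempotent: skip already-completed steps
            metadata.add(STEPS[i]);
          }
        }
      }

      public static void main(String[] args) {
        for (int failPoint = 0; failPoint <= STEPS.length; failPoint++) {
          List<String> metadata = new ArrayList<>();
          runSteps(metadata, failPoint);     // "crash" after failPoint steps
          runSteps(metadata, STEPS.length);  // recovery replays the full sequence
          if (metadata.size() != STEPS.length) {
            throw new AssertionError("recovery did not complete from fail point " + failPoint);
          }
        }
        System.out.println("recovered from every fail point");
      }
    }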
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/SslIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SslIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SslIT.java
deleted file mode 100644
index 13248d0..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/SslIT.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.Test;
-
-/**
- * Run a selection of ITs with SSL turned on, covering a range of different connection scenarios. Note that *all* of the ITs can be run against SSL-enabled
- * mini clusters with `mvn verify -DuseSslForIT`
- *
- */
-public class SslIT extends ConfigurableMacBase {
- @Override
- public int defaultTimeoutSeconds() {
- return 6 * 60;
- }
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- super.configure(cfg, hadoopCoreSite);
- configureForSsl(cfg, getSslDir(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName())));
- }
-
- @Test
- public void binary() throws AccumuloException, AccumuloSecurityException, Exception {
- String tableName = getUniqueNames(1)[0];
- getConnector().tableOperations().create(tableName);
- BinaryIT.runTest(getConnector(), tableName);
- }
-
- @Test
- public void concurrency() throws Exception {
- ConcurrencyIT.runTest(getConnector(), getUniqueNames(1)[0]);
- }
-
- @Test
- public void adminStop() throws Exception {
- ShutdownIT.runAdminStopTest(getConnector(), getCluster());
- }
-
- @Test
- public void bulk() throws Exception {
- BulkIT.runTest(getConnector(), FileSystem.getLocal(new Configuration(false)), new Path(getCluster().getConfig().getDir().getAbsolutePath(), "tmp"), "root",
- getUniqueNames(1)[0], this.getClass().getName(), testName.getMethodName());
- }
-
- @Test
- public void mapReduce() throws Exception {
- MapReduceIT.runTest(getConnector(), getCluster());
- }
-
-}
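
configureForSsl is defined in the test harness rather than in this diff. Assuming it boils
down to flipping the instance-wide SSL switch and pointing the cluster at a keystore and
truststore (the real helper also generates the certificates), a rough sketch would be:

    import java.util.Map;

    import org.apache.accumulo.core.conf.Property;
    import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;

    // Sketch only: the exact property set configureForSsl applies is an assumption here.
    public class SslConfigSketch {
      static void enableSsl(MiniAccumuloConfigImpl cfg, String keystore, String truststore, String password) {
        Map<String,String> site = cfg.getSiteConfig();
        site.put(Property.INSTANCE_RPC_SSL_ENABLED.getKey(), "true");
        site.put(Property.RPC_SSL_KEYSTORE_PATH.getKey(), keystore);
        site.put(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey(), password);
        site.put(Property.RPC_SSL_TRUSTSTORE_PATH.getKey(), truststore);
        cfg.setSiteConfig(site);
      }
    }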
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/SslWithClientAuthIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SslWithClientAuthIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SslWithClientAuthIT.java
deleted file mode 100644
index bb00b19..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/SslWithClientAuthIT.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Map;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
-/**
- * Run all the same tests as SslIT, but with client auth turned on.
- *
- * All the methods are overridden just to make it easier to run individual tests from an IDE.
- *
- */
-public class SslWithClientAuthIT extends SslIT {
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- super.configure(cfg, hadoopCoreSite);
- Map<String,String> site = cfg.getSiteConfig();
- site.put(Property.INSTANCE_RPC_SSL_CLIENT_AUTH.getKey(), "true");
- cfg.setSiteConfig(site);
- }
-
- @Override
- public int defaultTimeoutSeconds() {
- return 8 * 60;
- }
-
- @Override
- @Test
- public void binary() throws AccumuloException, AccumuloSecurityException, Exception {
- super.binary();
- }
-
- @Override
- @Test
- public void concurrency() throws Exception {
- super.concurrency();
- }
-
- @Override
- @Test
- public void adminStop() throws Exception {
- super.adminStop();
- }
-
- @Override
- @Test
- public void bulk() throws Exception {
- super.bulk();
- }
-
- @Override
- @Test
- public void mapReduce() throws Exception {
- super.mapReduce();
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/StartIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/StartIT.java b/test/src/test/java/org/apache/accumulo/test/functional/StartIT.java
deleted file mode 100644
index 57a8a6f..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/StartIT.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-
-import org.apache.accumulo.cluster.ClusterControl;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.start.TestMain;
-import org.junit.Test;
-
-public class StartIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 30;
- }
-
- @Test
- public void test() throws Exception {
- ClusterControl control = getCluster().getClusterControl();
-
- assertNotEquals(0, control.exec(TestMain.class, new String[] {"exception"}));
- assertEquals(0, control.exec(TestMain.class, new String[] {"success"}));
- assertNotEquals(0, control.exec(TestMain.class, new String[0]));
- }
-
-}
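
TestMain itself is not part of this diff; judging only from the exit codes asserted above,
it presumably behaves along these lines (a hypothetical reconstruction, not the real class):

    // Exits 0 for "success", nonzero when told to throw or given no argument.
    public class TestMainSketch {
      public static void main(String[] args) {
        if (args.length == 0) {
          System.exit(-1);                        // no argument: nonzero exit status
        }
        if (args[0].equals("success")) {
          return;                                 // normal completion, exit status 0
        }
        throw new RuntimeException("simulated");  // uncaught: JVM exits nonzero
      }
    }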
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
deleted file mode 100644
index a4678a7..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.FileNotFoundException;
-
-import org.apache.accumulo.cluster.AccumuloCluster;
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.hamcrest.CoreMatchers;
-import org.junit.Assume;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class TableIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Test
- public void test() throws Exception {
- Assume.assumeThat(getClusterType(), CoreMatchers.is(ClusterType.MINI));
-
- AccumuloCluster cluster = getCluster();
- MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
- String rootPath = mac.getConfig().getDir().getAbsolutePath();
-
- Connector c = getConnector();
- TableOperations to = c.tableOperations();
- String tableName = getUniqueNames(1)[0];
- to.create(tableName);
-
- TestIngest.Opts opts = new TestIngest.Opts();
- VerifyIngest.Opts vopts = new VerifyIngest.Opts();
- ClientConfiguration clientConfig = getCluster().getClientConfig();
- if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- opts.updateKerberosCredentials(clientConfig);
- vopts.updateKerberosCredentials(clientConfig);
- } else {
- opts.setPrincipal(getAdminPrincipal());
- vopts.setPrincipal(getAdminPrincipal());
- }
-
- opts.setTableName(tableName);
- TestIngest.ingest(c, opts, new BatchWriterOpts());
- to.flush(tableName, null, null, true);
- vopts.setTableName(tableName);
- VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
- String id = to.tableIdMap().get(tableName);
- Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(new KeyExtent(new Text(id), null, null).toMetadataRange());
- s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
- assertTrue(Iterators.size(s.iterator()) > 0);
-
- FileSystem fs = getCluster().getFileSystem();
- assertTrue(fs.listStatus(new Path(rootPath + "/accumulo/tables/" + id)).length > 0);
- to.delete(tableName);
- assertEquals(0, Iterators.size(s.iterator()));
- try {
- assertEquals(0, fs.listStatus(new Path(rootPath + "/accumulo/tables/" + id)).length);
- } catch (FileNotFoundException ex) {
- // that's fine, too
- }
- assertNull(to.tableIdMap().get(tableName));
- to.create(tableName);
- TestIngest.ingest(c, opts, new BatchWriterOpts());
- VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
- to.delete(tableName);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/TabletIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TabletIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TabletIT.java
deleted file mode 100644
index d2b1416..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/TabletIT.java
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.MemoryUnit;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class TabletIT extends AccumuloClusterHarness {
-
- private static final int N = 1000;
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- Map<String,String> siteConfig = cfg.getSiteConfig();
- siteConfig.put(Property.TSERV_MAXMEM.getKey(), "128M");
- cfg.setDefaultMemory(256, MemoryUnit.MEGABYTE);
- cfg.setSiteConfig(siteConfig);
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Test
- public void createTableTest() throws Exception {
- String tableName = getUniqueNames(1)[0];
- createTableTest(tableName, false);
- createTableTest(tableName, true);
- }
-
- public void createTableTest(String tableName, boolean readOnly) throws Exception {
- // create the test table within accumulo
- Connector connector = getConnector();
-
- if (!readOnly) {
- TreeSet<Text> keys = new TreeSet<Text>();
- for (int i = N / 100; i < N; i += N / 100) {
- keys.add(new Text(String.format("%05d", i)));
- }
-
- // presplit
- connector.tableOperations().create(tableName);
- connector.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "200");
- connector.tableOperations().addSplits(tableName, keys);
- BatchWriter b = connector.createBatchWriter(tableName, new BatchWriterConfig());
-
- // populate
- for (int i = 0; i < N; i++) {
- Mutation m = new Mutation(new Text(String.format("%05d", i)));
- m.put(new Text("col" + Integer.toString((i % 3) + 1)), new Text("qual"), new Value("junk".getBytes(UTF_8)));
- b.addMutation(m);
- }
- b.close();
- }
-
- Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
- int count = 0;
- for (Entry<Key,Value> elt : scanner) {
- String expected = String.format("%05d", count);
-      assertEquals(expected, elt.getKey().getRow().toString());
- count++;
- }
- assertEquals(N, count);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/TabletStateChangeIteratorIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TabletStateChangeIteratorIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TabletStateChangeIteratorIT.java
deleted file mode 100644
index 0efb1aa..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/TabletStateChangeIteratorIT.java
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchDeleter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.master.thrift.MasterState;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.ZooCache;
-import org.apache.accumulo.harness.SharedMiniClusterBase;
-import org.apache.accumulo.server.master.state.CurrentState;
-import org.apache.accumulo.server.master.state.MergeInfo;
-import org.apache.accumulo.server.master.state.MetaDataTableScanner;
-import org.apache.accumulo.server.master.state.TServerInstance;
-import org.apache.accumulo.server.master.state.TabletStateChangeIterator;
-import org.apache.accumulo.server.zookeeper.ZooLock;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-import com.google.common.base.Predicate;
-import com.google.common.collect.Sets;
-
-/**
- * Test to ensure that the {@link TabletStateChangeIterator} properly skips over tablet information in the metadata table when there is no work to be done on
- * the tablet (see ACCUMULO-3580)
- */
-public class TabletStateChangeIteratorIT extends SharedMiniClusterBase {
-
- @Override
- public int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Test
- public void test() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException {
- String[] tables = getUniqueNames(4);
- final String t1 = tables[0];
- final String t2 = tables[1];
- final String t3 = tables[2];
- final String cloned = tables[3];
-
- // create some metadata
- createTable(t1, true);
- createTable(t2, false);
- createTable(t3, true);
-
- // examine a clone of the metadata table, so we can manipulate it
- cloneMetadataTable(cloned);
-
- assertEquals("No tables should need attention", 0, findTabletsNeedingAttention(cloned));
-
- // test the assigned case (no location)
- removeLocation(cloned, t3);
- assertEquals("Should have one tablet without a loc", 1, findTabletsNeedingAttention(cloned));
-
- // TODO test the cases where the assignment is to a dead tserver
- // TODO test the cases where there is ongoing merges
- // TODO test the bad tablet location state case (active split, inconsistent metadata)
-
- // clean up
- dropTables(t1, t2, t3);
- }
-
- private void removeLocation(String table, String tableNameToModify) throws TableNotFoundException, MutationsRejectedException {
- String tableIdToModify = getConnector().tableOperations().tableIdMap().get(tableNameToModify);
- BatchDeleter deleter = getConnector().createBatchDeleter(table, Authorizations.EMPTY, 1, new BatchWriterConfig());
- deleter.setRanges(Collections.singleton(new KeyExtent(new Text(tableIdToModify), null, null).toMetadataRange()));
- deleter.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
- deleter.delete();
- deleter.close();
- }
-
- private int findTabletsNeedingAttention(String table) throws TableNotFoundException {
- int results = 0;
- Scanner scanner = getConnector().createScanner(table, Authorizations.EMPTY);
- MetaDataTableScanner.configureScanner(scanner, new State());
- scanner.updateScanIteratorOption("tabletChange", "debug", "1");
- for (Entry<Key,Value> e : scanner) {
-      // every entry the configured iterator returns is a tablet needing attention
-      results++;
- }
- return results;
- }
-
- private void createTable(String t, boolean online) throws AccumuloSecurityException, AccumuloException, TableNotFoundException, TableExistsException {
- Connector conn = getConnector();
- conn.tableOperations().create(t);
- conn.tableOperations().online(t, true);
- if (!online) {
- conn.tableOperations().offline(t, true);
- }
- }
-
- private void cloneMetadataTable(String cloned) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, TableExistsException {
- getConnector().tableOperations().clone(MetadataTable.NAME, cloned, true, null, null);
- }
-
- private void dropTables(String... tables) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
- for (String t : tables) {
- getConnector().tableOperations().delete(t);
- }
- }
-
- private final class State implements CurrentState {
-
- @Override
- public Set<TServerInstance> onlineTabletServers() {
- HashSet<TServerInstance> tservers = new HashSet<TServerInstance>();
- for (String tserver : getConnector().instanceOperations().getTabletServers()) {
- try {
- String zPath = ZooUtil.getRoot(getConnector().getInstance()) + Constants.ZTSERVERS + "/" + tserver;
- long sessionId = ZooLock.getSessionId(new ZooCache(getCluster().getZooKeepers(), getConnector().getInstance().getZooKeepersSessionTimeOut()), zPath);
- tservers.add(new TServerInstance(tserver, sessionId));
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- }
- return tservers;
- }
-
- @Override
- public Set<String> onlineTables() {
- HashSet<String> onlineTables = new HashSet<String>(getConnector().tableOperations().tableIdMap().values());
- return Sets.filter(onlineTables, new Predicate<String>() {
- @Override
- public boolean apply(String tableId) {
- return Tables.getTableState(getConnector().getInstance(), tableId) == TableState.ONLINE;
- }
- });
- }
-
- @Override
- public Collection<MergeInfo> merges() {
- return Collections.emptySet();
- }
-
- @Override
- public Collection<KeyExtent> migrations() {
- return Collections.emptyList();
- }
-
- @Override
- public MasterState getMasterState() {
- return MasterState.NORMAL;
- }
-
- @Override
- public Set<TServerInstance> shutdownServers() {
- return Collections.emptySet();
- }
-
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
deleted file mode 100644
index ffadd22..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.fail;
-
-import java.util.Collections;
-import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.TimedOutException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.junit.Test;
-
-/**
- * Verifies that BatchWriter and BatchScanner operations time out as configured.
- */
-public class TimeoutIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 75;
- }
-
- @Test
- public void run() throws Exception {
- Connector conn = getConnector();
- String[] tableNames = getUniqueNames(2);
- testBatchWriterTimeout(conn, tableNames[0]);
- testBatchScannerTimeout(conn, tableNames[1]);
- }
-
- public void testBatchWriterTimeout(Connector conn, String tableName) throws Exception {
- conn.tableOperations().create(tableName);
- conn.tableOperations().addConstraint(tableName, SlowConstraint.class.getName());
-
- // give constraint time to propagate through zookeeper
- UtilWaitThread.sleep(1000);
-
- BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig().setTimeout(3, TimeUnit.SECONDS));
-
- Mutation mut = new Mutation("r1");
- mut.put("cf1", "cq1", "v1");
-
- bw.addMutation(mut);
- try {
- bw.close();
- fail("batch writer did not timeout");
- } catch (MutationsRejectedException mre) {
- if (mre.getCause() instanceof TimedOutException)
- return;
- throw mre;
- }
- }
-
- public void testBatchScannerTimeout(Connector conn, String tableName) throws Exception {
- getConnector().tableOperations().create(tableName);
-
- BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
-
- Mutation m = new Mutation("r1");
- m.put("cf1", "cq1", "v1");
- m.put("cf1", "cq2", "v2");
- m.put("cf1", "cq3", "v3");
- m.put("cf1", "cq4", "v4");
-
- bw.addMutation(m);
- bw.close();
-
- BatchScanner bs = getConnector().createBatchScanner(tableName, Authorizations.EMPTY, 2);
- bs.setRanges(Collections.singletonList(new Range()));
-
- // should not timeout
- for (Entry<Key,Value> entry : bs) {
- entry.getKey();
- }
-
- bs.setTimeout(5, TimeUnit.SECONDS);
- IteratorSetting iterSetting = new IteratorSetting(100, SlowIterator.class);
-    iterSetting.addOption("sleepTime", "2000");
- bs.addScanIterator(iterSetting);
-
- try {
- for (Entry<Key,Value> entry : bs) {
- entry.getKey();
- }
- fail("batch scanner did not time out");
- } catch (TimedOutException toe) {
-      // expected: the slow iterator exceeds the 5 second batch scanner timeout
- }
- bs.close();
- }
-
-}
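
SlowConstraint is referenced but not included in this diff. Any constraint that blocks in
check() longer than the writer's 3 second timeout would trigger the MutationsRejectedException
above; a minimal sketch (the 20 second sleep is an arbitrary choice, not from the patch):

    import java.util.Collections;
    import java.util.List;

    import org.apache.accumulo.core.constraints.Constraint;
    import org.apache.accumulo.core.data.Mutation;

    public class SlowConstraintSketch implements Constraint {
      @Override
      public String getViolationDescription(short violationCode) {
        return "slow";
      }

      @Override
      public List<Short> check(Environment env, Mutation mutation) {
        try {
          Thread.sleep(20 * 1000); // hold the write long enough to trip the client-side timeout
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
        return Collections.emptyList(); // no violations; the delay is the point
      }
    }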
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java b/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
deleted file mode 100644
index 3d6ad85..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.util.ByteArraySet;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class VisibilityIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- Authorizations origAuths = null;
-
- @Before
- public void emptyAuths() throws Exception {
- Connector c = getConnector();
- origAuths = c.securityOperations().getUserAuthorizations(getAdminPrincipal());
- }
-
- @After
- public void resetAuths() throws Exception {
- Connector c = getConnector();
- if (null != origAuths) {
- c.securityOperations().changeUserAuthorizations(getAdminPrincipal(), origAuths);
- }
- }
-
- @Test
- public void run() throws Exception {
- Connector c = getConnector();
- String[] tableNames = getUniqueNames(2);
- String table = tableNames[0];
- c.tableOperations().create(table);
- String table2 = tableNames[1];
- c.tableOperations().create(table2);
- c.tableOperations().setProperty(table2, Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey(), "DEFLABEL");
-
- insertData(c, table);
- queryData(c, table);
- deleteData(c, table);
-
- insertDefaultData(c, table2);
- queryDefaultData(c, table2);
-
- }
-
- private static SortedSet<String> nss(String... labels) {
- TreeSet<String> ts = new TreeSet<String>();
-
- for (String s : labels) {
- ts.add(s);
- }
-
- return ts;
- }
-
- private void mput(Mutation m, String cf, String cq, String cv, String val) {
- ColumnVisibility le = new ColumnVisibility(cv.getBytes(UTF_8));
- m.put(new Text(cf), new Text(cq), le, new Value(val.getBytes(UTF_8)));
- }
-
- private void mputDelete(Mutation m, String cf, String cq, String cv) {
- ColumnVisibility le = new ColumnVisibility(cv.getBytes(UTF_8));
- m.putDelete(new Text(cf), new Text(cq), le);
- }
-
- private void insertData(Connector c, String tableName) throws Exception {
-
- BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
- Mutation m1 = new Mutation(new Text("row1"));
-
- mput(m1, "cf1", "cq1", "", "v1");
- mput(m1, "cf1", "cq1", "A", "v2");
- mput(m1, "cf1", "cq1", "B", "v3");
- mput(m1, "cf1", "cq1", "A&B", "v4");
- mput(m1, "cf1", "cq1", "A&(L|M)", "v5");
- mput(m1, "cf1", "cq1", "B&(L|M)", "v6");
- mput(m1, "cf1", "cq1", "A&B&(L|M)", "v7");
- mput(m1, "cf1", "cq1", "A&B&(L)", "v8");
- mput(m1, "cf1", "cq1", "A&FOO", "v9");
- mput(m1, "cf1", "cq1", "A&FOO&(L|M)", "v10");
- mput(m1, "cf1", "cq1", "FOO", "v11");
- mput(m1, "cf1", "cq1", "(A|B)&FOO&(L|M)", "v12");
- mput(m1, "cf1", "cq1", "A&B&(L|M|FOO)", "v13");
-
- bw.addMutation(m1);
- bw.close();
- }
-
- private void deleteData(Connector c, String tableName) throws Exception {
-
- BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
- Mutation m1 = new Mutation(new Text("row1"));
-
- mputDelete(m1, "cf1", "cq1", "");
- mputDelete(m1, "cf1", "cq1", "A");
- mputDelete(m1, "cf1", "cq1", "A&B");
- mputDelete(m1, "cf1", "cq1", "B&(L|M)");
- mputDelete(m1, "cf1", "cq1", "A&B&(L)");
- mputDelete(m1, "cf1", "cq1", "A&FOO&(L|M)");
- mputDelete(m1, "cf1", "cq1", "(A|B)&FOO&(L|M)");
- mputDelete(m1, "cf1", "cq1", "FOO&A"); // should not delete anything
-
- bw.addMutation(m1);
- bw.close();
-
- Map<Set<String>,Set<String>> expected = new HashMap<Set<String>,Set<String>>();
-
- expected.put(nss("A", "L"), nss("v5"));
- expected.put(nss("A", "M"), nss("v5"));
- expected.put(nss("B"), nss("v3"));
- expected.put(nss("Z"), nss());
- expected.put(nss("A", "B", "L"), nss("v7", "v13"));
- expected.put(nss("A", "B", "M"), nss("v7", "v13"));
- expected.put(nss("A", "B", "FOO"), nss("v13"));
- expected.put(nss("FOO"), nss("v11"));
- expected.put(nss("A", "FOO"), nss("v9"));
-
- queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "FOO", "L", "M", "Z"), expected);
- }
-
- private void insertDefaultData(Connector c, String tableName) throws Exception {
- BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
- Mutation m1 = new Mutation(new Text("row1"));
-
- mput(m1, "cf1", "cq1", "BASE", "v1");
- mput(m1, "cf1", "cq2", "DEFLABEL", "v2");
- mput(m1, "cf1", "cq3", "", "v3");
-
- bw.addMutation(m1);
- bw.close();
- }
-
- private static void uniqueCombos(List<Set<String>> all, Set<String> prefix, Set<String> suffix) {
-
- all.add(prefix);
-
- TreeSet<String> ss = new TreeSet<String>(suffix);
-
- for (String s : suffix) {
- TreeSet<String> ps = new TreeSet<String>(prefix);
- ps.add(s);
- ss.remove(s);
-
- uniqueCombos(all, ps, ss);
- }
- }
-
- private void queryData(Connector c, String tableName) throws Exception {
- Map<Set<String>,Set<String>> expected = new HashMap<Set<String>,Set<String>>();
- expected.put(nss(), nss("v1"));
- expected.put(nss("A"), nss("v2"));
- expected.put(nss("A", "L"), nss("v5"));
- expected.put(nss("A", "M"), nss("v5"));
- expected.put(nss("B"), nss("v3"));
- expected.put(nss("B", "L"), nss("v6"));
- expected.put(nss("B", "M"), nss("v6"));
- expected.put(nss("Z"), nss());
- expected.put(nss("A", "B"), nss("v4"));
- expected.put(nss("A", "B", "L"), nss("v7", "v8", "v13"));
- expected.put(nss("A", "B", "M"), nss("v7", "v13"));
- expected.put(nss("A", "B", "FOO"), nss("v13"));
- expected.put(nss("FOO"), nss("v11"));
- expected.put(nss("A", "FOO"), nss("v9"));
- expected.put(nss("A", "FOO", "L"), nss("v10", "v12"));
- expected.put(nss("A", "FOO", "M"), nss("v10", "v12"));
- expected.put(nss("B", "FOO", "L"), nss("v12"));
- expected.put(nss("B", "FOO", "M"), nss("v12"));
-
- queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "FOO", "L", "M", "Z"), expected);
- queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "L", "M", "Z"), expected);
- queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "Z"), expected);
- queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("Z"), expected);
- queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss(), expected);
- }
-
- private void queryData(Connector c, String tableName, Set<String> allAuths, Set<String> userAuths, Map<Set<String>,Set<String>> expected) throws Exception {
-
- c.securityOperations().changeUserAuthorizations(getAdminPrincipal(), new Authorizations(nbas(userAuths)));
-
- ArrayList<Set<String>> combos = new ArrayList<Set<String>>();
- uniqueCombos(combos, nss(), allAuths);
-
- for (Set<String> set1 : combos) {
- Set<String> e = new TreeSet<String>();
- for (Set<String> set2 : combos) {
-
- set2 = new HashSet<String>(set2);
- set2.retainAll(userAuths);
-
- if (set1.containsAll(set2) && expected.containsKey(set2)) {
- e.addAll(expected.get(set2));
- }
- }
-
- set1.retainAll(userAuths);
- verify(c, tableName, set1, e);
- }
-
- }
-
- private void queryDefaultData(Connector c, String tableName) throws Exception {
- Scanner scanner;
-
- // should return no records
- c.securityOperations().changeUserAuthorizations(getAdminPrincipal(), new Authorizations("BASE", "DEFLABEL"));
- scanner = getConnector().createScanner(tableName, new Authorizations());
- verifyDefault(scanner, 0);
-
- // should return one record
- scanner = getConnector().createScanner(tableName, new Authorizations("BASE"));
- verifyDefault(scanner, 1);
-
- // should return all three records
- scanner = getConnector().createScanner(tableName, new Authorizations("BASE", "DEFLABEL"));
- verifyDefault(scanner, 3);
- }
-
- private void verifyDefault(Scanner scanner, int expectedCount) throws Exception {
- int actual = Iterators.size(scanner.iterator());
- if (actual != expectedCount)
- throw new Exception("actual count " + actual + " != expected count " + expectedCount);
- }
-
- private void verify(Connector c, String tableName, Set<String> auths, Set<String> expectedValues) throws Exception {
- ByteArraySet bas = nbas(auths);
-
- try {
- verify(c, tableName, bas, expectedValues.toArray(new String[0]));
- } catch (Exception e) {
- throw new Exception("Verification failed auths=" + auths + " exp=" + expectedValues, e);
- }
- }
-
- private ByteArraySet nbas(Set<String> auths) {
- ByteArraySet bas = new ByteArraySet();
- for (String auth : auths) {
- bas.add(auth.getBytes(UTF_8));
- }
- return bas;
- }
-
- private void verify(Connector c, String tableName, ByteArraySet nss, String... expected) throws Exception {
- Scanner scanner = c.createScanner(tableName, new Authorizations(nss));
- verify(scanner.iterator(), expected);
-
- BatchScanner bs = getConnector().createBatchScanner(tableName, new Authorizations(nss), 3);
- bs.setRanges(Collections.singleton(new Range()));
- verify(bs.iterator(), expected);
- bs.close();
- }
-
- private void verify(Iterator<Entry<Key,Value>> iter, String... expected) throws Exception {
- HashSet<String> valuesSeen = new HashSet<String>();
-
- while (iter.hasNext()) {
- Entry<Key,Value> entry = iter.next();
- if (valuesSeen.contains(entry.getValue().toString())) {
- throw new Exception("Value seen twice");
- }
- valuesSeen.add(entry.getValue().toString());
- }
-
- for (String ev : expected) {
- if (!valuesSeen.remove(ev)) {
- throw new Exception("Did not see expected value " + ev);
- }
- }
-
- if (valuesSeen.size() != 0) {
- throw new Exception("Saw more values than expected " + valuesSeen);
- }
- }
-}
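
The expected-value maps above follow mechanically from evaluating each visibility expression
against the scan authorizations; VisibilityEvaluator is the public API for that evaluation.
Two of the cases spelled out, showing why a scan with {A, L} sees v5 but not v4:

    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.accumulo.core.security.ColumnVisibility;
    import org.apache.accumulo.core.security.VisibilityEvaluator;
    import org.apache.accumulo.core.security.VisibilityParseException;

    public class VisibilitySketch {
      public static void main(String[] args) throws VisibilityParseException {
        VisibilityEvaluator ve = new VisibilityEvaluator(new Authorizations("A", "L"));
        System.out.println(ve.evaluate(new ColumnVisibility("A&(L|M)"))); // true  -> v5 is visible
        System.out.println(ve.evaluate(new ColumnVisibility("A&B")));     // false -> v4 is hidden
      }
    }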
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java b/test/src/test/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java
deleted file mode 100644
index 34d1c6d..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.apache.accumulo.core.conf.Property.GC_CYCLE_DELAY;
-import static org.apache.accumulo.core.conf.Property.GC_CYCLE_START;
-import static org.apache.accumulo.core.conf.Property.INSTANCE_ZK_TIMEOUT;
-import static org.apache.accumulo.core.conf.Property.TSERV_WALOG_MAX_SIZE;
-import static org.apache.accumulo.core.conf.Property.TSERV_WAL_REPLICATION;
-import static org.apache.accumulo.core.security.Authorizations.EMPTY;
-import static org.apache.accumulo.minicluster.ServerType.GARBAGE_COLLECTOR;
-import static org.apache.accumulo.minicluster.ServerType.TABLET_SERVER;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Random;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.TabletColumnFamily;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.master.state.SetGoalState;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterControl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.log.WalStateManager;
-import org.apache.accumulo.server.log.WalStateManager.WalState;
-import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class WALSunnyDayIT extends ConfigurableMacBase {
-
- private static final Text CF = new Text(new byte[0]);
-
- @Override
- protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setProperty(GC_CYCLE_DELAY, "1s");
- cfg.setProperty(GC_CYCLE_START, "0s");
- cfg.setProperty(TSERV_WALOG_MAX_SIZE, "1M");
- cfg.setProperty(TSERV_WAL_REPLICATION, "1");
- cfg.setProperty(INSTANCE_ZK_TIMEOUT, "3s");
- cfg.setNumTservers(1);
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- int countTrue(Collection<Boolean> bools) {
- int result = 0;
- for (Boolean b : bools) {
- if (b.booleanValue())
- result++;
- }
- return result;
- }
-
- @Test
- public void test() throws Exception {
- MiniAccumuloClusterImpl mac = getCluster();
- MiniAccumuloClusterControl control = mac.getClusterControl();
- control.stop(GARBAGE_COLLECTOR);
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- writeSomeData(c, tableName, 1, 1);
-
- // wal markers are added lazily
- Map<String,Boolean> wals = getWals(c);
- assertEquals(wals.toString(), 2, wals.size());
- for (Boolean b : wals.values()) {
- assertTrue("logs should be in use", b.booleanValue());
- }
-
- // roll log, get a new next
- writeSomeData(c, tableName, 1000, 50);
- Map<String,Boolean> walsAfterRoll = getWals(c);
- assertEquals("should have 3 WALs after roll", 3, walsAfterRoll.size());
- assertTrue("new WALs should be a superset of the old WALs", walsAfterRoll.keySet().containsAll(wals.keySet()));
- assertEquals("all WALs should be in use", 3, countTrue(walsAfterRoll.values()));
-
- // flush the tables
- for (String table : new String[] {tableName, MetadataTable.NAME, RootTable.NAME}) {
- c.tableOperations().flush(table, null, null, true);
- }
- UtilWaitThread.sleep(1000);
- // rolled WAL is no longer in use, but needs to be GC'd
-    Map<String,Boolean> walsAfterFlush = getWals(c);
-    assertEquals(walsAfterFlush.toString(), 3, walsAfterFlush.size());
-    assertEquals("inUse should be 2", 2, countTrue(walsAfterFlush.values()));
-
- // let the GC run for a little bit
- control.start(GARBAGE_COLLECTOR);
- UtilWaitThread.sleep(5 * 1000);
- // make sure the unused WAL goes away
- Map<String,Boolean> walsAfterGC = getWals(c);
- assertEquals(walsAfterGC.toString(), 2, walsAfterGC.size());
- control.stop(GARBAGE_COLLECTOR);
- // restart the tserver, but don't run recovery on all tablets
- control.stop(TABLET_SERVER);
- // this delays recovery on the normal tables
- assertEquals(0, cluster.exec(SetGoalState.class, "SAFE_MODE").waitFor());
- control.start(TABLET_SERVER);
-
- // wait for the metadata table to go back online
- getRecoveryMarkers(c);
- // allow a little time for the master to notice ASSIGNED_TO_DEAD_SERVER tablets
- UtilWaitThread.sleep(5 * 1000);
- Map<KeyExtent,List<String>> markers = getRecoveryMarkers(c);
- // log.debug("markers " + markers);
- assertEquals("one tablet should have markers", 1, markers.keySet().size());
-    assertEquals("tableId of the keyExtent should be 1", new Text("1"), markers.keySet().iterator().next().getTableId());
-
- // put some data in the WAL
- assertEquals(0, cluster.exec(SetGoalState.class, "NORMAL").waitFor());
- verifySomeData(c, tableName, 1000 * 50 + 1);
- writeSomeData(c, tableName, 100, 100);
-
- Map<String,Boolean> walsAfterRestart = getWals(c);
- // log.debug("wals after " + walsAfterRestart);
- assertEquals("used WALs after restart should be 4", 4, countTrue(walsAfterRestart.values()));
- control.start(GARBAGE_COLLECTOR);
- UtilWaitThread.sleep(5 * 1000);
- Map<String,Boolean> walsAfterRestartAndGC = getWals(c);
- assertEquals("wals left should be 2", 2, walsAfterRestartAndGC.size());
- assertEquals("logs in use should be 2", 2, countTrue(walsAfterRestartAndGC.values()));
- }
-
- private void verifySomeData(Connector c, String tableName, int expected) throws Exception {
- Scanner scan = c.createScanner(tableName, EMPTY);
- int result = Iterators.size(scan.iterator());
- scan.close();
- Assert.assertEquals(expected, result);
- }
-
- private void writeSomeData(Connector conn, String tableName, int row, int col) throws Exception {
- Random rand = new Random();
- BatchWriter bw = conn.createBatchWriter(tableName, null);
- byte[] rowData = new byte[10];
- byte[] cq = new byte[10];
- byte[] value = new byte[10];
-
- for (int r = 0; r < row; r++) {
- rand.nextBytes(rowData);
- Mutation m = new Mutation(rowData);
- for (int c = 0; c < col; c++) {
- rand.nextBytes(cq);
- rand.nextBytes(value);
- m.put(CF, new Text(cq), new Value(value));
- }
- bw.addMutation(m);
- if (r % 100 == 0) {
- bw.flush();
- }
- }
- bw.close();
- }
-
- private Map<String,Boolean> getWals(Connector c) throws Exception {
- Map<String,Boolean> result = new HashMap<>();
- Instance i = c.getInstance();
- ZooReaderWriter zk = new ZooReaderWriter(i.getZooKeepers(), i.getZooKeepersSessionTimeOut(), "");
- WalStateManager wals = new WalStateManager(c.getInstance(), zk);
- for (Entry<Path,WalState> entry : wals.getAllState().entrySet()) {
- // WALs are in use if they are not unreferenced
- result.put(entry.getKey().toString(), entry.getValue() != WalState.UNREFERENCED);
- }
- return result;
- }
-
- private Map<KeyExtent,List<String>> getRecoveryMarkers(Connector c) throws Exception {
- Map<KeyExtent,List<String>> result = new HashMap<>();
- Scanner root = c.createScanner(RootTable.NAME, EMPTY);
- root.setRange(TabletsSection.getRange());
- root.fetchColumnFamily(TabletsSection.LogColumnFamily.NAME);
- TabletColumnFamily.PREV_ROW_COLUMN.fetch(root);
-
- Scanner meta = c.createScanner(MetadataTable.NAME, EMPTY);
- meta.setRange(TabletsSection.getRange());
- meta.fetchColumnFamily(TabletsSection.LogColumnFamily.NAME);
- TabletColumnFamily.PREV_ROW_COLUMN.fetch(meta);
-
- List<String> logs = new ArrayList<>();
- Iterator<Entry<Key,Value>> both = Iterators.concat(root.iterator(), meta.iterator());
- while (both.hasNext()) {
- Entry<Key,Value> entry = both.next();
- Key key = entry.getKey();
- if (key.getColumnFamily().equals(TabletsSection.LogColumnFamily.NAME)) {
- logs.add(key.getColumnQualifier().toString());
- }
- if (TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key) && !logs.isEmpty()) {
- KeyExtent extent = new KeyExtent(key.getRow(), entry.getValue());
- result.put(extent, logs);
- logs = new ArrayList<String>();
- }
- }
- return result;
- }
-
-}
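
getWals above counts a WAL as in use whenever its marker is anything other than UNREFERENCED.
A small helper making that lifecycle (OPEN, then CLOSED once rolled, then UNREFERENCED once
no tablet needs it for recovery) explicit; same logic as the test, just extracted:

    import java.util.Map.Entry;

    import org.apache.accumulo.server.log.WalStateManager;
    import org.apache.accumulo.server.log.WalStateManager.WalState;
    import org.apache.hadoop.fs.Path;

    public class WalStateSketch {
      static int countInUse(WalStateManager wals) throws Exception {
        int inUse = 0;
        for (Entry<Path,WalState> entry : wals.getAllState().entrySet()) {
          // OPEN: currently written to; CLOSED: rolled but possibly still needed
          // for recovery; UNREFERENCED: safe for the garbage collector to remove.
          if (entry.getValue() != WalState.UNREFERENCED) {
            inUse++;
          }
        }
        return inUse;
      }
    }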
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/WatchTheWatchCountIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/WatchTheWatchCountIT.java b/test/src/test/java/org/apache/accumulo/test/functional/WatchTheWatchCountIT.java
deleted file mode 100644
index 07d197d..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/WatchTheWatchCountIT.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertTrue;
-
-import java.net.Socket;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Range;
-import com.google.common.net.HostAndPort;
-
-// ACCUMULO-2757 - make sure we don't make too many more watchers
-public class WatchTheWatchCountIT extends ConfigurableMacBase {
- private static final Logger log = LoggerFactory.getLogger(WatchTheWatchCountIT.class);
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setNumTservers(3);
- }
-
- @Test
- public void test() throws Exception {
- Connector c = getConnector();
- String[] tableNames = getUniqueNames(3);
- for (String tableName : tableNames) {
- c.tableOperations().create(tableName);
- }
- c.tableOperations().list();
- String zooKeepers = c.getInstance().getZooKeepers();
- final Range<Long> expectedWatcherRange = Range.open(475L, 700L);
- long total = 0;
- final HostAndPort hostAndPort = HostAndPort.fromString(zooKeepers);
- for (int i = 0; i < 5; i++) {
- Socket socket = new Socket(hostAndPort.getHostText(), hostAndPort.getPort());
- try {
- socket.getOutputStream().write("wchs\n".getBytes(), 0, 5);
- byte[] buffer = new byte[1024];
- int n = socket.getInputStream().read(buffer);
- String response = new String(buffer, 0, n);
- total = Long.parseLong(response.split(":")[1].trim());
- log.info("Total: {}", total);
- if (expectedWatcherRange.contains(total)) {
- break;
- }
- log.debug("Expected number of watchers to be contained in {}, but actually was {}. Sleeping and retrying", expectedWatcherRange, total);
- Thread.sleep(5000);
- } finally {
- socket.close();
- }
- }
-
- assertTrue("Expected number of watchers to be contained in " + expectedWatcherRange + ", but actually was " + total, expectedWatcherRange.contains(total));
- }
-
-}
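WatchTheWatchCountIT talks to ZooKeeper directly with the four-letter "wchs"
command and pulls the watcher total out of the "Total watches:N" line of the
reply. The same socket exchange works for any four-letter word; a hedged
sketch (the helper name is an assumption, and it keeps the test's single-read
simplification, which can truncate a long reply):

    // Hypothetical helper, not part of the commit: send a ZooKeeper
    // four-letter command such as "wchs" or "stat" and return the reply.
    private static String fourLetterWord(HostAndPort zk, String cmd) throws IOException {
      Socket socket = new Socket(zk.getHostText(), zk.getPort());
      try {
        socket.getOutputStream().write((cmd + "\n").getBytes());
        byte[] buffer = new byte[8192];
        int n = socket.getInputStream().read(buffer); // one read, like the test
        return new String(buffer, 0, n);
      } finally {
        socket.close();
      }
    }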
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/WriteAheadLogIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/WriteAheadLogIT.java b/test/src/test/java/org/apache/accumulo/test/functional/WriteAheadLogIT.java
deleted file mode 100644
index d877969..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/WriteAheadLogIT.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.Test;
-
-public class WriteAheadLogIT extends AccumuloClusterHarness {
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "2M");
- cfg.setProperty(Property.GC_CYCLE_DELAY, "1");
- cfg.setProperty(Property.GC_CYCLE_START, "1");
- cfg.setProperty(Property.MASTER_RECOVERY_DELAY, "1s");
- cfg.setProperty(Property.TSERV_MAJC_DELAY, "1");
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "4s");
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 10 * 60;
- }
-
- @Test
- public void test() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "750K");
- TestIngest.Opts opts = new TestIngest.Opts();
- VerifyIngest.Opts vopts = new VerifyIngest.Opts();
- opts.setTableName(tableName);
-
- ClientConfiguration clientConfig = cluster.getClientConfig();
- if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- opts.updateKerberosCredentials(clientConfig);
- vopts.updateKerberosCredentials(clientConfig);
- } else {
- opts.setPrincipal(getAdminPrincipal());
- vopts.setPrincipal(getAdminPrincipal());
- }
-
- TestIngest.ingest(c, opts, new BatchWriterOpts());
- vopts.setTableName(tableName);
- VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
- getCluster().getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
- getCluster().getClusterControl().startAllServers(ServerType.TABLET_SERVER);
- VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/WriteLotsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/WriteLotsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/WriteLotsIT.java
deleted file mode 100644
index 45b671c..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/WriteLotsIT.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.junit.Test;
-
-public class WriteLotsIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 90;
- }
-
- @Test
- public void writeLots() throws Exception {
- final Connector c = getConnector();
- final String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- final AtomicReference<Exception> ref = new AtomicReference<Exception>();
- List<Thread> threads = new ArrayList<Thread>();
- final ClientConfiguration clientConfig = getCluster().getClientConfig();
- for (int i = 0; i < 10; i++) {
- final int index = i;
- Thread t = new Thread() {
- @Override
- public void run() {
- try {
- TestIngest.Opts opts = new TestIngest.Opts();
- opts.startRow = index * 10000;
- opts.rows = 10000;
- opts.setTableName(tableName);
- if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- opts.updateKerberosCredentials(clientConfig);
- } else {
- opts.setPrincipal(getAdminPrincipal());
- }
- TestIngest.ingest(c, opts, new BatchWriterOpts());
- } catch (Exception ex) {
- ref.set(ex);
- }
- }
- };
- t.start();
- threads.add(t);
- }
- for (Thread thread : threads) {
- thread.join();
- }
- if (ref.get() != null) {
- throw ref.get();
- }
- VerifyIngest.Opts vopts = new VerifyIngest.Opts();
- vopts.rows = 10000 * 10;
- vopts.setTableName(tableName);
- if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- vopts.updateKerberosCredentials(clientConfig);
- } else {
- vopts.setPrincipal(getAdminPrincipal());
- }
- VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
- }
-
-}
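WriteLotsIT fans out ten ingest threads by hand and funnels the first failure
through an AtomicReference. An equivalent sketch using an ExecutorService,
where Future.get() rethrows any ingest exception directly (imports for
Callable, ExecutorService, Executors and Future assumed; c and tableName are
the same final locals as in the test, and the Kerberos branch is omitted for
brevity):

    ExecutorService pool = Executors.newFixedThreadPool(10);
    List<Future<Void>> futures = new ArrayList<Future<Void>>();
    for (int i = 0; i < 10; i++) {
      final int index = i;
      futures.add(pool.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
          TestIngest.Opts opts = new TestIngest.Opts();
          opts.startRow = index * 10000;
          opts.rows = 10000;
          opts.setTableName(tableName);
          TestIngest.ingest(c, opts, new BatchWriterOpts());
          return null;
        }
      }));
    }
    pool.shutdown();
    for (Future<Void> f : futures) {
      f.get(); // throws ExecutionException wrapping any ingest failure
    }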
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ZooCacheIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ZooCacheIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ZooCacheIT.java
deleted file mode 100644
index a531ee0..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ZooCacheIT.java
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.commons.io.FileUtils;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class ZooCacheIT extends ConfigurableMacBase {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- private static String pathName = "/zcTest-42";
- private static File testDir;
-
- @BeforeClass
- public static void createTestDirectory() {
- testDir = new File(createTestDir(ZooCacheIT.class.getName()), pathName);
- FileUtils.deleteQuietly(testDir);
- assertTrue(testDir.mkdir());
- }
-
- @Test
- public void test() throws Exception {
- assertEquals(0, exec(CacheTestClean.class, pathName, testDir.getAbsolutePath()).waitFor());
- final AtomicReference<Exception> ref = new AtomicReference<Exception>();
- List<Thread> threads = new ArrayList<Thread>();
- for (int i = 0; i < 3; i++) {
- Thread reader = new Thread() {
- @Override
- public void run() {
- try {
- CacheTestReader.main(new String[] {pathName, testDir.getAbsolutePath(), getConnector().getInstance().getZooKeepers()});
- } catch (Exception ex) {
- ref.set(ex);
- }
- }
- };
- reader.start();
- threads.add(reader);
- }
- assertEquals(0, exec(CacheTestWriter.class, pathName, testDir.getAbsolutePath(), "3", "50").waitFor());
- for (Thread t : threads) {
- t.join();
- if (ref.get() != null)
- throw ref.get();
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ZookeeperRestartIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ZookeeperRestartIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ZookeeperRestartIT.java
deleted file mode 100644
index 19f90fe..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ZookeeperRestartIT.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.minicluster.impl.ProcessReference;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
-public class ZookeeperRestartIT extends ConfigurableMacBase {
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- Map<String,String> siteConfig = new HashMap<String,String>();
- siteConfig.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "3s");
- cfg.setSiteConfig(siteConfig);
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Test
- public void test() throws Exception {
- Connector c = getConnector();
- c.tableOperations().create("test_ingest");
- BatchWriter bw = c.createBatchWriter("test_ingest", null);
- Mutation m = new Mutation("row");
- m.put("cf", "cq", "value");
- bw.addMutation(m);
- bw.close();
-
- // kill zookeeper
- for (ProcessReference proc : cluster.getProcesses().get(ServerType.ZOOKEEPER))
- cluster.killProcess(ServerType.ZOOKEEPER, proc);
-
- // give the servers time to react
- UtilWaitThread.sleep(1000);
-
- // start zookeeper back up
- cluster.start();
-
- // use the tservers
- Scanner s = c.createScanner("test_ingest", Authorizations.EMPTY);
- Iterator<Entry<Key,Value>> i = s.iterator();
- assertTrue(i.hasNext());
- assertEquals("row", i.next().getKey().getRow().toString());
- assertFalse(i.hasNext());
- // use the master
- c.tableOperations().delete("test_ingest");
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java b/test/src/test/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java
deleted file mode 100644
index a0d355e..0000000
--- a/test/src/test/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.performance;
-
-import static org.junit.Assert.assertTrue;
-
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.minicluster.impl.ProcessReference;
-import org.apache.accumulo.test.continuous.ContinuousIngest;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class RollWALPerformanceIT extends ConfigurableMacBase {
-
- @Override
- protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setProperty(Property.TSERV_WAL_REPLICATION, "1");
- cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "10M");
- cfg.setProperty(Property.TABLE_MINC_LOGS_MAX, "100");
- cfg.setProperty(Property.GC_FILE_ARCHIVE, "false");
- cfg.setProperty(Property.GC_CYCLE_START, "1s");
- cfg.setProperty(Property.GC_CYCLE_DELAY, "1s");
- cfg.useMiniDFS(true);
- }
-
- private long ingest() throws Exception {
- final Connector c = getConnector();
- final String tableName = getUniqueNames(1)[0];
-
- log.info("Creating the table");
- c.tableOperations().create(tableName);
-
- log.info("Splitting the table");
- final long SPLIT_COUNT = 100;
- final long distance = Long.MAX_VALUE / SPLIT_COUNT;
- final SortedSet<Text> splits = new TreeSet<Text>();
- for (int i = 1; i < SPLIT_COUNT; i++) {
- splits.add(new Text(String.format("%016x", i * distance)));
- }
- c.tableOperations().addSplits(tableName, splits);
-
- log.info("Waiting for balance");
- c.instanceOperations().waitForBalance();
-
- final Instance inst = c.getInstance();
-
- log.info("Starting ingest");
- final long start = System.currentTimeMillis();
- final String args[] = {"-i", inst.getInstanceName(), "-z", inst.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "--batchThreads", "2", "--table",
- tableName, "--num", Long.toString(1000 * 1000), // 1M 100 byte entries
- };
-
- ContinuousIngest.main(args);
- final long result = System.currentTimeMillis() - start;
- log.debug(String.format("Finished in %,d ms", result));
- log.debug("Dropping table");
- c.tableOperations().delete(tableName);
- return result;
- }
-
- private long getAverage() throws Exception {
- final int REPEAT = 3;
- long totalTime = 0;
- for (int i = 0; i < REPEAT; i++) {
- totalTime += ingest();
- }
- return totalTime / REPEAT;
- }
-
- private void testWalPerformanceOnce() throws Exception {
- // get time with a small WAL, which will cause many WAL roll-overs
- long avg1 = getAverage();
- // use a bigger WAL max size to eliminate WAL roll-overs
- Connector c = getConnector();
- c.instanceOperations().setProperty(Property.TSERV_WALOG_MAX_SIZE.getKey(), "1G");
- c.tableOperations().flush(MetadataTable.NAME, null, null, true);
- c.tableOperations().flush(RootTable.NAME, null, null, true);
- for (ProcessReference tserver : getCluster().getProcesses().get(ServerType.TABLET_SERVER)) {
- getCluster().killProcess(ServerType.TABLET_SERVER, tserver);
- }
- getCluster().start();
- long avg2 = getAverage();
- log.info(String.format("Average run time with small WAL %,d with large WAL %,d", avg1, avg2));
- assertTrue(avg1 > avg2);
- double percent = (100. * avg1) / avg2;
- log.info(String.format("Percent of large log: %.2f%%", percent));
- assertTrue(percent < 125.);
- }
-
- @Test(timeout = 20 * 60 * 1000)
- public void testWalPerformance() throws Exception {
- testWalPerformanceOnce();
- }
-
-}
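The final assertions bound the cost of frequent WAL rollovers rather than
merely ordering the two runs. With percent = (100 * avg1) / avg2: a small-WAL
average of 60,000 ms against a large-WAL average of 50,000 ms gives 120%,
which passes the < 125% check, while 65,000 ms against the same 50,000 ms
gives 130% and fails. In other words, the 10M WAL configuration is allowed to
be at most 25% slower than the 1G configuration.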
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/performance/metadata/FastBulkImportIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/performance/metadata/FastBulkImportIT.java b/test/src/test/java/org/apache/accumulo/test/performance/metadata/FastBulkImportIT.java
deleted file mode 100644
index 236522a..0000000
--- a/test/src/test/java/org/apache/accumulo/test/performance/metadata/FastBulkImportIT.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.performance.metadata;
-
-import static org.junit.Assert.assertTrue;
-
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.file.FileOperations;
-import org.apache.accumulo.core.file.FileSKVWriter;
-import org.apache.accumulo.core.file.rfile.RFile;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-// ACCUMULO-3327
-public class FastBulkImportIT extends ConfigurableMacBase {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Override
- protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setNumTservers(3);
- cfg.setProperty(Property.TSERV_BULK_ASSIGNMENT_THREADS, "5");
- cfg.setProperty(Property.TSERV_BULK_PROCESS_THREADS, "5");
- cfg.setProperty(Property.TABLE_MAJC_RATIO, "9999");
- cfg.setProperty(Property.TABLE_FILE_MAX, "9999");
- }
-
- @Test
- public void test() throws Exception {
- log.info("Creating table");
- final String tableName = getUniqueNames(1)[0];
- final Connector c = getConnector();
- c.tableOperations().create(tableName);
- log.info("Adding splits");
- SortedSet<Text> splits = new TreeSet<>();
- for (int i = 1; i < 0xfff; i += 7) {
- splits.add(new Text(Integer.toHexString(i)));
- }
- c.tableOperations().addSplits(tableName, splits);
-
- log.info("Creating lots of bulk import files");
- FileSystem fs = getCluster().getFileSystem();
- Path basePath = getCluster().getTemporaryPath();
- CachedConfiguration.setInstance(fs.getConf());
-
- Path base = new Path(basePath, "testBulkFail_" + tableName);
- fs.delete(base, true);
- fs.mkdirs(base);
- Path bulkFailures = new Path(base, "failures");
- Path files = new Path(base, "files");
- fs.mkdirs(bulkFailures);
- fs.mkdirs(files);
- for (int i = 0; i < 100; i++) {
- FileSKVWriter writer = FileOperations.getInstance().openWriter(files.toString() + "/bulk_" + i + "." + RFile.EXTENSION, fs, fs.getConf(),
- AccumuloConfiguration.getDefaultConfiguration());
- writer.startDefaultLocalityGroup();
- for (int j = 0x100; j < 0xfff; j += 3) {
- writer.append(new Key(Integer.toHexString(j)), new Value(new byte[0]));
- }
- writer.close();
- }
- log.info("Waiting for balance");
- c.instanceOperations().waitForBalance();
-
- log.info("Bulk importing files");
- long now = System.currentTimeMillis();
- c.tableOperations().importDirectory(tableName, files.toString(), bulkFailures.toString(), true);
- double diffSeconds = (System.currentTimeMillis() - now) / 1000.;
- log.info(String.format("Import took %.2f seconds", diffSeconds));
- assertTrue(diffSeconds < 30);
- }
-
-}
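FastBulkImportIT raises TABLE_MAJC_RATIO and TABLE_FILE_MAX to 9999 so that no
major compactions fire while the clock is running; the 30-second bound
therefore measures metadata assignment of 100 files across several hundred
tablets, not compaction throughput. A hedged follow-up check (not in the
commit) that the import succeeded completely would assert the failures
directory stayed empty:

    // Files that cannot be imported are moved into the failures
    // directory, so a fully successful bulk import leaves it empty.
    assertEquals(0, fs.listStatus(bulkFailures).length);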
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/proxy/ProxyDurabilityIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/proxy/ProxyDurabilityIT.java b/test/src/test/java/org/apache/accumulo/test/proxy/ProxyDurabilityIT.java
deleted file mode 100644
index 745326e..0000000
--- a/test/src/test/java/org/apache/accumulo/test/proxy/ProxyDurabilityIT.java
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.proxy;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.nio.ByteBuffer;
-import java.nio.file.Files;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.minicluster.impl.ProcessReference;
-import org.apache.accumulo.proxy.Proxy;
-import org.apache.accumulo.proxy.thrift.AccumuloProxy.Client;
-import org.apache.accumulo.proxy.thrift.Column;
-import org.apache.accumulo.proxy.thrift.ColumnUpdate;
-import org.apache.accumulo.proxy.thrift.Condition;
-import org.apache.accumulo.proxy.thrift.ConditionalStatus;
-import org.apache.accumulo.proxy.thrift.ConditionalUpdates;
-import org.apache.accumulo.proxy.thrift.ConditionalWriterOptions;
-import org.apache.accumulo.proxy.thrift.Durability;
-import org.apache.accumulo.proxy.thrift.TimeType;
-import org.apache.accumulo.proxy.thrift.WriterOptions;
-import org.apache.accumulo.server.util.PortUtils;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.thrift.protocol.TJSONProtocol;
-import org.apache.thrift.server.TServer;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-import com.google.common.net.HostAndPort;
-
-public class ProxyDurabilityIT extends ConfigurableMacBase {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "10s");
- cfg.setNumTservers(1);
- }
-
- private static ByteBuffer bytes(String value) {
- return ByteBuffer.wrap(value.getBytes());
- }
-
- @Test
- public void testDurability() throws Exception {
- Connector c = getConnector();
- Properties props = new Properties();
- // Avoid issues with locally installed client configuration files with custom properties
- File emptyFile = Files.createTempFile(null, null).toFile();
- emptyFile.deleteOnExit();
- props.put("instance", c.getInstance().getInstanceName());
- props.put("zookeepers", c.getInstance().getZooKeepers());
- props.put("tokenClass", PasswordToken.class.getName());
- props.put("clientConfigurationFile", emptyFile.toString());
-
- TJSONProtocol.Factory protocol = new TJSONProtocol.Factory();
-
- int proxyPort = PortUtils.getRandomFreePort();
- final TServer proxyServer = Proxy.createProxyServer(HostAndPort.fromParts("localhost", proxyPort), protocol, props).server;
- while (!proxyServer.isServing())
- UtilWaitThread.sleep(100);
- Client client = new TestProxyClient("localhost", proxyPort, protocol).proxy();
- Map<String,String> properties = new TreeMap<String,String>();
- properties.put("password", ROOT_PASSWORD);
- ByteBuffer login = client.login("root", properties);
-
- String tableName = getUniqueNames(1)[0];
- client.createTable(login, tableName, true, TimeType.MILLIS);
- assertTrue(c.tableOperations().exists(tableName));
-
- WriterOptions options = new WriterOptions();
- options.setDurability(Durability.NONE);
- String writer = client.createWriter(login, tableName, options);
- Map<ByteBuffer,List<ColumnUpdate>> cells = new TreeMap<ByteBuffer,List<ColumnUpdate>>();
- ColumnUpdate column = new ColumnUpdate(bytes("cf"), bytes("cq"));
- column.setValue("value".getBytes());
- cells.put(bytes("row"), Collections.singletonList(column));
- client.update(writer, cells);
- client.closeWriter(writer);
- assertEquals(1, count(tableName));
- restartTServer();
- assertEquals(0, count(tableName));
-
- ConditionalWriterOptions cfg = new ConditionalWriterOptions();
- cfg.setDurability(Durability.SYNC);
- String cwriter = client.createConditionalWriter(login, tableName, cfg);
- ConditionalUpdates updates = new ConditionalUpdates();
- updates.addToConditions(new Condition(new Column(bytes("cf"), bytes("cq"), bytes(""))));
- updates.addToUpdates(column);
- Map<ByteBuffer,ConditionalStatus> status = client.updateRowsConditionally(cwriter, Collections.singletonMap(bytes("row"), updates));
- assertEquals(ConditionalStatus.ACCEPTED, status.get(bytes("row")));
- assertEquals(1, count(tableName));
- restartTServer();
- assertEquals(1, count(tableName));
-
- proxyServer.stop();
- }
-
- private void restartTServer() throws Exception {
- for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
- cluster.killProcess(ServerType.TABLET_SERVER, proc);
- }
- cluster.start();
- }
-
- private int count(String tableName) throws Exception {
- return Iterators.size((getConnector().createScanner(tableName, Authorizations.EMPTY)).iterator());
- }
-
-}
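ProxyDurabilityIT drives durability through the Thrift proxy, but the same
contrast exists in the native client API (assuming Accumulo 1.7+, where
BatchWriterConfig.setDurability was introduced). A minimal sketch with a
Connector and table name in scope:

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Durability;

    // Durability.NONE skips the write-ahead log, so mutations can vanish
    // if the tablet server dies before the next minor compaction;
    // Durability.SYNC waits for the WAL to be synced before returning.
    BatchWriterConfig config = new BatchWriterConfig().setDurability(Durability.NONE);
    BatchWriter bw = connector.createBatchWriter(tableName, config);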
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/SplitRecoveryIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/SplitRecoveryIT.java b/test/src/main/java/org/apache/accumulo/test/SplitRecoveryIT.java
new file mode 100644
index 0000000..298c761
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/SplitRecoveryIT.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class SplitRecoveryIT extends AccumuloClusterHarness {
+
+ private Mutation m(String row) {
+ Mutation result = new Mutation(row);
+ result.put("cf", "cq", new Value("value".getBytes()));
+ return result;
+ }
+
+ boolean isOffline(String tablename, Connector connector) throws TableNotFoundException {
+ String tableId = connector.tableOperations().tableIdMap().get(tablename);
+ Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ scanner.setRange(new Range(new Text(tableId + ";"), new Text(tableId + "<")));
+ scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
+ return Iterators.size(scanner.iterator()) == 0;
+ }
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Test
+ public void test() throws Exception {
+
+ String tableName = getUniqueNames(1)[0];
+
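+ // tn == 0 leaves only the partial-split columns (split ratio and old
+ // prev row) on the original tablet, while tn == 1 also writes the new
+ // low tablet's entry; in both cases bringing the table online forces
+ // the partial split to be recovered before the rows can be scanned.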
+ for (int tn = 0; tn < 2; tn++) {
+
+ Connector connector = getConnector();
+ // create a table and put some data in it
+ connector.tableOperations().create(tableName);
+ BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
+ bw.addMutation(m("a"));
+ bw.addMutation(m("b"));
+ bw.addMutation(m("c"));
+ bw.close();
+ // take the table offline
+ connector.tableOperations().offline(tableName);
+ while (!isOffline(tableName, connector))
+ UtilWaitThread.sleep(200);
+
+ // poke a partial split into the metadata table
+ connector.securityOperations().grantTablePermission(getAdminPrincipal(), MetadataTable.NAME, TablePermission.WRITE);
+ String tableId = connector.tableOperations().tableIdMap().get(tableName);
+
+ KeyExtent extent = new KeyExtent(new Text(tableId), null, new Text("b"));
+ Mutation m = extent.getPrevRowUpdateMutation();
+
+ TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value(Double.toString(0.5).getBytes()));
+ TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(null));
+ bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+ bw.addMutation(m);
+
+ if (tn == 1) {
+
+ bw.flush();
+
+ Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ scanner.setRange(extent.toMetadataRange());
+ scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+
+ KeyExtent extent2 = new KeyExtent(new Text(tableId), new Text("b"), null);
+ m = extent2.getPrevRowUpdateMutation();
+ TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t2".getBytes()));
+ TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value("M0".getBytes()));
+
+ for (Entry<Key,Value> entry : scanner) {
+ m.put(DataFileColumnFamily.NAME, entry.getKey().getColumnQualifier(), entry.getValue());
+ }
+
+ bw.addMutation(m);
+ }
+
+ bw.close();
+ // bring the table online
+ connector.tableOperations().online(tableName);
+
+ // verify the tablets went online
+ Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
+ int i = 0;
+ String expected[] = {"a", "b", "c"};
+ for (Entry<Key,Value> entry : scanner) {
+ assertEquals(expected[i], entry.getKey().getRow().toString());
+ i++;
+ }
+ assertEquals(3, i);
+
+ connector.tableOperations().delete(tableName);
+
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/TableConfigurationUpdateIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/TableConfigurationUpdateIT.java b/test/src/main/java/org/apache/accumulo/test/TableConfigurationUpdateIT.java
new file mode 100644
index 0000000..1dd964c
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/TableConfigurationUpdateIT.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.ArrayList;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.impl.Namespaces;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.server.conf.NamespaceConfiguration;
+import org.apache.accumulo.server.conf.TableConfiguration;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class TableConfigurationUpdateIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(TableConfigurationUpdateIT.class);
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector conn = getConnector();
+ Instance inst = conn.getInstance();
+
+ String table = getUniqueNames(1)[0];
+ conn.tableOperations().create(table);
+
+ final NamespaceConfiguration defaultConf = new NamespaceConfiguration(Namespaces.DEFAULT_NAMESPACE_ID, inst,
+ AccumuloConfiguration.getDefaultConfiguration());
+
+ // Cache invalidates 25% of the time
+ int randomMax = 4;
+ // Number of threads
+ int numThreads = 2;
+ // Number of iterations per thread
+ int iterations = 100000;
+ AccumuloConfiguration tableConf = new TableConfiguration(inst, table, defaultConf);
+
+ long start = System.currentTimeMillis();
+ ExecutorService svc = Executors.newFixedThreadPool(numThreads);
+ CountDownLatch countDown = new CountDownLatch(numThreads);
+ ArrayList<Future<Exception>> futures = new ArrayList<Future<Exception>>(numThreads);
+
+ for (int i = 0; i < numThreads; i++) {
+ futures.add(svc.submit(new TableConfRunner(randomMax, iterations, tableConf, countDown)));
+ }
+
+ svc.shutdown();
+ Assert.assertTrue(svc.awaitTermination(60, TimeUnit.MINUTES));
+
+ for (Future<Exception> fut : futures) {
+ Exception e = fut.get();
+ if (null != e) {
+ Assert.fail("Thread failed with exception " + e);
+ }
+ }
+
+ long end = System.currentTimeMillis();
+ log.debug(tableConf + " with " + iterations + " iterations and " + numThreads + " threads and cache invalidates " + ((1. / randomMax) * 100.) + "% took "
+ + (end - start) / 1000 + " second(s)");
+ }
+
+ public static class TableConfRunner implements Callable<Exception> {
+ private static final Property prop = Property.TABLE_SPLIT_THRESHOLD;
+ private AccumuloConfiguration tableConf;
+ private CountDownLatch countDown;
+ private int iterations, randMax;
+
+ public TableConfRunner(int randMax, int iterations, AccumuloConfiguration tableConf, CountDownLatch countDown) {
+ this.randMax = randMax;
+ this.iterations = iterations;
+ this.tableConf = tableConf;
+ this.countDown = countDown;
+ }
+
+ @Override
+ public Exception call() {
+ Random r = new Random();
+ countDown.countDown();
+ try {
+ countDown.await();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ return e;
+ }
+
+ String t = Thread.currentThread().getName() + " ";
+ try {
+ for (int i = 0; i < iterations; i++) {
+ // if (i % 10000 == 0) {
+ // log.info(t + " " + i);
+ // }
+ int choice = r.nextInt(randMax);
+ if (choice < 1) {
+ tableConf.invalidateCache();
+ } else {
+ tableConf.get(prop);
+ }
+ }
+ } catch (Exception e) {
+ log.error(t, e);
+ return e;
+ }
+
+ return null;
+ }
+
+ }
+
+}
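With the defaults above, r.nextInt(4) < 1 holds with probability 1/4, so each
of the 2 threads invalidates the cache on roughly 25,000 of its 100,000
iterations and reads TABLE_SPLIT_THRESHOLD on the other ~75,000; the logged
summary reports exactly this configuration along with the elapsed seconds.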
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/TableOperationsIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/TableOperationsIT.java b/test/src/main/java/org/apache/accumulo/test/TableOperationsIT.java
new file mode 100644
index 0000000..789b089
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/TableOperationsIT.java
@@ -0,0 +1,375 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.admin.DiskUsage;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.constraints.DefaultKeySizeConstraint;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.PartialKey;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.test.functional.BadIterator;
+import org.apache.hadoop.io.Text;
+import org.apache.thrift.TException;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+public class TableOperationsIT extends AccumuloClusterHarness {
+
+ static TabletClientService.Client client;
+
+ private Connector connector;
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 30;
+ }
+
+ @Before
+ public void setup() throws Exception {
+ connector = getConnector();
+ }
+
+ @Test
+ public void getDiskUsageErrors() throws TableExistsException, AccumuloException, AccumuloSecurityException, TableNotFoundException, TException {
+ String tableName = getUniqueNames(1)[0];
+ connector.tableOperations().create(tableName);
+ List<DiskUsage> diskUsage = connector.tableOperations().getDiskUsage(Collections.singleton(tableName));
+ assertEquals(1, diskUsage.size());
+ assertEquals(0, (long) diskUsage.get(0).getUsage());
+ assertEquals(tableName, diskUsage.get(0).getTables().iterator().next());
+
+ connector.securityOperations().revokeTablePermission(getAdminPrincipal(), tableName, TablePermission.READ);
+ try {
+ connector.tableOperations().getDiskUsage(Collections.singleton(tableName));
+ fail("Should throw securityexception");
+ } catch (AccumuloSecurityException e) {}
+
+ connector.tableOperations().delete(tableName);
+ try {
+ connector.tableOperations().getDiskUsage(Collections.singleton(tableName));
+ fail("Should throw tablenotfound");
+ } catch (TableNotFoundException e) {}
+ }
+
+ @Test
+ public void getDiskUsage() throws TableExistsException, AccumuloException, AccumuloSecurityException, TableNotFoundException, TException {
+ final String[] names = getUniqueNames(2);
+ String tableName = names[0];
+ connector.tableOperations().create(tableName);
+
+ // verify 0 disk usage
+ List<DiskUsage> diskUsages = connector.tableOperations().getDiskUsage(Collections.singleton(tableName));
+ assertEquals(1, diskUsages.size());
+ assertEquals(1, diskUsages.get(0).getTables().size());
+ assertEquals(Long.valueOf(0), diskUsages.get(0).getUsage());
+ assertEquals(tableName, diskUsages.get(0).getTables().first());
+
+ // add some data
+ BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
+ Mutation m = new Mutation("a");
+ m.put("b", "c", new Value("abcde".getBytes()));
+ bw.addMutation(m);
+ bw.flush();
+ bw.close();
+
+ connector.tableOperations().compact(tableName, new Text("A"), new Text("z"), true, true);
+
+ // verify we have usage
+ diskUsages = connector.tableOperations().getDiskUsage(Collections.singleton(tableName));
+ assertEquals(1, diskUsages.size());
+ assertEquals(1, diskUsages.get(0).getTables().size());
+ assertTrue(diskUsages.get(0).getUsage() > 0);
+ assertEquals(tableName, diskUsages.get(0).getTables().first());
+
+ String newTable = names[1];
+
+ // clone table
+ connector.tableOperations().clone(tableName, newTable, false, null, null);
+
+ // verify tables are exactly the same
+ Set<String> tables = new HashSet<String>();
+ tables.add(tableName);
+ tables.add(newTable);
+ diskUsages = connector.tableOperations().getDiskUsage(tables);
+ assertEquals(1, diskUsages.size());
+ assertEquals(2, diskUsages.get(0).getTables().size());
+ assertTrue(diskUsages.get(0).getUsage() > 0);
+
+ connector.tableOperations().compact(tableName, new Text("A"), new Text("z"), true, true);
+ connector.tableOperations().compact(newTable, new Text("A"), new Text("z"), true, true);
+
+ // verify tables have differences
+ diskUsages = connector.tableOperations().getDiskUsage(tables);
+ assertEquals(2, diskUsages.size());
+ assertEquals(1, diskUsages.get(0).getTables().size());
+ assertEquals(1, diskUsages.get(1).getTables().size());
+ assertTrue(diskUsages.get(0).getUsage() > 0);
+ assertTrue(diskUsages.get(1).getUsage() > 0);
+
+ connector.tableOperations().delete(tableName);
+ }
+
+ @Test
+ public void createTable() throws TableExistsException, AccumuloException, AccumuloSecurityException, TableNotFoundException {
+ String tableName = getUniqueNames(1)[0];
+ connector.tableOperations().create(tableName);
+ Iterable<Map.Entry<String,String>> itrProps = connector.tableOperations().getProperties(tableName);
+ Map<String,String> props = propsToMap(itrProps);
+ assertEquals(DefaultKeySizeConstraint.class.getName(), props.get(Property.TABLE_CONSTRAINT_PREFIX.toString() + "1"));
+ connector.tableOperations().delete(tableName);
+ }
+
+ @Test
+ public void createMergeClonedTable() throws Exception {
+ String[] names = getUniqueNames(2);
+ String originalTable = names[0];
+ TableOperations tops = connector.tableOperations();
+
+ TreeSet<Text> splits = Sets.newTreeSet(Arrays.asList(new Text("a"), new Text("b"), new Text("c"), new Text("d")));
+
+ tops.create(originalTable);
+ tops.addSplits(originalTable, splits);
+
+ BatchWriter bw = connector.createBatchWriter(originalTable, new BatchWriterConfig());
+ for (Text row : splits) {
+ Mutation m = new Mutation(row);
+ for (int i = 0; i < 10; i++) {
+ for (int j = 0; j < 10; j++) {
+ m.put(Integer.toString(i), Integer.toString(j), Integer.toString(i + j));
+ }
+ }
+
+ bw.addMutation(m);
+ }
+
+ bw.close();
+
+ String clonedTable = names[1];
+
+ tops.clone(originalTable, clonedTable, true, null, null);
+ tops.merge(clonedTable, null, new Text("b"));
+
+ Map<String,Integer> rowCounts = Maps.newHashMap();
+ Scanner s = connector.createScanner(clonedTable, new Authorizations());
+ for (Entry<Key,Value> entry : s) {
+ final Key key = entry.getKey();
+ String row = key.getRow().toString();
+ String cf = key.getColumnFamily().toString(), cq = key.getColumnQualifier().toString();
+ String value = entry.getValue().toString();
+
+ if (rowCounts.containsKey(row)) {
+ rowCounts.put(row, rowCounts.get(row) + 1);
+ } else {
+ rowCounts.put(row, 1);
+ }
+
+ Assert.assertEquals(Integer.parseInt(cf) + Integer.parseInt(cq), Integer.parseInt(value));
+ }
+
+ Collection<Text> clonedSplits = tops.listSplits(clonedTable);
+ Set<Text> expectedSplits = Sets.newHashSet(new Text("b"), new Text("c"), new Text("d"));
+ for (Text clonedSplit : clonedSplits) {
+ Assert.assertTrue("Encountered unexpected split on the cloned table: " + clonedSplit, expectedSplits.remove(clonedSplit));
+ }
+
+ Assert.assertTrue("Did not find all expected splits on the cloned table: " + expectedSplits, expectedSplits.isEmpty());
+ }
+
+ private Map<String,String> propsToMap(Iterable<Map.Entry<String,String>> props) {
+ Map<String,String> map = new HashMap<String,String>();
+ for (Map.Entry<String,String> prop : props) {
+ map.put(prop.getKey(), prop.getValue());
+ }
+ return map;
+ }
+
+ @Test
+ public void testCompactEmptyTableWithGeneratorIterator() throws TableExistsException, AccumuloException, AccumuloSecurityException, TableNotFoundException {
+ String tableName = getUniqueNames(1)[0];
+ connector.tableOperations().create(tableName);
+
+ List<IteratorSetting> list = new ArrayList<>();
+ list.add(new IteratorSetting(15, HardListIterator.class));
+ connector.tableOperations().compact(tableName, null, null, list, true, true);
+
+ Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
+ Map<Key,Value> actual = new TreeMap<>(COMPARE_KEY_TO_COLQ); // only compare row, colF, colQ
+ for (Map.Entry<Key,Value> entry : scanner)
+ actual.put(entry.getKey(), entry.getValue());
+ assertEquals(HardListIterator.allEntriesToInject, actual);
+ connector.tableOperations().delete(tableName);
+ }
+
+ /** Compare only the row, column family and column qualifier. */
+ static class KeyRowColFColQComparator implements Comparator<Key> {
+ @Override
+ public int compare(Key k1, Key k2) {
+ return k1.compareTo(k2, PartialKey.ROW_COLFAM_COLQUAL);
+ }
+ }
+
+ static final KeyRowColFColQComparator COMPARE_KEY_TO_COLQ = new KeyRowColFColQComparator();
+
+ @Test
+ public void testCompactEmptyTableWithGeneratorIterator_Splits() throws TableExistsException, AccumuloException, AccumuloSecurityException,
+ TableNotFoundException {
+ String tableName = getUniqueNames(1)[0];
+ connector.tableOperations().create(tableName);
+ SortedSet<Text> splitset = new TreeSet<>();
+ splitset.add(new Text("f"));
+ connector.tableOperations().addSplits(tableName, splitset);
+
+ List<IteratorSetting> list = new ArrayList<>();
+ list.add(new IteratorSetting(15, HardListIterator.class));
+ connector.tableOperations().compact(tableName, null, null, list, true, true);
+
+ Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
+ Map<Key,Value> actual = new TreeMap<>(COMPARE_KEY_TO_COLQ); // only compare row, colF, colQ
+ for (Map.Entry<Key,Value> entry : scanner)
+ actual.put(entry.getKey(), entry.getValue());
+ assertEquals(HardListIterator.allEntriesToInject, actual);
+ connector.tableOperations().delete(tableName);
+ }
+
+ @Test
+ public void testCompactEmptyTableWithGeneratorIterator_Splits_Cancel() throws TableExistsException, AccumuloException, AccumuloSecurityException,
+ TableNotFoundException {
+ String tableName = getUniqueNames(1)[0];
+ connector.tableOperations().create(tableName);
+ SortedSet<Text> splitset = new TreeSet<>();
+ splitset.add(new Text("f"));
+ connector.tableOperations().addSplits(tableName, splitset);
+
+ List<IteratorSetting> list = new ArrayList<>();
+ list.add(new IteratorSetting(15, HardListIterator.class));
+ connector.tableOperations().compact(tableName, null, null, list, true, false); // don't block
+ connector.tableOperations().cancelCompaction(tableName);
+ // depending on timing, compaction will finish or be canceled
+
+ Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
+ Map<Key,Value> actual = new TreeMap<>(COMPARE_KEY_TO_COLQ); // only compare row, colF, colQ
+ for (Map.Entry<Key,Value> entry : scanner)
+ actual.put(entry.getKey(), entry.getValue());
+ switch (actual.size()) {
+ case 3:
+ // Compaction cancel didn't happen in time
+ assertEquals(HardListIterator.allEntriesToInject, actual);
+ break;
+ case 2:
+ // Compacted the first tablet (-inf, f)
+ assertEquals(HardListIterator.allEntriesToInject.headMap(new Key("f")), actual);
+ break;
+ case 1:
+ // Compacted the second tablet [f, +inf)
+ assertEquals(HardListIterator.allEntriesToInject.tailMap(new Key("f")), actual);
+ break;
+ case 0:
+ // Cancelled the compaction before it ran. No generated entries.
+ break;
+ default:
+ Assert.fail("Unexpected number of entries");
+ break;
+ }
+ connector.tableOperations().delete(tableName);
+ }
+
+ @Test
+ public void testCompactEmptyTableWithGeneratorIterator_Splits_Partial() throws TableExistsException, AccumuloException, AccumuloSecurityException,
+ TableNotFoundException {
+ String tableName = getUniqueNames(1)[0];
+ connector.tableOperations().create(tableName);
+ Text splitRow = new Text("f");
+ SortedSet<Text> splitset = new TreeSet<>();
+ splitset.add(splitRow);
+ connector.tableOperations().addSplits(tableName, splitset);
+
+ List<IteratorSetting> list = new ArrayList<>();
+ list.add(new IteratorSetting(15, HardListIterator.class));
+ // compact the second tablet, not the first
+ connector.tableOperations().compact(tableName, splitRow, null, list, true, true);
+
+ Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
+ Map<Key,Value> actual = new TreeMap<>(COMPARE_KEY_TO_COLQ); // only compare row, colF, colQ
+ for (Map.Entry<Key,Value> entry : scanner)
+ actual.put(entry.getKey(), entry.getValue());
+ // only expect the entries in the second tablet
+ assertEquals(HardListIterator.allEntriesToInject.tailMap(new Key(splitRow)), actual);
+ connector.tableOperations().delete(tableName);
+ }
+
+ /** Test recovery from bad majc iterator via compaction cancel. */
+ @Test
+ public void testCompactEmptyTablesWithBadIterator_FailsAndCancel() throws TableExistsException, AccumuloException, AccumuloSecurityException,
+ TableNotFoundException {
+ String tableName = getUniqueNames(1)[0];
+ connector.tableOperations().create(tableName);
+
+ List<IteratorSetting> list = new ArrayList<>();
+ list.add(new IteratorSetting(15, BadIterator.class));
+ connector.tableOperations().compact(tableName, null, null, list, true, false); // don't block
+ UtilWaitThread.sleep(2000); // give the compaction time to start before canceling it
+ connector.tableOperations().cancelCompaction(tableName);
+
+ Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
+ Map<Key,Value> actual = new TreeMap<>();
+ for (Map.Entry<Key,Value> entry : scanner)
+ actual.put(entry.getKey(), entry.getValue());
+ assertTrue("Should be empty. Actual is " + actual, actual.isEmpty());
+ connector.tableOperations().delete(tableName);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/TabletServerGivesUpIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/TabletServerGivesUpIT.java b/test/src/main/java/org/apache/accumulo/test/TabletServerGivesUpIT.java
new file mode 100644
index 0000000..06bf394
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/TabletServerGivesUpIT.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.TreeSet;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+// ACCUMULO-2480
+public class TabletServerGivesUpIT extends ConfigurableMacBase {
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.useMiniDFS(true);
+ cfg.setNumTservers(1);
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
+ }
+
+ @Test(timeout = 30 * 1000)
+ public void test() throws Exception {
+ final Connector conn = this.getConnector();
+ // Yes, there's a tabletserver
+ assertEquals(1, conn.instanceOperations().getTabletServers().size());
+ final String tableName = getUniqueNames(1)[0];
+ conn.tableOperations().create(tableName);
+ // Kill dfs
+ cluster.getMiniDfs().shutdown();
+ // ask the tserver to do something
+ final AtomicReference<Exception> ex = new AtomicReference<>();
+ Thread splitter = new Thread() {
+ @Override
+ public void run() {
+ try {
+ TreeSet<Text> splits = new TreeSet<>();
+ splits.add(new Text("X"));
+ conn.tableOperations().addSplits(tableName, splits);
+ } catch (Exception e) {
+ ex.set(e);
+ }
+ }
+ };
+ splitter.start();
+ // wait for the tserver to give up on writing to the WAL
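+ // (with DFS down, WAL writes fail; the tserver is expected to halt, dropping the live-server count below one)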
+ while (conn.instanceOperations().getTabletServers().size() == 1) {
+ UtilWaitThread.sleep(1000);
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/TotalQueuedIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/TotalQueuedIT.java b/test/src/main/java/org/apache/accumulo/test/TotalQueuedIT.java
new file mode 100644
index 0000000..bf2e7f1
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/TotalQueuedIT.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.master.thrift.TabletServerStatus;
+import org.apache.accumulo.core.rpc.ThriftUtil;
+import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.MemoryUnit;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.AccumuloServerContext;
+import org.apache.accumulo.server.conf.ServerConfigurationFactory;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+import com.google.common.net.HostAndPort;
+
+// see ACCUMULO-1950
+public class TotalQueuedIT extends ConfigurableMacBase {
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setNumTservers(1);
+ cfg.setDefaultMemory(cfg.getDefaultMemory() * 2, MemoryUnit.BYTE);
+ cfg.useMiniDFS();
+ }
+
+ int SMALL_QUEUE_SIZE = 100000;
+ int LARGE_QUEUE_SIZE = SMALL_QUEUE_SIZE * 10;
+ static final long N = 1000000;
+
+ @Test(timeout = 4 * 60 * 1000)
+ public void test() throws Exception {
+ Random random = new Random();
+ Connector c = getConnector();
+ c.instanceOperations().setProperty(Property.TSERV_TOTAL_MUTATION_QUEUE_MAX.getKey(), "" + SMALL_QUEUE_SIZE);
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "9999");
+ c.tableOperations().setProperty(tableName, Property.TABLE_FILE_MAX.getKey(), "999");
+ UtilWaitThread.sleep(1000);
+ // get an idea of how fast the syncs occur
+ byte[] row = new byte[250];
+ BatchWriterConfig cfg = new BatchWriterConfig();
+ cfg.setMaxWriteThreads(10);
+ cfg.setMaxLatency(1, TimeUnit.SECONDS);
+ cfg.setMaxMemory(1024 * 1024);
+ long realSyncs = getSyncs();
+ BatchWriter bw = c.createBatchWriter(tableName, cfg);
+ long now = System.currentTimeMillis();
+ long bytesSent = 0;
+ for (int i = 0; i < N; i++) {
+ random.nextBytes(row);
+ Mutation m = new Mutation(row);
+ m.put("", "", "");
+ bw.addMutation(m);
+ bytesSent += m.estimatedMemoryUsed();
+ }
+ bw.close();
+ long diff = System.currentTimeMillis() - now;
+ double secs = diff / 1000.;
+ double syncs = (double) bytesSent / SMALL_QUEUE_SIZE;
+ double syncsPerSec = syncs / secs;
+ System.out.println(String.format("Sent %d bytes in %f secs approximately %d syncs (%f syncs per sec)", bytesSent, secs, ((long) syncs), syncsPerSec));
+ long update = getSyncs();
+ System.out.println("Syncs " + (update - realSyncs));
+ realSyncs = update;
+
+ // Now with a much bigger total queue
+ c.instanceOperations().setProperty(Property.TSERV_TOTAL_MUTATION_QUEUE_MAX.getKey(), "" + LARGE_QUEUE_SIZE);
+ c.tableOperations().flush(tableName, null, null, true);
+ UtilWaitThread.sleep(1000);
+ bw = c.createBatchWriter(tableName, cfg);
+ now = System.currentTimeMillis();
+ bytesSent = 0;
+ for (int i = 0; i < N; i++) {
+ random.nextBytes(row);
+ Mutation m = new Mutation(row);
+ m.put("", "", "");
+ bw.addMutation(m);
+ bytesSent += m.estimatedMemoryUsed();
+ }
+ bw.close();
+ diff = System.currentTimeMillis() - now;
+ secs = diff / 1000.;
+ syncs = (double) bytesSent / LARGE_QUEUE_SIZE;
+ syncsPerSec = syncs / secs;
+ System.out.println(String.format("Sent %d bytes in %f secs approximately %d syncs (%f syncs per sec)", bytesSent, secs, ((long) syncs), syncsPerSec));
+ update = getSyncs();
+ System.out.println("Syncs " + (update - realSyncs));
+ assertTrue(update - realSyncs < realSyncs);
+ }
+
+ private long getSyncs() throws Exception {
+ Connector c = getConnector();
+ ServerConfigurationFactory confFactory = new ServerConfigurationFactory(c.getInstance());
+ AccumuloServerContext context = new AccumuloServerContext(confFactory);
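+ // only one tserver is configured for this test, so return the sync count of the first one found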
+ for (String address : c.instanceOperations().getTabletServers()) {
+ TabletClientService.Client client = ThriftUtil.getTServerClient(HostAndPort.fromString(address), context);
+ TabletServerStatus status = client.getTabletServerStatus(null, context.rpcCreds());
+ return status.syncs;
+ }
+ return 0;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/TracerRecoversAfterOfflineTableIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/TracerRecoversAfterOfflineTableIT.java b/test/src/main/java/org/apache/accumulo/test/TracerRecoversAfterOfflineTableIT.java
new file mode 100644
index 0000000..1c6e3df
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/TracerRecoversAfterOfflineTableIT.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.trace.DistributedTrace;
+import org.apache.accumulo.core.trace.Span;
+import org.apache.accumulo.core.trace.Trace;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.accumulo.tracer.TraceDump;
+import org.apache.accumulo.tracer.TraceDump.Printer;
+import org.apache.accumulo.tracer.TraceServer;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+/**
+ * Verifies that a trace written while the trace table was offline can still be read back once the table comes online again.
+ */
+public class TracerRecoversAfterOfflineTableIT extends ConfigurableMacBase {
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
+ cfg.setNumTservers(1);
+ }
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Test
+ public void test() throws Exception {
+ Process tracer = null;
+ Connector conn = getConnector();
+ if (!conn.tableOperations().exists("trace")) {
+ MiniAccumuloClusterImpl mac = cluster;
+ tracer = mac.exec(TraceServer.class);
+ while (!conn.tableOperations().exists("trace")) {
+ UtilWaitThread.sleep(1000);
+ }
+ UtilWaitThread.sleep(5000);
+ }
+
+ log.info("Taking table offline");
+ conn.tableOperations().offline("trace", true);
+
+ String tableName = getUniqueNames(1)[0];
+ conn.tableOperations().create(tableName);
+
+ log.info("Start a distributed trace span");
+
+ DistributedTrace.enable("localhost", "testTrace", getClientConfig());
+ Span root = Trace.on("traceTest");
+ BatchWriter bw = conn.createBatchWriter(tableName, null);
+ Mutation m = new Mutation("m");
+ m.put("a", "b", "c");
+ bw.addMutation(m);
+ bw.close();
+ root.stop();
+
+ log.info("Bringing trace table back online");
+ conn.tableOperations().online("trace", true);
+
+ log.info("Trace table is online, should be able to find trace");
+
+ final Scanner scanner = conn.createScanner("trace", Authorizations.EMPTY);
+ scanner.setRange(new Range(new Text(Long.toHexString(root.traceId()))));
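+ // trace table rows are keyed by the hex-encoded trace id, so this range isolates our trace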
+ while (true) {
+ final StringBuffer finalBuffer = new StringBuffer();
+ int traceCount = TraceDump.printTrace(scanner, new Printer() {
+ @Override
+ public void print(final String line) {
+ try {
+ finalBuffer.append(line).append("\n");
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+ });
+ String traceOutput = finalBuffer.toString();
+ log.info("Trace output:" + traceOutput);
+ if (traceCount > 0) {
+ int lastPos = 0;
+ for (String part : "traceTest,close,binMutations".split(",")) {
+ log.info("Looking in trace output for '" + part + "'");
+ int pos = traceOutput.indexOf(part);
+ assertTrue("Did not find '" + part + "' in output", pos > 0);
+ assertTrue("'" + part + "' occurred earlier than the previous element unexpectedly", pos > lastPos);
+ lastPos = pos;
+ }
+ break;
+ } else {
+ log.info("Ignoring trace output as traceCount not greater than zero: " + traceCount);
+ Thread.sleep(1000);
+ }
+ }
+ if (tracer != null) {
+ tracer.destroy();
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/TransportCachingIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/TransportCachingIT.java b/test/src/main/java/org/apache/accumulo/test/TransportCachingIT.java
new file mode 100644
index 0000000..9cc3dc0
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/TransportCachingIT.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static com.google.common.base.Charsets.UTF_8;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.impl.ClientContext;
+import org.apache.accumulo.core.client.impl.Credentials;
+import org.apache.accumulo.core.client.impl.ThriftTransportKey;
+import org.apache.accumulo.core.client.impl.ThriftTransportPool;
+import org.apache.accumulo.core.conf.DefaultConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.util.ServerServices;
+import org.apache.accumulo.core.util.ServerServices.Service;
+import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.fate.zookeeper.ZooCache;
+import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportException;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Test that {@link ThriftTransportPool} actually adheres to the cachedConnection argument
+ */
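+// A rough reading of the behavior under test (informal, not the documented contract):
+// getAnyTransport(servers, true) may return a transport previously handed back to the
+// pool, while getAnyTransport(servers, false) always opens a fresh one. The three
+// reservations below check exactly that.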
+public class TransportCachingIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(TransportCachingIT.class);
+
+ @Test
+ public void testCachedTransport() {
+ Connector conn = getConnector();
+ Instance instance = conn.getInstance();
+ ClientConfiguration clientConf = cluster.getClientConfig();
+ ClientContext context = new ClientContext(instance, new Credentials(getAdminPrincipal(), getAdminToken()), clientConf);
+ long rpcTimeout = DefaultConfiguration.getTimeInMillis(Property.GENERAL_RPC_TIMEOUT.getDefaultValue());
+
+ // create list of servers
+ ArrayList<ThriftTransportKey> servers = new ArrayList<ThriftTransportKey>();
+
+ // add tservers
+ ZooCache zc = new ZooCacheFactory().getZooCache(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut());
+ for (String tserver : zc.getChildren(ZooUtil.getRoot(instance) + Constants.ZTSERVERS)) {
+ String path = ZooUtil.getRoot(instance) + Constants.ZTSERVERS + "/" + tserver;
+ byte[] data = ZooUtil.getLockData(zc, path);
+ if (data != null) {
+ String strData = new String(data, UTF_8);
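+ // a lock datum of "master" is not a tserver address, so skip it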
+ if (!strData.equals("master"))
+ servers.add(new ThriftTransportKey(new ServerServices(strData).getAddress(Service.TSERV_CLIENT), rpcTimeout, context));
+ }
+ }
+
+ ThriftTransportPool pool = ThriftTransportPool.getInstance();
+ TTransport first = null;
+ while (null == first) {
+ try {
+ // Get a transport (cached or not)
+ first = pool.getAnyTransport(servers, true).getSecond();
+ } catch (TTransportException e) {
+ log.warn("Failed to obtain transport to " + servers);
+ }
+ }
+
+ assertNotNull(first);
+ // Return it to unreserve it
+ pool.returnTransport(first);
+
+ TTransport second = null;
+ while (null == second) {
+ try {
+ // Get a cached transport (should be the first)
+ second = pool.getAnyTransport(servers, true).getSecond();
+ } catch (TTransportException e) {
+ log.warn("Failed obtain 2nd transport to " + servers);
+ }
+ }
+
+ // We should get the same transport
+ assertTrue("Expected the first and second to be the same instance", first == second);
+ // Return the 2nd
+ pool.returnTransport(second);
+
+ TTransport third = null;
+ while (null == third) {
+ try {
+ // Get a non-cached transport
+ third = pool.getAnyTransport(servers, false).getSecond();
+ } catch (TTransportException e) {
+ log.warn("Failed obtain 2nd transport to " + servers);
+ }
+ }
+
+ assertFalse("Expected second and third transport to be different instances", second == third);
+ pool.returnTransport(third);
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/UnusedWALIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/UnusedWALIT.java b/test/src/main/java/org/apache/accumulo/test/UnusedWALIT.java
new file mode 100644
index 0000000..281c358
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/UnusedWALIT.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.UUID;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.log.WalStateManager;
+import org.apache.accumulo.server.master.state.TServerInstance;
+import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+// When reviewing the changes for ACCUMULO-3423, kturner suggested
+// "tablets will now have log references that contain no data,
+// so it may be marked with 3 WALs, the first with data, the 2nd without, a 3rd with data.
+// It would be useful to have an IT that will test this situation."
+public class UnusedWALIT extends ConfigurableMacBase {
+
+ private ZooReaderWriter zk;
+
+ @Override
+ protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ final long logSize = 1024 * 1024 * 10;
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
+ cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, Long.toString(logSize));
+ cfg.setNumTservers(1);
+ // use raw local file system so walogs sync and flush will work
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ hadoopCoreSite.set("fs.namenode.fs-limits.min-block-size", Long.toString(logSize));
+ }
+
+ @Test(timeout = 2 * 60 * 1000)
+ public void test() throws Exception {
+ // stop the garbage collector so it does not clean up walog entries
+ getCluster().getClusterControl().stop(ServerType.GARBAGE_COLLECTOR);
+
+ // make two tables
+ String[] tableNames = getUniqueNames(2);
+ String bigTable = tableNames[0];
+ String lilTable = tableNames[1];
+ Connector c = getConnector();
+ c.tableOperations().create(bigTable);
+ c.tableOperations().create(lilTable);
+
+ Instance i = c.getInstance();
+ zk = new ZooReaderWriter(i.getZooKeepers(), i.getZooKeepersSessionTimeOut(), "");
+
+ // put some data in a log that should be replayed for both tables
+ writeSomeData(c, bigTable, 0, 10, 0, 10);
+ scanSomeData(c, bigTable, 0, 10, 0, 10);
+ writeSomeData(c, lilTable, 0, 1, 0, 1);
+ scanSomeData(c, lilTable, 0, 1, 0, 1);
+ assertEquals(2, getWALCount(i, zk));
+
+ // roll the logs by pushing data into bigTable
+ writeSomeData(c, bigTable, 0, 3000, 0, 1000);
+ assertEquals(3, getWALCount(i, zk));
+
+ // put some data in the latest log
+ writeSomeData(c, lilTable, 1, 10, 0, 10);
+ scanSomeData(c, lilTable, 1, 10, 0, 10);
+
+ // bounce the tserver
+ getCluster().getClusterControl().stop(ServerType.TABLET_SERVER);
+ getCluster().getClusterControl().start(ServerType.TABLET_SERVER);
+
+ // wait for the metadata table to be online
+ Iterators.size(c.createScanner(MetadataTable.NAME, Authorizations.EMPTY).iterator());
+
+ // check our two sets of data in different logs
+ scanSomeData(c, lilTable, 0, 1, 0, 1);
+ scanSomeData(c, lilTable, 1, 10, 0, 10);
+ }
+
+ private void scanSomeData(Connector c, String table, int startRow, int rowCount, int startCol, int colCount) throws Exception {
+ Scanner s = c.createScanner(table, Authorizations.EMPTY);
+ s.setRange(new Range(Integer.toHexString(startRow), Integer.toHexString(startRow + rowCount)));
+ int row = startRow;
+ int col = startCol;
+ for (Entry<Key,Value> entry : s) {
+ assertEquals(row, Integer.parseInt(entry.getKey().getRow().toString(), 16));
+ assertEquals(col++, Integer.parseInt(entry.getKey().getColumnQualifier().toString(), 16));
+ if (col == startCol + colCount) {
+ col = startCol;
+ row++;
+ if (row == startRow + rowCount) {
+ break;
+ }
+ }
+ }
+ assertEquals(startRow + rowCount, row);
+ }
+
+ private int getWALCount(Instance i, ZooReaderWriter zk) throws Exception {
+ WalStateManager wals = new WalStateManager(i, zk);
+ int result = 0;
+ for (Entry<TServerInstance,List<UUID>> entry : wals.getAllMarkers().entrySet()) {
+ result += entry.getValue().size();
+ }
+ return result;
+ }
+
+ private void writeSomeData(Connector conn, String table, int startRow, int rowCount, int startCol, int colCount) throws Exception {
+ BatchWriterConfig config = new BatchWriterConfig();
+ config.setMaxMemory(10 * 1024 * 1024);
+ BatchWriter bw = conn.createBatchWriter(table, config);
+ for (int r = startRow; r < startRow + rowCount; r++) {
+ Mutation m = new Mutation(Integer.toHexString(r));
+ for (int c = startCol; c < startCol + colCount; c++) {
+ m.put("", Integer.toHexString(c), "");
+ }
+ bw.addMutation(m);
+ }
+ bw.close();
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/UserCompactionStrategyIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/UserCompactionStrategyIT.java b/test/src/main/java/org/apache/accumulo/test/UserCompactionStrategyIT.java
new file mode 100644
index 0000000..fa9e642
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/UserCompactionStrategyIT.java
@@ -0,0 +1,296 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.accumulo.test;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.admin.CompactionConfig;
+import org.apache.accumulo.core.client.admin.CompactionStrategyConfig;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.user.RegExFilter;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.test.functional.FunctionalTestUtils;
+import org.apache.accumulo.test.functional.SlowIterator;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Test;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+
+public class UserCompactionStrategyIT extends AccumuloClusterHarness {
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 3 * 60;
+ }
+
+ @Test
+ public void testDropA() throws Exception {
+ Connector c = getConnector();
+
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+
+ writeFlush(c, tableName, "a");
+ writeFlush(c, tableName, "b");
+ // create a file that starts with A containing rows 'a' and 'b'
+ c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
+
+ writeFlush(c, tableName, "c");
+ writeFlush(c, tableName, "d");
+
+ // drop files that start with A
+ CompactionStrategyConfig csConfig = new CompactionStrategyConfig(TestCompactionStrategy.class.getName());
+ csConfig.setOptions(ImmutableMap.of("dropPrefix", "A", "inputPrefix", "F"));
+ c.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setCompactionStrategy(csConfig));
+
+ Assert.assertEquals(ImmutableSet.of("c", "d"), getRows(c, tableName));
+
+ // this compaction should not drop files starting with A
+ c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
+ c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
+
+ Assert.assertEquals(ImmutableSet.of("c", "d"), getRows(c, tableName));
+ }
+
+ private void testDropNone(Map<String,String> options) throws Exception {
+
+ Connector c = getConnector();
+
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+
+ writeFlush(c, tableName, "a");
+ writeFlush(c, tableName, "b");
+
+ CompactionStrategyConfig csConfig = new CompactionStrategyConfig(TestCompactionStrategy.class.getName());
+ csConfig.setOptions(options);
+ c.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setCompactionStrategy(csConfig));
+
+ Assert.assertEquals(ImmutableSet.of("a", "b"), getRows(c, tableName));
+ }
+
+ @Test
+ public void testDropNone() throws Exception {
+ // test a compaction strategy that selects no files. There is no work to do in this case; ensure the compaction does not hang.
+
+ testDropNone(ImmutableMap.of("inputPrefix", "Z"));
+ }
+
+ @Test
+ public void testDropNone2() throws Exception {
+ // test a compaction strategy that selects no files. This differs from testDropNone() in that shouldCompact() will return true and getCompactionPlan()
+ // will return no work to do.
+
+ testDropNone(ImmutableMap.of("inputPrefix", "Z", "shouldCompact", "true"));
+ }
+
+ @Test
+ public void testPerTableClasspath() throws Exception {
+ // Can't assume that a test-resource will be on the server's classpath
+ Assume.assumeTrue(ClusterType.MINI == getClusterType());
+
+ // test pertable classpath + user specified compaction strat
+
+ final Connector c = getConnector();
+ final String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ c.instanceOperations().setProperty(Property.VFS_CONTEXT_CLASSPATH_PROPERTY.getKey() + "context1",
+ System.getProperty("user.dir") + "/src/test/resources/TestCompactionStrat.jar");
+ c.tableOperations().setProperty(tableName, Property.TABLE_CLASSPATH.getKey(), "context1");
+
+ c.tableOperations().addSplits(tableName, new TreeSet<Text>(Arrays.asList(new Text("efg"))));
+
+ writeFlush(c, tableName, "a");
+ writeFlush(c, tableName, "b");
+
+ writeFlush(c, tableName, "h");
+ writeFlush(c, tableName, "i");
+
+ Assert.assertEquals(4, FunctionalTestUtils.countRFiles(c, tableName));
+
+ // EfgCompactionStrat will only compact a tablet w/ end row of 'efg'. No other tablets are compacted.
+ CompactionStrategyConfig csConfig = new CompactionStrategyConfig("org.apache.accumulo.test.EfgCompactionStrat");
+ c.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setCompactionStrategy(csConfig));
+
+ Assert.assertEquals(3, FunctionalTestUtils.countRFiles(c, tableName));
+
+ c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
+
+ Assert.assertEquals(2, FunctionalTestUtils.countRFiles(c, tableName));
+ }
+
+ @Test
+ public void testIterators() throws Exception {
+ // test compaction strategy + iterators
+
+ Connector c = getConnector();
+
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+
+ writeFlush(c, tableName, "a");
+ writeFlush(c, tableName, "b");
+ // create a file that starts with A containing rows 'a' and 'b'
+ c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
+
+ writeFlush(c, tableName, "c");
+ writeFlush(c, tableName, "d");
+
+ Assert.assertEquals(3, FunctionalTestUtils.countRFiles(c, tableName));
+
+ // drop files that start with A
+ CompactionStrategyConfig csConfig = new CompactionStrategyConfig(TestCompactionStrategy.class.getName());
+ csConfig.setOptions(ImmutableMap.of("inputPrefix", "F"));
+
+ IteratorSetting iterConf = new IteratorSetting(21, "myregex", RegExFilter.class);
+ RegExFilter.setRegexs(iterConf, "a|c", null, null, null, false);
+
+ c.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setCompactionStrategy(csConfig).setIterators(Arrays.asList(iterConf)));
+
+ // The compaction strategy should only be applied to one file. If it were applied to both, row 'b' would be dropped by the filter.
+ Assert.assertEquals(ImmutableSet.of("a", "b", "c"), getRows(c, tableName));
+
+ Assert.assertEquals(2, FunctionalTestUtils.countRFiles(c, tableName));
+
+ c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
+
+ // ensure that iterator is not applied
+ Assert.assertEquals(ImmutableSet.of("a", "b", "c"), getRows(c, tableName));
+
+ Assert.assertEquals(1, FunctionalTestUtils.countRFiles(c, tableName));
+ }
+
+ @Test
+ public void testFileSize() throws Exception {
+ Connector c = getConnector();
+
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+
+ // write random data because it is very unlikely to compress
+ writeRandomValue(c, tableName, 1 << 16);
+ writeRandomValue(c, tableName, 1 << 16);
+
+ writeRandomValue(c, tableName, 1 << 9);
+ writeRandomValue(c, tableName, 1 << 7);
+ writeRandomValue(c, tableName, 1 << 6);
+
+ Assert.assertEquals(5, FunctionalTestUtils.countRFiles(c, tableName));
+
+ CompactionStrategyConfig csConfig = new CompactionStrategyConfig(SizeCompactionStrategy.class.getName());
+ csConfig.setOptions(ImmutableMap.of("size", "" + (1 << 15)));
+ c.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setCompactionStrategy(csConfig));
+
+ Assert.assertEquals(3, FunctionalTestUtils.countRFiles(c, tableName));
+
+ csConfig = new CompactionStrategyConfig(SizeCompactionStrategy.class.getName());
+ csConfig.setOptions(ImmutableMap.of("size", "" + (1 << 17)));
+ c.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setCompactionStrategy(csConfig));
+
+ Assert.assertEquals(1, FunctionalTestUtils.countRFiles(c, tableName));
+
+ }
+
+ @Test
+ public void testConcurrent() throws Exception {
+ // two compactions without iterators or strategy should be able to run concurrently
+
+ Connector c = getConnector();
+
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+
+ // write random data because it is very unlikely to compress
+ writeRandomValue(c, tableName, 1 << 16);
+ writeRandomValue(c, tableName, 1 << 16);
+
+ c.tableOperations().compact(tableName, new CompactionConfig().setWait(false));
+ c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
+
+ Assert.assertEquals(1, FunctionalTestUtils.countRFiles(c, tableName));
+
+ writeRandomValue(c, tableName, 1 << 16);
+
+ IteratorSetting iterConfig = new IteratorSetting(30, SlowIterator.class);
+ SlowIterator.setSleepTime(iterConfig, 1000);
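+ // the slow iterator keeps the first compaction running long enough for the second one to overlap it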
+
+ long t1 = System.currentTimeMillis();
+ c.tableOperations().compact(tableName, new CompactionConfig().setWait(false).setIterators(Arrays.asList(iterConfig)));
+ try {
+ // this compaction should fail because previous one set iterators
+ c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
+ if (System.currentTimeMillis() - t1 < 2000)
+ Assert.fail("Expected compaction to fail because another concurrent compaction set iterators");
+ } catch (AccumuloException e) {}
+ }
+
+ void writeRandomValue(Connector c, String tableName, int size) throws Exception {
+ Random rand = new Random();
+
+ byte[] data1 = new byte[size];
+ rand.nextBytes(data1);
+
+ BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+
+ Mutation m1 = new Mutation("r" + rand.nextInt(909090));
+ m1.put("data", "bl0b", new Value(data1));
+
+ bw.addMutation(m1);
+ bw.close();
+ c.tableOperations().flush(tableName, null, null, true);
+ }
+
+ private Set<String> getRows(Connector c, String tableName) throws TableNotFoundException {
+ Set<String> rows = new HashSet<String>();
+ Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
+
+ for (Entry<Key,Value> entry : scanner)
+ rows.add(entry.getKey().getRowData().toString());
+ return rows;
+
+ }
+
+ private void writeFlush(Connector conn, String tablename, String row) throws Exception {
+ BatchWriter bw = conn.createBatchWriter(tablename, new BatchWriterConfig());
+ Mutation m = new Mutation(row);
+ m.put("", "", "");
+ bw.addMutation(m);
+ bw.close();
+ conn.tableOperations().flush(tablename, null, null, true);
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/UsersIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/UsersIT.java b/test/src/main/java/org/apache/accumulo/test/UsersIT.java
new file mode 100644
index 0000000..131f042
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/UsersIT.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.Set;
+
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.security.SecurityErrorCode;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.junit.Test;
+
+public class UsersIT extends AccumuloClusterHarness {
+
+ @Test
+ public void testCreateExistingUser() throws Exception {
+ ClusterUser user0 = getUser(0);
+ Connector conn = getConnector();
+ Set<String> currentUsers = conn.securityOperations().listLocalUsers();
+
+ // Ensure that the user exists
+ if (!currentUsers.contains(user0.getPrincipal())) {
+ PasswordToken token = null;
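+ // when SASL (Kerberos) is enabled, local users are created without a password token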
+ if (!getCluster().getClientConfig().getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ token = new PasswordToken(user0.getPassword());
+ }
+ conn.securityOperations().createLocalUser(user0.getPrincipal(), token);
+ }
+
+ try {
+ conn.securityOperations().createLocalUser(user0.getPrincipal(), new PasswordToken("better_fail"));
+ fail("Creating a user that already exists should throw an exception");
+ } catch (AccumuloSecurityException e) {
+ assertTrue("Expected USER_EXISTS error", SecurityErrorCode.USER_EXISTS == e.getSecurityErrorCode());
+ String msg = e.getMessage();
+ assertTrue("Error message didn't contain principal: '" + msg + "'", msg.contains(user0.getPrincipal()));
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/VerifySerialRecoveryIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/VerifySerialRecoveryIT.java b/test/src/main/java/org/apache/accumulo/test/VerifySerialRecoveryIT.java
new file mode 100644
index 0000000..6a90730
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/VerifySerialRecoveryIT.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.minicluster.impl.ProcessReference;
+import org.apache.accumulo.server.util.Admin;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.accumulo.test.functional.FunctionalTestUtils;
+import org.apache.accumulo.tserver.TabletServer;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class VerifySerialRecoveryIT extends ConfigurableMacBase {
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setNumTservers(1);
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "3s");
+ cfg.setProperty(Property.TSERV_ASSIGNMENT_MAXCONCURRENT, "20");
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ @Test(timeout = 4 * 60 * 1000)
+ public void testSerializedRecovery() throws Exception {
+ // make a table with many splits
+ String tableName = getUniqueNames(1)[0];
+ Connector c = getConnector();
+ c.tableOperations().create(tableName);
+ SortedSet<Text> splits = new TreeSet<Text>();
+ for (int i = 0; i < 200; i++) {
+ splits.add(new Text(AssignmentThreadsIT.randomHex(8)));
+ }
+ c.tableOperations().addSplits(tableName, splits);
+ // load data to give the recovery something to do
+ BatchWriter bw = c.createBatchWriter(tableName, null);
+ for (int i = 0; i < 50000; i++) {
+ Mutation m = new Mutation(AssignmentThreadsIT.randomHex(8));
+ m.put("", "", "");
+ bw.addMutation(m);
+ }
+ bw.close();
+ // kill the tserver
+ for (ProcessReference ref : getCluster().getProcesses().get(ServerType.TABLET_SERVER))
+ getCluster().killProcess(ServerType.TABLET_SERVER, ref);
+ final Process ts = cluster.exec(TabletServer.class);
+
+ // wait for recovery
+ Iterators.size(c.createScanner(tableName, Authorizations.EMPTY).iterator());
+ assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+ ts.waitFor();
+ String result = FunctionalTestUtils.readAll(cluster, TabletServer.class, ts);
+ for (String line : result.split("\n")) {
+ System.out.println(line);
+ }
+ // walk through the output, verifying that only a single normal recovery was running at one time
+ boolean started = false;
+ int recoveries = 0;
+ for (String line : result.split("\n")) {
+ // ignore the metadata (!0) and root (+r) tables
+ if (line.contains("!0") || line.contains("+r"))
+ continue;
+ if (line.contains("Starting Write-Ahead Log")) {
+ assertFalse(started);
+ started = true;
+ recoveries++;
+ }
+ if (line.contains("Write-Ahead Log recovery complete")) {
+ assertTrue(started);
+ started = false;
+ }
+ }
+ assertFalse(started);
+ assertTrue(recoveries > 0);
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/proxy/TBinaryProxyIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/proxy/TBinaryProxyIT.java b/test/src/main/java/org/apache/accumulo/test/proxy/TBinaryProxyIT.java
new file mode 100644
index 0000000..6359d1e
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/proxy/TBinaryProxyIT.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.proxy;
+
+import org.apache.thrift.protocol.TBinaryProtocol;
+import org.junit.BeforeClass;
+
+/**
+ * Runs the {@link SimpleProxyBase} tests over the Thrift binary protocol.
+ */
+public class TBinaryProxyIT extends SimpleProxyBase {
+
+ @BeforeClass
+ public static void setProtocol() throws Exception {
+ SimpleProxyBase.factory = new TBinaryProtocol.Factory();
+ setUpProxy();
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/proxy/TCompactProxyIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/proxy/TCompactProxyIT.java b/test/src/main/java/org/apache/accumulo/test/proxy/TCompactProxyIT.java
new file mode 100644
index 0000000..a92414a
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/proxy/TCompactProxyIT.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.proxy;
+
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.junit.BeforeClass;
+
+/**
+ * Runs the {@link SimpleProxyBase} tests over the Thrift compact protocol.
+ */
+public class TCompactProxyIT extends SimpleProxyBase {
+
+ @BeforeClass
+ public static void setProtocol() throws Exception {
+ SimpleProxyBase.factory = new TCompactProtocol.Factory();
+ setUpProxy();
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/proxy/TJsonProtocolProxyIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/proxy/TJsonProtocolProxyIT.java b/test/src/main/java/org/apache/accumulo/test/proxy/TJsonProtocolProxyIT.java
new file mode 100644
index 0000000..5fcbf53
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/proxy/TJsonProtocolProxyIT.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.proxy;
+
+import org.apache.thrift.protocol.TJSONProtocol;
+import org.junit.BeforeClass;
+
+/**
+ * Runs the {@link SimpleProxyBase} tests over the Thrift JSON protocol.
+ */
+public class TJsonProtocolProxyIT extends SimpleProxyBase {
+
+ @BeforeClass
+ public static void setProtocol() throws Exception {
+ SimpleProxyBase.factory = new TJSONProtocol.Factory();
+ setUpProxy();
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/proxy/TTupleProxyIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/proxy/TTupleProxyIT.java b/test/src/main/java/org/apache/accumulo/test/proxy/TTupleProxyIT.java
new file mode 100644
index 0000000..cdecf2c
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/proxy/TTupleProxyIT.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.proxy;
+
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.junit.BeforeClass;
+
+/**
+ * Runs the {@link SimpleProxyBase} tests over the Thrift tuple protocol.
+ */
+public class TTupleProxyIT extends SimpleProxyBase {
+
+ @BeforeClass
+ public static void setProtocol() throws Exception {
+ SimpleProxyBase.factory = new TTupleProtocol.Factory();
+ setUpProxy();
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyClient.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyClient.java b/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyClient.java
new file mode 100644
index 0000000..ff92795
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyClient.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.proxy;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import javax.security.sasl.SaslException;
+
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.iterators.user.RegExFilter;
+import org.apache.accumulo.core.rpc.UGIAssumingTransport;
+import org.apache.accumulo.proxy.Util;
+import org.apache.accumulo.proxy.thrift.AccumuloProxy;
+import org.apache.accumulo.proxy.thrift.ColumnUpdate;
+import org.apache.accumulo.proxy.thrift.Key;
+import org.apache.accumulo.proxy.thrift.ScanResult;
+import org.apache.accumulo.proxy.thrift.TimeType;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.protocol.TProtocolFactory;
+import org.apache.thrift.transport.TFramedTransport;
+import org.apache.thrift.transport.TSaslClientTransport;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportException;
+
+public class TestProxyClient {
+
+ protected AccumuloProxy.Client proxy;
+ protected TTransport transport;
+
+ public TestProxyClient(String host, int port) throws TTransportException {
+ this(host, port, new TCompactProtocol.Factory());
+ }
+
+ public TestProxyClient(String host, int port, TProtocolFactory protoFactory) throws TTransportException {
+ final TSocket socket = new TSocket(host, port);
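+    // Generous ten-minute socket timeout; the demo in main() streams a million mutations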
+ socket.setTimeout(600000);
+ transport = new TFramedTransport(socket);
+ final TProtocol protocol = protoFactory.getProtocol(transport);
+ proxy = new AccumuloProxy.Client(protocol);
+ transport.open();
+ }
+
+ public TestProxyClient(String host, int port, TProtocolFactory protoFactory, String proxyPrimary, UserGroupInformation ugi) throws SaslException,
+ TTransportException {
+ TSocket socket = new TSocket(host, port);
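+    // Kerberos (GSSAPI) SASL transport with auth-only QOP; wrapped below so RPCs run under the caller's UGI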
+ TSaslClientTransport saslTransport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, host, Collections.singletonMap("javax.security.sasl.qop",
+ "auth"), null, socket);
+
+ transport = new UGIAssumingTransport(saslTransport, ugi);
+
+ // UGI transport will perform the doAs for us
+ transport.open();
+
+ AccumuloProxy.Client.Factory factory = new AccumuloProxy.Client.Factory();
+ final TProtocol protocol = protoFactory.getProtocol(transport);
+ proxy = factory.getClient(protocol);
+ }
+
+ public synchronized void close() {
+ if (null != transport) {
+ transport.close();
+ transport = null;
+ }
+ }
+
+ public AccumuloProxy.Client proxy() {
+ return proxy;
+ }
+
+ public static void main(String[] args) throws Exception {
+
+ TestProxyClient tpc = new TestProxyClient("localhost", 42424);
+ String principal = "root";
+ Map<String,String> props = new TreeMap<String,String>();
+ props.put("password", "secret");
+
+ System.out.println("Logging in");
+ ByteBuffer login = tpc.proxy.login(principal, props);
+
+ System.out.println("Creating user: ");
+ if (!tpc.proxy().listLocalUsers(login).contains("testuser")) {
+ tpc.proxy().createLocalUser(login, "testuser", ByteBuffer.wrap("testpass".getBytes(UTF_8)));
+ }
+ System.out.println("UserList: " + tpc.proxy().listLocalUsers(login));
+
+ System.out.println("Listing: " + tpc.proxy().listTables(login));
+
+ System.out.println("Deleting: ");
+ String testTable = "testtableOMGOMGOMG";
+
+ System.out.println("Creating: ");
+
+ if (tpc.proxy().tableExists(login, testTable))
+ tpc.proxy().deleteTable(login, testTable);
+
+ tpc.proxy().createTable(login, testTable, true, TimeType.MILLIS);
+
+ System.out.println("Listing: " + tpc.proxy().listTables(login));
+
+ System.out.println("Writing: ");
+ Date start = new Date();
+ Date then = new Date();
+ int maxInserts = 1000000;
+ String format = "%1$05d";
+ Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
+ for (int i = 0; i < maxInserts; i++) {
+ String result = String.format(format, i);
+ ColumnUpdate update = new ColumnUpdate(ByteBuffer.wrap(("cf" + i).getBytes(UTF_8)), ByteBuffer.wrap(("cq" + i).getBytes(UTF_8)));
+ update.setValue(Util.randStringBuffer(10));
+ mutations.put(ByteBuffer.wrap(result.getBytes(UTF_8)), Collections.singletonList(update));
+
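+      // Ship the accumulated batch every 1,000 rows to bound client-side memory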
+ if (i % 1000 == 0) {
+ tpc.proxy().updateAndFlush(login, testTable, mutations);
+ mutations.clear();
+ }
+ }
+ tpc.proxy().updateAndFlush(login, testTable, mutations);
+ Date end = new Date();
+ System.out.println(" End of writing: " + (end.getTime() - start.getTime()));
+
+ tpc.proxy().deleteTable(login, testTable);
+ tpc.proxy().createTable(login, testTable, true, TimeType.MILLIS);
+
+ // Thread.sleep(1000);
+
+ System.out.println("Writing async: ");
+ start = new Date();
+ then = new Date();
+ mutations.clear();
+ String writer = tpc.proxy().createWriter(login, testTable, null);
+ for (int i = 0; i < maxInserts; i++) {
+ String result = String.format(format, i);
+ Key pkey = new Key();
+ pkey.setRow(result.getBytes(UTF_8));
+ ColumnUpdate update = new ColumnUpdate(ByteBuffer.wrap(("cf" + i).getBytes(UTF_8)), ByteBuffer.wrap(("cq" + i).getBytes(UTF_8)));
+ update.setValue(Util.randStringBuffer(10));
+ mutations.put(ByteBuffer.wrap(result.getBytes(UTF_8)), Collections.singletonList(update));
+ tpc.proxy().update(writer, mutations);
+ mutations.clear();
+ }
+
+ end = new Date();
+ System.out.println(" End of writing: " + (end.getTime() - start.getTime()));
+ start = end;
+ System.out.println("Closing...");
+ tpc.proxy().closeWriter(writer);
+ end = new Date();
+ System.out.println(" End of closing: " + (end.getTime() - start.getTime()));
+
+ System.out.println("Reading: ");
+
+ String regex = "cf1.*";
+
+ IteratorSetting is = new IteratorSetting(50, regex, RegExFilter.class);
+ RegExFilter.setRegexs(is, null, regex, null, null, false);
+
+ String cookie = tpc.proxy().createScanner(login, testTable, null);
+
+ int i = 0;
+ start = new Date();
+ then = new Date();
+ boolean hasNext = true;
+
+ int k = 1000;
+ while (hasNext) {
+ ScanResult kvList = tpc.proxy().nextK(cookie, k);
+
+ Date now = new Date();
+ System.out.println(i + " " + (now.getTime() - then.getTime()));
+ then = now;
+
+ i += kvList.getResultsSize();
+ // for (TKeyValue kv:kvList.getResults()) System.out.println(new Key(kv.getKey()));
+ hasNext = kvList.isMore();
+ }
+ end = new Date();
+ System.out.println("Total entries: " + i + " total time " + (end.getTime() - start.getTime()));
+ }
+}
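
Stripped of its timing scaffolding, main() reduces to a small login/create/scan round trip against the proxy. A minimal sketch of that usage (the host, port, credentials, and "demo" table name are placeholder values, as in the demo above):

TestProxyClient tpc = new TestProxyClient("localhost", 42424);
ByteBuffer login = tpc.proxy().login("root", Collections.singletonMap("password", "secret"));
tpc.proxy().createTable(login, "demo", true, TimeType.MILLIS);
String scanner = tpc.proxy().createScanner(login, "demo", null);
ScanResult batch = tpc.proxy().nextK(scanner, 1000); // results come back k entries at a time
tpc.close();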
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyInstanceOperations.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyInstanceOperations.java b/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyInstanceOperations.java
new file mode 100644
index 0000000..ff94dd4
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyInstanceOperations.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.proxy;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.Properties;
+
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.proxy.Proxy;
+import org.apache.thrift.TException;
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.server.TServer;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.net.HostAndPort;
+
+public class TestProxyInstanceOperations {
+ private static final Logger log = LoggerFactory.getLogger(TestProxyInstanceOperations.class);
+
+ protected static TServer proxy;
+ protected static TestProxyClient tpc;
+ protected static ByteBuffer userpass;
+ protected static final int port = 10197;
+
+ @BeforeClass
+ public static void setup() throws Exception {
+ Properties prop = new Properties();
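+    // Back the proxy with a MockInstance so the test needs no real cluster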
+ prop.setProperty("useMockInstance", "true");
+ prop.put("tokenClass", PasswordToken.class.getName());
+
+ proxy = Proxy.createProxyServer(HostAndPort.fromParts("localhost", port), new TCompactProtocol.Factory(), prop).server;
+ log.info("Waiting for proxy to start");
+ while (!proxy.isServing()) {
+ Thread.sleep(500);
+ }
+ log.info("Proxy started");
+ tpc = new TestProxyClient("localhost", port);
+    userpass = tpc.proxy().login("root", Collections.singletonMap("password", ""));
+ }
+
+ @AfterClass
+ public static void tearDown() throws InterruptedException {
+ proxy.stop();
+ }
+
+ @Test
+ public void properties() throws TException {
+ tpc.proxy().setProperty(userpass, "test.systemprop", "whistletips");
+
+    assertEquals("whistletips", tpc.proxy().getSystemConfiguration(userpass).get("test.systemprop"));
+ tpc.proxy().removeProperty(userpass, "test.systemprop");
+ assertNull(tpc.proxy().getSystemConfiguration(userpass).get("test.systemprop"));
+
+ }
+
+ @Test
+ public void testClassLoad() throws TException {
+ assertTrue(tpc.proxy().testClassLoad(userpass, "org.apache.accumulo.core.iterators.user.RegExFilter", "org.apache.accumulo.core.iterators.Filter"));
+ }
+
+}
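
Each of these standalone proxy tests bootstraps the same way: a Properties object selects the mock instance and token class, the embedded Thrift server is started on a fixed port, and the test polls until it is serving. Distilled (the port is arbitrary, as in the tests):

Properties prop = new Properties();
prop.setProperty("useMockInstance", "true");
prop.put("tokenClass", PasswordToken.class.getName());

TServer proxy = Proxy.createProxyServer(HostAndPort.fromParts("localhost", 10197), new TCompactProtocol.Factory(), prop).server;
while (!proxy.isServing()) {
  Thread.sleep(500); // wait for the Thrift server thread to come up
}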
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyReadWrite.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyReadWrite.java b/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyReadWrite.java
new file mode 100644
index 0000000..1a75fea
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyReadWrite.java
@@ -0,0 +1,468 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.proxy;
+
+import static org.junit.Assert.assertEquals;
+
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.iterators.user.RegExFilter;
+import org.apache.accumulo.proxy.Proxy;
+import org.apache.accumulo.proxy.Util;
+import org.apache.accumulo.proxy.thrift.BatchScanOptions;
+import org.apache.accumulo.proxy.thrift.ColumnUpdate;
+import org.apache.accumulo.proxy.thrift.IteratorSetting;
+import org.apache.accumulo.proxy.thrift.Key;
+import org.apache.accumulo.proxy.thrift.KeyValue;
+import org.apache.accumulo.proxy.thrift.Range;
+import org.apache.accumulo.proxy.thrift.ScanColumn;
+import org.apache.accumulo.proxy.thrift.ScanOptions;
+import org.apache.accumulo.proxy.thrift.ScanResult;
+import org.apache.accumulo.proxy.thrift.TimeType;
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.server.TServer;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.net.HostAndPort;
+
+public class TestProxyReadWrite {
+ protected static TServer proxy;
+ protected static TestProxyClient tpc;
+ protected static ByteBuffer userpass;
+ protected static final int port = 10194;
+ protected static final String testtable = "testtable";
+
+ @BeforeClass
+ public static void setup() throws Exception {
+ Properties prop = new Properties();
+ prop.setProperty("useMockInstance", "true");
+ prop.put("tokenClass", PasswordToken.class.getName());
+
+    proxy = Proxy.createProxyServer(HostAndPort.fromParts("localhost", port), new TCompactProtocol.Factory(), prop).server;
+    while (!proxy.isServing()) {
+      Thread.sleep(500);
+    }
+    tpc = new TestProxyClient("localhost", port);
+ userpass = tpc.proxy().login("root", Collections.singletonMap("password", ""));
+ }
+
+ @AfterClass
+ public static void tearDown() throws InterruptedException {
+ proxy.stop();
+ }
+
+ @Before
+ public void makeTestTable() throws Exception {
+ tpc.proxy().createTable(userpass, testtable, true, TimeType.MILLIS);
+ }
+
+ @After
+ public void deleteTestTable() throws Exception {
+ tpc.proxy().deleteTable(userpass, testtable);
+ }
+
+ private static void addMutation(Map<ByteBuffer,List<ColumnUpdate>> mutations, String row, String cf, String cq, String value) {
+ ColumnUpdate update = new ColumnUpdate(ByteBuffer.wrap(cf.getBytes()), ByteBuffer.wrap(cq.getBytes()));
+ update.setValue(value.getBytes());
+ mutations.put(ByteBuffer.wrap(row.getBytes()), Collections.singletonList(update));
+ }
+
+ private static void addMutation(Map<ByteBuffer,List<ColumnUpdate>> mutations, String row, String cf, String cq, String vis, String value) {
+ ColumnUpdate update = new ColumnUpdate(ByteBuffer.wrap(cf.getBytes()), ByteBuffer.wrap(cq.getBytes()));
+ update.setValue(value.getBytes());
+ update.setColVisibility(vis.getBytes());
+ mutations.put(ByteBuffer.wrap(row.getBytes()), Collections.singletonList(update));
+ }
+
+  /**
+   * Insert 100000 cells whose rows are [00000..99999] (zero-padded). Set a range so that only the entries with rows before "5" come back (there should be
+   * 50,000).
+   */
+ @Test
+ public void readWriteBatchOneShotWithRange() throws Exception {
+ int maxInserts = 100000;
+ Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
+ String format = "%1$05d";
+ for (int i = 0; i < maxInserts; i++) {
+ addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
+
+ if (i % 1000 == 0 || i == maxInserts - 1) {
+ tpc.proxy().updateAndFlush(userpass, testtable, mutations);
+ mutations.clear();
+ }
+ }
+
+ Key stop = new Key();
+ stop.setRow("5".getBytes());
+ BatchScanOptions options = new BatchScanOptions();
+ options.ranges = Collections.singletonList(new Range(null, false, stop, false));
+ String cookie = tpc.proxy().createBatchScanner(userpass, testtable, options);
+
+ int i = 0;
+ boolean hasNext = true;
+
+ int k = 1000;
+ while (hasNext) {
+ ScanResult kvList = tpc.proxy().nextK(cookie, k);
+ i += kvList.getResultsSize();
+ hasNext = kvList.isMore();
+ }
+    assertEquals(50000, i);
+ }
+
+  /**
+   * Insert 100000 cells whose rows are [00000..99999] (zero-padded). Restrict the scan to column family "cf0" so that only the entries written with that
+   * family come back (there should be 50,000).
+   */
+ @Test
+ public void readWriteBatchOneShotWithColumnFamilyOnly() throws Exception {
+ int maxInserts = 100000;
+ Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
+ String format = "%1$05d";
+ for (int i = 0; i < maxInserts; i++) {
+
+ addMutation(mutations, String.format(format, i), "cf" + (i % 2), "cq" + (i % 2), Util.randString(10));
+
+ if (i % 1000 == 0 || i == maxInserts - 1) {
+ tpc.proxy().updateAndFlush(userpass, testtable, mutations);
+ mutations.clear();
+ }
+ }
+
+ BatchScanOptions options = new BatchScanOptions();
+
+ ScanColumn sc = new ScanColumn();
+ sc.colFamily = ByteBuffer.wrap("cf0".getBytes());
+
+ options.columns = Collections.singletonList(sc);
+ String cookie = tpc.proxy().createBatchScanner(userpass, testtable, options);
+
+ int i = 0;
+ boolean hasNext = true;
+
+ int k = 1000;
+ while (hasNext) {
+ ScanResult kvList = tpc.proxy().nextK(cookie, k);
+ i += kvList.getResultsSize();
+ hasNext = kvList.isMore();
+ }
+    assertEquals(50000, i);
+ }
+
+  /**
+   * Insert 100000 cells whose rows are [00000..99999] (zero-padded). Restrict the scan to a column family + column qualifier pair so that only the entries
+   * with that exact column come back (there should be 50,000).
+   */
+ @Test
+ public void readWriteBatchOneShotWithFullColumn() throws Exception {
+ int maxInserts = 100000;
+ Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
+ String format = "%1$05d";
+ for (int i = 0; i < maxInserts; i++) {
+
+ addMutation(mutations, String.format(format, i), "cf" + (i % 2), "cq" + (i % 2), Util.randString(10));
+
+ if (i % 1000 == 0 || i == maxInserts - 1) {
+ tpc.proxy().updateAndFlush(userpass, testtable, mutations);
+ mutations.clear();
+ }
+ }
+
+ BatchScanOptions options = new BatchScanOptions();
+
+ ScanColumn sc = new ScanColumn();
+ sc.colFamily = ByteBuffer.wrap("cf0".getBytes());
+ sc.colQualifier = ByteBuffer.wrap("cq0".getBytes());
+
+ options.columns = Collections.singletonList(sc);
+ String cookie = tpc.proxy().createBatchScanner(userpass, testtable, options);
+
+ int i = 0;
+ boolean hasNext = true;
+
+ int k = 1000;
+ while (hasNext) {
+ ScanResult kvList = tpc.proxy().nextK(cookie, k);
+ i += kvList.getResultsSize();
+ hasNext = kvList.isMore();
+ }
+    assertEquals(50000, i);
+ }
+
+  /**
+   * Insert 10000 cells whose rows are [00000..09999] (zero-padded). Filter the results so that only the even-numbered rows come back.
+   */
+ @Test
+ public void readWriteBatchOneShotWithFilterIterator() throws Exception {
+ int maxInserts = 10000;
+ Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
+ String format = "%1$05d";
+ for (int i = 0; i < maxInserts; i++) {
+ addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
+
+ if (i % 1000 == 0 || i == maxInserts - 1) {
+ tpc.proxy().updateAndFlush(userpass, testtable, mutations);
+ mutations.clear();
+ }
+
+ }
+
+ String regex = ".*[02468]";
+
+ org.apache.accumulo.core.client.IteratorSetting is = new org.apache.accumulo.core.client.IteratorSetting(50, regex, RegExFilter.class);
+ RegExFilter.setRegexs(is, regex, null, null, null, false);
+
+ IteratorSetting pis = Util.iteratorSetting2ProxyIteratorSetting(is);
+ ScanOptions opts = new ScanOptions();
+ opts.iterators = Collections.singletonList(pis);
+ String cookie = tpc.proxy().createScanner(userpass, testtable, opts);
+
+ int i = 0;
+ boolean hasNext = true;
+
+ int k = 1000;
+ while (hasNext) {
+ ScanResult kvList = tpc.proxy().nextK(cookie, k);
+ for (KeyValue kv : kvList.getResults()) {
+        assertEquals(i, Integer.parseInt(new String(kv.getKey().getRow())));
+
+ i += 2;
+ }
+ hasNext = kvList.isMore();
+ }
+ }
+
+ @Test
+ public void readWriteOneShotWithRange() throws Exception {
+ int maxInserts = 100000;
+ Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
+ String format = "%1$05d";
+ for (int i = 0; i < maxInserts; i++) {
+ addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
+
+ if (i % 1000 == 0 || i == maxInserts - 1) {
+ tpc.proxy().updateAndFlush(userpass, testtable, mutations);
+ mutations.clear();
+ }
+ }
+
+ Key stop = new Key();
+ stop.setRow("5".getBytes());
+ ScanOptions opts = new ScanOptions();
+ opts.range = new Range(null, false, stop, false);
+ String cookie = tpc.proxy().createScanner(userpass, testtable, opts);
+
+ int i = 0;
+ boolean hasNext = true;
+
+ int k = 1000;
+ while (hasNext) {
+ ScanResult kvList = tpc.proxy().nextK(cookie, k);
+ i += kvList.getResultsSize();
+ hasNext = kvList.isMore();
+ }
+    assertEquals(50000, i);
+ }
+
+  /**
+   * Insert 10000 cells whose rows are [00000..09999] (zero-padded). Filter the results so that only the even-numbered rows come back.
+   */
+ @Test
+ public void readWriteOneShotWithFilterIterator() throws Exception {
+ int maxInserts = 10000;
+ Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
+ String format = "%1$05d";
+ for (int i = 0; i < maxInserts; i++) {
+ addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
+
+ if (i % 1000 == 0 || i == maxInserts - 1) {
+
+ tpc.proxy().updateAndFlush(userpass, testtable, mutations);
+ mutations.clear();
+
+ }
+
+ }
+
+ String regex = ".*[02468]";
+
+ org.apache.accumulo.core.client.IteratorSetting is = new org.apache.accumulo.core.client.IteratorSetting(50, regex, RegExFilter.class);
+ RegExFilter.setRegexs(is, regex, null, null, null, false);
+
+ IteratorSetting pis = Util.iteratorSetting2ProxyIteratorSetting(is);
+ ScanOptions opts = new ScanOptions();
+ opts.iterators = Collections.singletonList(pis);
+ String cookie = tpc.proxy().createScanner(userpass, testtable, opts);
+
+ int i = 0;
+ boolean hasNext = true;
+
+ int k = 1000;
+ while (hasNext) {
+ ScanResult kvList = tpc.proxy().nextK(cookie, k);
+ for (KeyValue kv : kvList.getResults()) {
+        assertEquals(i, Integer.parseInt(new String(kv.getKey().getRow())));
+
+ i += 2;
+ }
+ hasNext = kvList.isMore();
+ }
+ }
+
+  // @Test
+  // This test takes a long time to run. Enable it if you suspect memory issues with large volumes of writes and reads.
+ public void manyWritesAndReads() throws Exception {
+ int maxInserts = 1000000;
+ Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
+ String format = "%1$06d";
+ String writer = tpc.proxy().createWriter(userpass, testtable, null);
+ for (int i = 0; i < maxInserts; i++) {
+ addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
+
+ if (i % 1000 == 0 || i == maxInserts - 1) {
+
+ tpc.proxy().update(writer, mutations);
+ mutations.clear();
+
+ }
+
+ }
+
+ tpc.proxy().flush(writer);
+ tpc.proxy().closeWriter(writer);
+
+ String cookie = tpc.proxy().createScanner(userpass, testtable, null);
+
+ int i = 0;
+ boolean hasNext = true;
+
+ int k = 1000;
+ while (hasNext) {
+ ScanResult kvList = tpc.proxy().nextK(cookie, k);
+ for (KeyValue kv : kvList.getResults()) {
+        assertEquals(i, Integer.parseInt(new String(kv.getKey().getRow())));
+ i++;
+ }
+ hasNext = kvList.isMore();
+ if (hasNext)
+ assertEquals(k, kvList.getResults().size());
+ }
+ assertEquals(maxInserts, i);
+ }
+
+ @Test
+ public void asynchReadWrite() throws Exception {
+ int maxInserts = 10000;
+ Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
+ String format = "%1$05d";
+ String writer = tpc.proxy().createWriter(userpass, testtable, null);
+ for (int i = 0; i < maxInserts; i++) {
+ addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
+
+ if (i % 1000 == 0 || i == maxInserts - 1) {
+ tpc.proxy().update(writer, mutations);
+ mutations.clear();
+ }
+ }
+
+ tpc.proxy().flush(writer);
+ tpc.proxy().closeWriter(writer);
+
+ String regex = ".*[02468]";
+
+ org.apache.accumulo.core.client.IteratorSetting is = new org.apache.accumulo.core.client.IteratorSetting(50, regex, RegExFilter.class);
+ RegExFilter.setRegexs(is, regex, null, null, null, false);
+
+ IteratorSetting pis = Util.iteratorSetting2ProxyIteratorSetting(is);
+ ScanOptions opts = new ScanOptions();
+ opts.iterators = Collections.singletonList(pis);
+ String cookie = tpc.proxy().createScanner(userpass, testtable, opts);
+
+ int i = 0;
+ boolean hasNext = true;
+
+ int k = 1000;
+ int numRead = 0;
+ while (hasNext) {
+ ScanResult kvList = tpc.proxy().nextK(cookie, k);
+ for (KeyValue kv : kvList.getResults()) {
+ assertEquals(i, Integer.parseInt(new String(kv.getKey().getRow())));
+ numRead++;
+ i += 2;
+ }
+ hasNext = kvList.isMore();
+ }
+ assertEquals(maxInserts / 2, numRead);
+ }
+
+ @Test
+ public void testVisibility() throws Exception {
+
+ Set<ByteBuffer> auths = new HashSet<ByteBuffer>();
+ auths.add(ByteBuffer.wrap("even".getBytes()));
+ tpc.proxy().changeUserAuthorizations(userpass, "root", auths);
+
+ int maxInserts = 10000;
+ Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
+ String format = "%1$05d";
+ String writer = tpc.proxy().createWriter(userpass, testtable, null);
+ for (int i = 0; i < maxInserts; i++) {
+ if (i % 2 == 0)
+ addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, "even", Util.randString(10));
+ else
+ addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, "odd", Util.randString(10));
+
+ if (i % 1000 == 0 || i == maxInserts - 1) {
+ tpc.proxy().update(writer, mutations);
+ mutations.clear();
+ }
+ }
+
+ tpc.proxy().flush(writer);
+ tpc.proxy().closeWriter(writer);
+ ScanOptions opts = new ScanOptions();
+ opts.authorizations = auths;
+ String cookie = tpc.proxy().createScanner(userpass, testtable, opts);
+
+ int i = 0;
+ boolean hasNext = true;
+
+ int k = 1000;
+ int numRead = 0;
+ while (hasNext) {
+ ScanResult kvList = tpc.proxy().nextK(cookie, k);
+ for (KeyValue kv : kvList.getResults()) {
+        assertEquals(i, Integer.parseInt(new String(kv.getKey().getRow())));
+ i += 2;
+ numRead++;
+ }
+ hasNext = kvList.isMore();
+
+ }
+ assertEquals(maxInserts / 2, numRead);
+ }
+
+}
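
Every read path above uses the same paging idiom: hold the scanner cookie and keep calling nextK until isMore() reports the scan is exhausted. Distilled:

String cookie = tpc.proxy().createScanner(userpass, testtable, opts);
int total = 0;
boolean hasNext = true;
while (hasNext) {
  ScanResult kvList = tpc.proxy().nextK(cookie, 1000);
  total += kvList.getResultsSize();
  hasNext = kvList.isMore();
}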
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/proxy/TestProxySecurityOperations.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/proxy/TestProxySecurityOperations.java b/test/src/main/java/org/apache/accumulo/test/proxy/TestProxySecurityOperations.java
new file mode 100644
index 0000000..eda38e5
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/proxy/TestProxySecurityOperations.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.proxy;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.TreeMap;
+
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.util.ByteBufferUtil;
+import org.apache.accumulo.proxy.Proxy;
+import org.apache.accumulo.proxy.thrift.SystemPermission;
+import org.apache.accumulo.proxy.thrift.TablePermission;
+import org.apache.accumulo.proxy.thrift.TimeType;
+import org.apache.thrift.TException;
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.server.TServer;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.net.HostAndPort;
+
+public class TestProxySecurityOperations {
+ protected static TServer proxy;
+ protected static TestProxyClient tpc;
+ protected static ByteBuffer userpass;
+ protected static final int port = 10196;
+ protected static final String testtable = "testtable";
+ protected static final String testuser = "VonJines";
+ protected static final ByteBuffer testpw = ByteBuffer.wrap("fiveones".getBytes());
+
+ @BeforeClass
+ public static void setup() throws Exception {
+ Properties prop = new Properties();
+ prop.setProperty("useMockInstance", "true");
+ prop.put("tokenClass", PasswordToken.class.getName());
+
+ proxy = Proxy.createProxyServer(HostAndPort.fromParts("localhost", port), new TCompactProtocol.Factory(), prop).server;
+ while (!proxy.isServing()) {
+ Thread.sleep(500);
+ }
+ tpc = new TestProxyClient("localhost", port);
+ userpass = tpc.proxy().login("root", Collections.singletonMap("password", ""));
+ }
+
+ @AfterClass
+ public static void tearDown() throws InterruptedException {
+ proxy.stop();
+ }
+
+ @Before
+ public void makeTestTableAndUser() throws Exception {
+ tpc.proxy().createTable(userpass, testtable, true, TimeType.MILLIS);
+ tpc.proxy().createLocalUser(userpass, testuser, testpw);
+ }
+
+ @After
+ public void deleteTestTable() throws Exception {
+ tpc.proxy().deleteTable(userpass, testtable);
+ tpc.proxy().dropLocalUser(userpass, testuser);
+ }
+
+ @Test
+ public void create() throws TException {
+ tpc.proxy().createLocalUser(userpass, testuser + "2", testpw);
+ assertTrue(tpc.proxy().listLocalUsers(userpass).contains(testuser + "2"));
+ tpc.proxy().dropLocalUser(userpass, testuser + "2");
+    assertFalse(tpc.proxy().listLocalUsers(userpass).contains(testuser + "2"));
+ }
+
+ @Test
+ public void authenticate() throws TException {
+ assertTrue(tpc.proxy().authenticateUser(userpass, testuser, bb2pp(testpw)));
+ assertFalse(tpc.proxy().authenticateUser(userpass, "EvilUser", bb2pp(testpw)));
+
+ tpc.proxy().changeLocalUserPassword(userpass, testuser, ByteBuffer.wrap("newpass".getBytes()));
+ assertFalse(tpc.proxy().authenticateUser(userpass, testuser, bb2pp(testpw)));
+ assertTrue(tpc.proxy().authenticateUser(userpass, testuser, bb2pp(ByteBuffer.wrap("newpass".getBytes()))));
+
+ }
+
+ @Test
+ public void tablePermissions() throws TException {
+ tpc.proxy().grantTablePermission(userpass, testuser, testtable, TablePermission.ALTER_TABLE);
+ assertTrue(tpc.proxy().hasTablePermission(userpass, testuser, testtable, TablePermission.ALTER_TABLE));
+
+ tpc.proxy().revokeTablePermission(userpass, testuser, testtable, TablePermission.ALTER_TABLE);
+ assertFalse(tpc.proxy().hasTablePermission(userpass, testuser, testtable, TablePermission.ALTER_TABLE));
+
+ }
+
+ @Test
+ public void systemPermissions() throws TException {
+ tpc.proxy().grantSystemPermission(userpass, testuser, SystemPermission.ALTER_USER);
+ assertTrue(tpc.proxy().hasSystemPermission(userpass, testuser, SystemPermission.ALTER_USER));
+
+ tpc.proxy().revokeSystemPermission(userpass, testuser, SystemPermission.ALTER_USER);
+ assertFalse(tpc.proxy().hasSystemPermission(userpass, testuser, SystemPermission.ALTER_USER));
+
+ }
+
+ @Test
+ public void auths() throws TException {
+ HashSet<ByteBuffer> newauths = new HashSet<ByteBuffer>();
+ newauths.add(ByteBuffer.wrap("BBR".getBytes()));
+ newauths.add(ByteBuffer.wrap("Barney".getBytes()));
+ tpc.proxy().changeUserAuthorizations(userpass, testuser, newauths);
+ List<ByteBuffer> actualauths = tpc.proxy().getUserAuthorizations(userpass, testuser);
+    assertEquals(newauths.size(), actualauths.size());
+
+ for (ByteBuffer auth : actualauths) {
+ assertTrue(newauths.contains(auth));
+ }
+ }
+
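+  // Wrap a password ByteBuffer in the "password" properties map that the proxy login/authenticate calls expect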
+ private Map<String,String> bb2pp(ByteBuffer cf) {
+ Map<String,String> toRet = new TreeMap<String,String>();
+ toRet.put("password", ByteBufferUtil.toString(cf));
+ return toRet;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyTableOperations.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyTableOperations.java b/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyTableOperations.java
new file mode 100644
index 0000000..e8d7b1e
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/proxy/TestProxyTableOperations.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.proxy;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.proxy.Proxy;
+import org.apache.accumulo.proxy.thrift.ColumnUpdate;
+import org.apache.accumulo.proxy.thrift.TimeType;
+import org.apache.thrift.TException;
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.server.TServer;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.net.HostAndPort;
+
+public class TestProxyTableOperations {
+
+ protected static TServer proxy;
+ protected static TestProxyClient tpc;
+ protected static ByteBuffer userpass;
+ protected static final int port = 10195;
+ protected static final String testtable = "testtable";
+
+ @BeforeClass
+ public static void setup() throws Exception {
+ Properties prop = new Properties();
+ prop.setProperty("useMockInstance", "true");
+ prop.put("tokenClass", PasswordToken.class.getName());
+
+ proxy = Proxy.createProxyServer(HostAndPort.fromParts("localhost", port), new TCompactProtocol.Factory(), prop).server;
+ while (!proxy.isServing()) {
+ Thread.sleep(500);
+ }
+ tpc = new TestProxyClient("localhost", port);
+ userpass = tpc.proxy().login("root", Collections.singletonMap("password", ""));
+ }
+
+ @AfterClass
+ public static void tearDown() throws InterruptedException {
+ proxy.stop();
+ }
+
+ @Before
+ public void makeTestTable() throws Exception {
+ tpc.proxy().createTable(userpass, testtable, true, TimeType.MILLIS);
+ }
+
+ @After
+ public void deleteTestTable() throws Exception {
+ tpc.proxy().deleteTable(userpass, testtable);
+ }
+
+ @Test
+ public void createExistsDelete() throws TException {
+ assertFalse(tpc.proxy().tableExists(userpass, "testtable2"));
+ tpc.proxy().createTable(userpass, "testtable2", true, TimeType.MILLIS);
+ assertTrue(tpc.proxy().tableExists(userpass, "testtable2"));
+ tpc.proxy().deleteTable(userpass, "testtable2");
+ assertFalse(tpc.proxy().tableExists(userpass, "testtable2"));
+ }
+
+ @Test
+ public void listRename() throws TException {
+ assertFalse(tpc.proxy().tableExists(userpass, "testtable2"));
+ tpc.proxy().renameTable(userpass, testtable, "testtable2");
+ assertTrue(tpc.proxy().tableExists(userpass, "testtable2"));
+ tpc.proxy().renameTable(userpass, "testtable2", testtable);
+ assertTrue(tpc.proxy().listTables(userpass).contains("testtable"));
+
+ }
+
+ // This test does not yet function because the backing Mock instance does not yet support merging
+ @Test
+ public void merge() throws TException {
+ Set<ByteBuffer> splits = new HashSet<ByteBuffer>();
+ splits.add(ByteBuffer.wrap("a".getBytes()));
+ splits.add(ByteBuffer.wrap("c".getBytes()));
+ splits.add(ByteBuffer.wrap("z".getBytes()));
+ tpc.proxy().addSplits(userpass, testtable, splits);
+
+ tpc.proxy().mergeTablets(userpass, testtable, ByteBuffer.wrap("b".getBytes()), ByteBuffer.wrap("d".getBytes()));
+
+ splits.remove(ByteBuffer.wrap("c".getBytes()));
+
+ List<ByteBuffer> tableSplits = tpc.proxy().listSplits(userpass, testtable, 10);
+
+ for (ByteBuffer split : tableSplits)
+ assertTrue(splits.contains(split));
+    assertEquals(splits.size(), tableSplits.size());
+
+ }
+
+ @Test
+ public void splits() throws TException {
+ Set<ByteBuffer> splits = new HashSet<ByteBuffer>();
+ splits.add(ByteBuffer.wrap("a".getBytes()));
+ splits.add(ByteBuffer.wrap("b".getBytes()));
+ splits.add(ByteBuffer.wrap("z".getBytes()));
+ tpc.proxy().addSplits(userpass, testtable, splits);
+
+ List<ByteBuffer> tableSplits = tpc.proxy().listSplits(userpass, testtable, 10);
+
+ for (ByteBuffer split : tableSplits)
+ assertTrue(splits.contains(split));
+    assertEquals(splits.size(), tableSplits.size());
+ }
+
+ @Test
+ public void constraints() throws TException {
+ int cid = tpc.proxy().addConstraint(userpass, testtable, "org.apache.accumulo.TestConstraint");
+ Map<String,Integer> constraints = tpc.proxy().listConstraints(userpass, testtable);
+    assertEquals(cid, (int) constraints.get("org.apache.accumulo.TestConstraint"));
+ tpc.proxy().removeConstraint(userpass, testtable, cid);
+ constraints = tpc.proxy().listConstraints(userpass, testtable);
+ assertNull(constraints.get("org.apache.accumulo.TestConstraint"));
+ }
+
+ @Test
+ public void localityGroups() throws TException {
+ Map<String,Set<String>> groups = new HashMap<String,Set<String>>();
+ Set<String> group1 = new HashSet<String>();
+ group1.add("cf1");
+ groups.put("group1", group1);
+ Set<String> group2 = new HashSet<String>();
+ group2.add("cf2");
+ group2.add("cf3");
+ groups.put("group2", group2);
+ tpc.proxy().setLocalityGroups(userpass, testtable, groups);
+
+ Map<String,Set<String>> actualGroups = tpc.proxy().getLocalityGroups(userpass, testtable);
+
+ assertEquals(groups.size(), actualGroups.size());
+ for (String groupName : groups.keySet()) {
+ assertTrue(actualGroups.containsKey(groupName));
+ assertEquals(groups.get(groupName).size(), actualGroups.get(groupName).size());
+ for (String cf : groups.get(groupName)) {
+ assertTrue(actualGroups.get(groupName).contains(cf));
+ }
+ }
+ }
+
+ @Test
+ public void tableProperties() throws TException {
+ tpc.proxy().setTableProperty(userpass, testtable, "test.property1", "wharrrgarbl");
+    assertEquals("wharrrgarbl", tpc.proxy().getTableProperties(userpass, testtable).get("test.property1"));
+ tpc.proxy().removeTableProperty(userpass, testtable, "test.property1");
+ assertNull(tpc.proxy().getTableProperties(userpass, testtable).get("test.property1"));
+ }
+
+ private static void addMutation(Map<ByteBuffer,List<ColumnUpdate>> mutations, String row, String cf, String cq, String value) {
+ ColumnUpdate update = new ColumnUpdate(ByteBuffer.wrap(cf.getBytes()), ByteBuffer.wrap(cq.getBytes()));
+ update.setValue(value.getBytes());
+ mutations.put(ByteBuffer.wrap(row.getBytes()), Collections.singletonList(update));
+ }
+
+ @Test
+ public void tableOperationsRowMethods() throws TException {
+ Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
+ for (int i = 0; i < 10; i++) {
+ addMutation(mutations, "" + i, "cf", "cq", "");
+ }
+ tpc.proxy().updateAndFlush(userpass, testtable, mutations);
+
+    assertEquals(ByteBuffer.wrap("9".getBytes()), tpc.proxy().getMaxRow(userpass, testtable, null, null, true, null, true));
+
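+    // deleteRows removes (start, end]; rows "6" through "9" sort inside ("51", "99"], leaving "5" as the max row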
+ tpc.proxy().deleteRows(userpass, testtable, ByteBuffer.wrap("51".getBytes()), ByteBuffer.wrap("99".getBytes()));
+    assertEquals(ByteBuffer.wrap("5".getBytes()), tpc.proxy().getMaxRow(userpass, testtable, null, null, true, null, true));
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/replication/CyclicReplicationIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/CyclicReplicationIT.java b/test/src/main/java/org/apache/accumulo/test/replication/CyclicReplicationIT.java
new file mode 100644
index 0000000..3a1d413
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/replication/CyclicReplicationIT.java
@@ -0,0 +1,332 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.replication;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.OutputStream;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.LongCombiner.Type;
+import org.apache.accumulo.core.iterators.user.SummingCombiner;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.minicluster.impl.ProcessReference;
+import org.apache.accumulo.minicluster.impl.ZooKeeperBindException;
+import org.apache.accumulo.server.replication.ReplicaSystemFactory;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.accumulo.tserver.TabletServer;
+import org.apache.accumulo.tserver.replication.AccumuloReplicaSystem;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Iterables;
+
+/**
+ * Set up two mini clusters that replicate to each other and verify that a single entry written to one side arrives on the other exactly once, rather than
+ * cycling between the clusters indefinitely.
+ */
+public class CyclicReplicationIT {
+ private static final Logger log = LoggerFactory.getLogger(CyclicReplicationIT.class);
+
+ @Rule
+ public Timeout getTimeout() {
+ int scalingFactor = 1;
+ try {
+ scalingFactor = Integer.parseInt(System.getProperty("timeout.factor"));
+ } catch (NumberFormatException exception) {
+ log.warn("Could not parse timeout.factor, not scaling timeout");
+ }
+
+ return new Timeout(scalingFactor * 5 * 60 * 1000);
+ }
+
+ @Rule
+ public TestName testName = new TestName();
+
+ private File createTestDir(String name) {
+ File baseDir = new File(System.getProperty("user.dir") + "/target/mini-tests");
+ assertTrue(baseDir.mkdirs() || baseDir.isDirectory());
+ File testDir = new File(baseDir, this.getClass().getName() + "_" + testName.getMethodName() + "_" + name);
+ FileUtils.deleteQuietly(testDir);
+ assertTrue(testDir.mkdir());
+ return testDir;
+ }
+
+ private void setCoreSite(MiniAccumuloClusterImpl cluster) throws Exception {
+    File csFile = new File(cluster.getConfig().getConfDir(), "core-site.xml");
+    if (csFile.exists())
+      throw new RuntimeException(csFile + " already exists");
+
+    Configuration coreSite = new Configuration(false);
+    coreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+    OutputStream out = new BufferedOutputStream(new FileOutputStream(csFile));
+    coreSite.writeXml(out);
+    out.close();
+ }
+
+ /**
+ * Use the same SSL and credential provider configuration that is set up by AbstractMacIT for the other MAC used for replication
+ */
+ private void updatePeerConfigFromPrimary(MiniAccumuloConfigImpl primaryCfg, MiniAccumuloConfigImpl peerCfg) {
+ // Set the same SSL information from the primary when present
+ Map<String,String> primarySiteConfig = primaryCfg.getSiteConfig();
+ if ("true".equals(primarySiteConfig.get(Property.INSTANCE_RPC_SSL_ENABLED.getKey()))) {
+ Map<String,String> peerSiteConfig = new HashMap<String,String>();
+ peerSiteConfig.put(Property.INSTANCE_RPC_SSL_ENABLED.getKey(), "true");
+ String keystorePath = primarySiteConfig.get(Property.RPC_SSL_KEYSTORE_PATH.getKey());
+ Assert.assertNotNull("Keystore Path was null", keystorePath);
+ peerSiteConfig.put(Property.RPC_SSL_KEYSTORE_PATH.getKey(), keystorePath);
+ String truststorePath = primarySiteConfig.get(Property.RPC_SSL_TRUSTSTORE_PATH.getKey());
+ Assert.assertNotNull("Truststore Path was null", truststorePath);
+ peerSiteConfig.put(Property.RPC_SSL_TRUSTSTORE_PATH.getKey(), truststorePath);
+
+ // Passwords might be stored in CredentialProvider
+ String keystorePassword = primarySiteConfig.get(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey());
+ if (null != keystorePassword) {
+ peerSiteConfig.put(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey(), keystorePassword);
+ }
+ String truststorePassword = primarySiteConfig.get(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey());
+ if (null != truststorePassword) {
+ peerSiteConfig.put(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey(), truststorePassword);
+ }
+
+ System.out.println("Setting site configuration for peer " + peerSiteConfig);
+ peerCfg.setSiteConfig(peerSiteConfig);
+ }
+
+ // Use the CredentialProvider if the primary also uses one
+ String credProvider = primarySiteConfig.get(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey());
+ if (null != credProvider) {
+ Map<String,String> peerSiteConfig = peerCfg.getSiteConfig();
+ peerSiteConfig.put(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey(), credProvider);
+ peerCfg.setSiteConfig(peerSiteConfig);
+ }
+ }
+
+ @Test
+ public void dataIsNotOverReplicated() throws Exception {
+ File master1Dir = createTestDir("master1"), master2Dir = createTestDir("master2");
+ String password = "password";
+
+ MiniAccumuloConfigImpl master1Cfg;
+ MiniAccumuloClusterImpl master1Cluster;
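+    // MAC picks ephemeral ports; retry startup when ZooKeeper loses the race to bind its port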
+ while (true) {
+ master1Cfg = new MiniAccumuloConfigImpl(master1Dir, password);
+ master1Cfg.setNumTservers(1);
+ master1Cfg.setInstanceName("master1");
+
+ // Set up SSL if needed
+ ConfigurableMacBase.configureForEnvironment(master1Cfg, this.getClass(), ConfigurableMacBase.getSslDir(master1Dir));
+
+ master1Cfg.setProperty(Property.REPLICATION_NAME, master1Cfg.getInstanceName());
+ master1Cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "5M");
+ master1Cfg.setProperty(Property.REPLICATION_THREADCHECK, "5m");
+ master1Cfg.setProperty(Property.REPLICATION_WORK_ASSIGNMENT_SLEEP, "1s");
+ master1Cfg.setProperty(Property.MASTER_REPLICATION_SCAN_INTERVAL, "1s");
+ master1Cluster = new MiniAccumuloClusterImpl(master1Cfg);
+ setCoreSite(master1Cluster);
+
+ try {
+ master1Cluster.start();
+ break;
+ } catch (ZooKeeperBindException e) {
+ log.warn("Failed to start ZooKeeper on " + master1Cfg.getZooKeeperPort() + ", will retry");
+ }
+ }
+
+ MiniAccumuloConfigImpl master2Cfg;
+ MiniAccumuloClusterImpl master2Cluster;
+ while (true) {
+ master2Cfg = new MiniAccumuloConfigImpl(master2Dir, password);
+ master2Cfg.setNumTservers(1);
+ master2Cfg.setInstanceName("master2");
+
+ // Set up SSL if needed. Need to share the same SSL truststore as master1
+ this.updatePeerConfigFromPrimary(master1Cfg, master2Cfg);
+
+ master2Cfg.setProperty(Property.REPLICATION_NAME, master2Cfg.getInstanceName());
+ master2Cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "5M");
+ master2Cfg.setProperty(Property.REPLICATION_THREADCHECK, "5m");
+ master2Cfg.setProperty(Property.REPLICATION_WORK_ASSIGNMENT_SLEEP, "1s");
+ master2Cfg.setProperty(Property.MASTER_REPLICATION_SCAN_INTERVAL, "1s");
+ master2Cluster = new MiniAccumuloClusterImpl(master2Cfg);
+ setCoreSite(master2Cluster);
+
+ try {
+ master2Cluster.start();
+ break;
+ } catch (ZooKeeperBindException e) {
+ log.warn("Failed to start ZooKeeper on " + master2Cfg.getZooKeeperPort() + ", will retry");
+ }
+ }
+
+ try {
+ Connector connMaster1 = master1Cluster.getConnector("root", new PasswordToken(password)), connMaster2 = master2Cluster.getConnector("root",
+ new PasswordToken(password));
+
+ String master1UserName = "master1", master1Password = "foo";
+ String master2UserName = "master2", master2Password = "bar";
+ String master1Table = master1Cluster.getInstanceName(), master2Table = master2Cluster.getInstanceName();
+
+ connMaster1.securityOperations().createLocalUser(master1UserName, new PasswordToken(master1Password));
+ connMaster2.securityOperations().createLocalUser(master2UserName, new PasswordToken(master2Password));
+
+ // Configure the credentials we should use to authenticate ourselves to the peer for replication
+ connMaster1.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + master2Cluster.getInstanceName(), master2UserName);
+ connMaster1.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + master2Cluster.getInstanceName(), master2Password);
+
+ connMaster2.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + master1Cluster.getInstanceName(), master1UserName);
+ connMaster2.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + master1Cluster.getInstanceName(), master1Password);
+
+ connMaster1.instanceOperations().setProperty(
+ Property.REPLICATION_PEERS.getKey() + master2Cluster.getInstanceName(),
+ ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
+ AccumuloReplicaSystem.buildConfiguration(master2Cluster.getInstanceName(), master2Cluster.getZooKeepers())));
+
+ connMaster2.instanceOperations().setProperty(
+ Property.REPLICATION_PEERS.getKey() + master1Cluster.getInstanceName(),
+ ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
+ AccumuloReplicaSystem.buildConfiguration(master1Cluster.getInstanceName(), master1Cluster.getZooKeepers())));
+
+ connMaster1.tableOperations().create(master1Table, new NewTableConfiguration().withoutDefaultIterators());
+ String master1TableId = connMaster1.tableOperations().tableIdMap().get(master1Table);
+ Assert.assertNotNull(master1TableId);
+
+ connMaster2.tableOperations().create(master2Table, new NewTableConfiguration().withoutDefaultIterators());
+ String master2TableId = connMaster2.tableOperations().tableIdMap().get(master2Table);
+ Assert.assertNotNull(master2TableId);
+
+ // Replicate master1 in the master1 cluster to master2 in the master2 cluster
+ connMaster1.tableOperations().setProperty(master1Table, Property.TABLE_REPLICATION.getKey(), "true");
+ connMaster1.tableOperations().setProperty(master1Table, Property.TABLE_REPLICATION_TARGET.getKey() + master2Cluster.getInstanceName(), master2TableId);
+
+      // Replicate master2 in the master2 cluster to master1 in the master1 cluster
+ connMaster2.tableOperations().setProperty(master2Table, Property.TABLE_REPLICATION.getKey(), "true");
+ connMaster2.tableOperations().setProperty(master2Table, Property.TABLE_REPLICATION_TARGET.getKey() + master1Cluster.getInstanceName(), master1TableId);
+
+ // Give our replication user the ability to write to the respective table
+ connMaster1.securityOperations().grantTablePermission(master1UserName, master1Table, TablePermission.WRITE);
+ connMaster2.securityOperations().grantTablePermission(master2UserName, master2Table, TablePermission.WRITE);
+
+ IteratorSetting summingCombiner = new IteratorSetting(50, SummingCombiner.class);
+ SummingCombiner.setEncodingType(summingCombiner, Type.STRING);
+ SummingCombiner.setCombineAllColumns(summingCombiner, true);
+
+ // Set a combiner on both instances that will sum multiple values
+ // We can use this to verify that the mutation was not sent multiple times
+ connMaster1.tableOperations().attachIterator(master1Table, summingCombiner);
+ connMaster2.tableOperations().attachIterator(master2Table, summingCombiner);
+
+ // Write a single entry
+ BatchWriter bw = connMaster1.createBatchWriter(master1Table, new BatchWriterConfig());
+ Mutation m = new Mutation("row");
+ m.put("count", "", "1");
+ bw.addMutation(m);
+ bw.close();
+
+ Set<String> files = connMaster1.replicationOperations().referencedFiles(master1Table);
+
+ log.info("Found {} that need replication from master1", files);
+
+ // Kill and restart the tserver to close the WAL on master1
+ for (ProcessReference proc : master1Cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
+ master1Cluster.killProcess(ServerType.TABLET_SERVER, proc);
+ }
+
+ master1Cluster.exec(TabletServer.class);
+
+ log.info("Restarted tserver on master1");
+
+ // Try to avoid ACCUMULO-2964
+ Thread.sleep(1000);
+
+ // Sanity check that the element is there on master1
+ Scanner s = connMaster1.createScanner(master1Table, Authorizations.EMPTY);
+ Entry<Key,Value> entry = Iterables.getOnlyElement(s);
+ Assert.assertEquals("1", entry.getValue().toString());
+
+ // Wait for this table to replicate
+ connMaster1.replicationOperations().drain(master1Table, files);
+
+ Thread.sleep(5000);
+
+ // Check that the element made it to master2 only once
+ s = connMaster2.createScanner(master2Table, Authorizations.EMPTY);
+ entry = Iterables.getOnlyElement(s);
+ Assert.assertEquals("1", entry.getValue().toString());
+
+ // Wait for master2 to finish replicating it back
+ files = connMaster2.replicationOperations().referencedFiles(master2Table);
+
+ // Kill and restart the tserver to close the WAL on master2
+ for (ProcessReference proc : master2Cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
+ master2Cluster.killProcess(ServerType.TABLET_SERVER, proc);
+ }
+
+ master2Cluster.exec(TabletServer.class);
+
+ // Try to avoid ACCUMULO-2964
+ Thread.sleep(1000);
+
+ // Check that the element made it to master2 only once
+ s = connMaster2.createScanner(master2Table, Authorizations.EMPTY);
+ entry = Iterables.getOnlyElement(s);
+ Assert.assertEquals("1", entry.getValue().toString());
+
+ connMaster2.replicationOperations().drain(master2Table, files);
+
+ Thread.sleep(5000);
+
+ // Verify that the entry wasn't sent back to master1
+ s = connMaster1.createScanner(master1Table, Authorizations.EMPTY);
+ entry = Iterables.getOnlyElement(s);
+ Assert.assertEquals("1", entry.getValue().toString());
+ } finally {
+ master1Cluster.stop();
+ master2Cluster.stop();
+ }
+ }
+
+}
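
The wait-for-replication handshake that appears twice above is the heart of the test: snapshot which WAL files a table still references, force those WALs closed (here by killing and restarting the lone tserver), then drain on the snapshot so the call blocks until every referenced file has been fully replicated. In outline:

Set<String> files = conn.replicationOperations().referencedFiles(tableName);
// ... bounce the tablet server so the WALs are closed ...
conn.replicationOperations().drain(tableName, files); // blocks until those files replicate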
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/replication/GarbageCollectorCommunicatesWithTServersIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/GarbageCollectorCommunicatesWithTServersIT.java b/test/src/main/java/org/apache/accumulo/test/replication/GarbageCollectorCommunicatesWithTServersIT.java
new file mode 100644
index 0000000..ab142d0
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/replication/GarbageCollectorCommunicatesWithTServersIT.java
@@ -0,0 +1,417 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.replication;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.impl.ClientContext;
+import org.apache.accumulo.core.client.impl.ClientExecReturn;
+import org.apache.accumulo.core.client.impl.Credentials;
+import org.apache.accumulo.core.client.impl.MasterClient;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.master.thrift.MasterClientService;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.protobuf.ProtobufUtil;
+import org.apache.accumulo.core.replication.ReplicationTable;
+import org.apache.accumulo.core.rpc.ThriftUtil;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Client;
+import org.apache.accumulo.core.trace.Tracer;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.log.WalStateManager;
+import org.apache.accumulo.server.log.WalStateManager.WalState;
+import org.apache.accumulo.server.replication.proto.Replication.Status;
+import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.net.HostAndPort;
+
+/**
+ * ACCUMULO-3302 series of tests which ensure that a WAL is not prematurely closed while a TServer may still continue to use it. Checking that no tablet
+ * references a WAL is insufficient to determine whether a WAL will ever be used again.
+ */
+public class GarbageCollectorCommunicatesWithTServersIT extends ConfigurableMacBase {
+ private static final Logger log = LoggerFactory.getLogger(GarbageCollectorCommunicatesWithTServersIT.class);
+
+ private final int GC_PERIOD_SECONDS = 1;
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
+ cfg.setNumTservers(1);
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
+ cfg.setProperty(Property.GC_CYCLE_DELAY, GC_PERIOD_SECONDS + "s");
+ // Wait longer to try to let the replication table come online before a cycle runs
+ cfg.setProperty(Property.GC_CYCLE_START, "10s");
+ cfg.setProperty(Property.REPLICATION_NAME, "master");
+ // Set very long delays for the master's replication work. These tests don't need
+ // it to be doing anything, so just let it sleep
+ cfg.setProperty(Property.REPLICATION_WORK_PROCESSOR_DELAY, "240s");
+ cfg.setProperty(Property.MASTER_REPLICATION_SCAN_INTERVAL, "240s");
+ cfg.setProperty(Property.REPLICATION_DRIVER_DELAY, "240s");
+ // Pull down the maximum size of the WAL so we can test close()'ing it.
+ cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "1M");
+ coreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ /**
+ * Fetch all of the WALs tracked in ZooKeeper's WAL state (the table name is only used to verify the table exists)
+ */
+ private Set<String> getWalsForTable(String tableName) throws Exception {
+ final Connector conn = getConnector();
+ final String tableId = conn.tableOperations().tableIdMap().get(tableName);
+
+ Assert.assertNotNull("Could not determine table ID for " + tableName, tableId);
+
+ Instance i = conn.getInstance();
+ ZooReaderWriter zk = new ZooReaderWriter(i.getZooKeepers(), i.getZooKeepersSessionTimeOut(), "");
+ WalStateManager wals = new WalStateManager(conn.getInstance(), zk);
+
+ Set<String> result = new HashSet<String>();
+ for (Entry<Path,WalState> entry : wals.getAllState().entrySet()) {
+ log.debug("Reading WALs: {}={}", entry.getKey(), entry.getValue());
+ result.add(entry.getKey().toString());
+ }
+ return result;
+ }
+
+ /**
+ * Fetch all of the rfiles referenced by tablets in the metadata table for this table
+ */
+ private Set<String> getFilesForTable(String tableName) throws Exception {
+ final Connector conn = getConnector();
+ final String tableId = conn.tableOperations().tableIdMap().get(tableName);
+
+ Assert.assertNotNull("Could not determine table ID for " + tableName, tableId);
+
+ Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ Range r = MetadataSchema.TabletsSection.getRange(tableId);
+ s.setRange(r);
+ s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+
+ Set<String> rfiles = new HashSet<String>();
+ for (Entry<Key,Value> entry : s) {
+ log.debug("Reading RFiles: {}={}", entry.getKey().toStringNoTruncate(), entry.getValue());
+ // uri://path/to/rfile
+ String cq = entry.getKey().getColumnQualifier().toString();
+ String path = new Path(cq).toString();
+ log.debug("Normalize path to rfile: {}", path);
+ rfiles.add(path);
+ }
+
+ return rfiles;
+ }
+
+ /**
+ * Get the replication status messages for the given table that exist in the metadata table (~repl entries)
+ */
+ private Map<String,Status> getMetadataStatusForTable(String tableName) throws Exception {
+ final Connector conn = getConnector();
+ final String tableId = conn.tableOperations().tableIdMap().get(tableName);
+
+ Assert.assertNotNull("Could not determine table ID for " + tableName, tableId);
+
+ Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ Range r = MetadataSchema.ReplicationSection.getRange();
+ s.setRange(r);
+ s.fetchColumn(MetadataSchema.ReplicationSection.COLF, new Text(tableId));
+
+ Map<String,Status> fileToStatus = new HashMap<String,Status>();
+ for (Entry<Key,Value> entry : s) {
+ Text file = new Text();
+ MetadataSchema.ReplicationSection.getFile(entry.getKey(), file);
+ Status status = Status.parseFrom(entry.getValue().get());
+ log.info("Got status for {}: {}", file, ProtobufUtil.toString(status));
+ fileToStatus.put(file.toString(), status);
+ }
+
+ return fileToStatus;
+ }
+
+ @Test
+ public void testActiveWalPrecludesClosing() throws Exception {
+ final String table = getUniqueNames(1)[0];
+ final Connector conn = getConnector();
+
+ // Bring the replication table online first and foremost
+ ReplicationTable.setOnline(conn);
+
+ log.info("Creating {}", table);
+ conn.tableOperations().create(table);
+
+ conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION.getKey(), "true");
+
+ log.info("Writing a few mutations to the table");
+
+ BatchWriter bw = conn.createBatchWriter(table, null);
+
+ byte[] empty = new byte[0];
+ for (int i = 0; i < 5; i++) {
+ Mutation m = new Mutation(Integer.toString(i));
+ m.put(empty, empty, empty);
+ bw.addMutation(m);
+ }
+
+ log.info("Flushing mutations to the server");
+ bw.flush();
+
+ log.info("Checking that metadata only has one WAL recorded for this table");
+
+ Set<String> wals = getWalsForTable(table);
+ Assert.assertEquals("Expected to only find two WALs for the table", 2, wals.size());
+
+ log.info("Compacting the table which will remove all WALs from the tablets");
+
+ // Flush our test table to remove the WAL references in it
+ conn.tableOperations().flush(table, null, null, true);
+ // Flush the metadata table too because it will have a reference to the WAL
+ conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
+
+ log.info("Waiting for replication table to come online");
+
+ log.info("Fetching replication statuses from metadata table");
+
+ Map<String,Status> fileToStatus = getMetadataStatusForTable(table);
+
+ Assert.assertEquals("Expected to only find one replication status message", 1, fileToStatus.size());
+
+ String walName = fileToStatus.keySet().iterator().next();
+ wals.retainAll(fileToStatus.keySet());
+ Assert.assertEquals(1, wals.size());
+
+ Status status = fileToStatus.get(walName);
+
+ Assert.assertEquals("Expected Status for file to not be closed", false, status.getClosed());
+
+ Set<String> filesForTable = getFilesForTable(table);
+ Assert.assertEquals("Expected to only find one rfile for table", 1, filesForTable.size());
+ log.info("Files for table before MajC: {}", filesForTable);
+
+ // Issue a MajC to roll a new file in HDFS
+ conn.tableOperations().compact(table, null, null, false, true);
+
+ Set<String> filesForTableAfterCompaction = getFilesForTable(table);
+
+ log.info("Files for table after MajC: {}", filesForTableAfterCompaction);
+
+ Assert.assertEquals("Expected to only find one rfile for table", 1, filesForTableAfterCompaction.size());
+ Assert.assertNotEquals("Expected the files before and after compaction to differ", filesForTableAfterCompaction, filesForTable);
+
+ // Use the rfile which was just replaced by the MajC to determine when the GC has run
+ Path fileToBeDeleted = new Path(filesForTable.iterator().next());
+ FileSystem fs = getCluster().getFileSystem();
+
+ boolean fileExists = fs.exists(fileToBeDeleted);
+ while (fileExists) {
+ log.info("File which should get deleted still exists: {}", fileToBeDeleted);
+ Thread.sleep(2000);
+ fileExists = fs.exists(fileToBeDeleted);
+ }
+
+ Map<String,Status> fileToStatusAfterMajc = getMetadataStatusForTable(table);
+ Assert.assertEquals("Expected to still find only one replication status message: " + fileToStatusAfterMajc, 1, fileToStatusAfterMajc.size());
+
+ Assert.assertEquals("Status should be unchanged after the MajC and GC cycle", fileToStatus, fileToStatusAfterMajc);
+ }
+
+ @Test(timeout = 2 * 60 * 1000)
+ public void testUnreferencedWalInTserverIsClosed() throws Exception {
+ final String[] names = getUniqueNames(2);
+ // `table` will be replicated, `otherTable` is only used to roll the WAL on the tserver
+ final String table = names[0], otherTable = names[1];
+ final Connector conn = getConnector();
+
+ // Bring the replication table online first and foremost
+ ReplicationTable.setOnline(conn);
+
+ log.info("Creating {}", table);
+ conn.tableOperations().create(table);
+
+ conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION.getKey(), "true");
+
+ log.info("Writing a few mutations to the table");
+
+ BatchWriter bw = conn.createBatchWriter(table, null);
+
+ byte[] empty = new byte[0];
+ for (int i = 0; i < 5; i++) {
+ Mutation m = new Mutation(Integer.toString(i));
+ m.put(empty, empty, empty);
+ bw.addMutation(m);
+ }
+
+ log.info("Flushing mutations to the server");
+ bw.close();
+
+ log.info("Checking that metadata only has one WAL recorded for this table");
+
+ Set<String> wals = getWalsForTable(table);
+ Assert.assertEquals("Expected to only find two WAL for the table", 2, wals.size());
+
+ log.info("Compacting the table which will remove all WALs from the tablets");
+
+ // Flush our test table to remove the WAL references in it
+ conn.tableOperations().flush(table, null, null, true);
+ // Flush the metadata table too because it will have a reference to the WAL
+ conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
+
+ log.info("Fetching replication statuses from metadata table");
+
+ Map<String,Status> fileToStatus = getMetadataStatusForTable(table);
+
+ Assert.assertEquals("Expected to only find one replication status message", 1, fileToStatus.size());
+
+ String walName = fileToStatus.keySet().iterator().next();
+ Assert.assertTrue("Expected log file name from tablet to equal replication entry", wals.contains(walName));
+
+ Status status = fileToStatus.get(walName);
+
+ Assert.assertEquals("Expected Status for file to not be closed", false, status.getClosed());
+
+ Set<String> filesForTable = getFilesForTable(table);
+ Assert.assertEquals("Expected to only find one rfile for table", 1, filesForTable.size());
+ log.info("Files for table before MajC: {}", filesForTable);
+
+ // Issue a MajC to roll a new file in HDFS
+ conn.tableOperations().compact(table, null, null, false, true);
+
+ Set<String> filesForTableAfterCompaction = getFilesForTable(table);
+
+ log.info("Files for table after MajC: {}", filesForTableAfterCompaction);
+
+ Assert.assertEquals("Expected to only find one rfile for table", 1, filesForTableAfterCompaction.size());
+ Assert.assertNotEquals("Expected the files before and after compaction to differ", filesForTableAfterCompaction, filesForTable);
+
+ // Use the rfile which was just replaced by the MajC to determine when the GC has run
+ Path fileToBeDeleted = new Path(filesForTable.iterator().next());
+ FileSystem fs = getCluster().getFileSystem();
+
+ boolean fileExists = fs.exists(fileToBeDeleted);
+ while (fileExists) {
+ log.info("File which should get deleted still exists: {}", fileToBeDeleted);
+ Thread.sleep(2000);
+ fileExists = fs.exists(fileToBeDeleted);
+ }
+
+ // At this point in time, we *know* that the GarbageCollector has run which means that the Status
+ // for our WAL should not be altered.
+
+ Map<String,Status> fileToStatusAfterMajc = getMetadataStatusForTable(table);
+ Assert.assertEquals("Expected to still find only one replication status message: " + fileToStatusAfterMajc, 1, fileToStatusAfterMajc.size());
+
+ /*
+ * To verify that the WAL still gets closed, we have to force the tserver to close the existing WAL and open a new one in its place. The easiest way to do
+ * this is to write enough data to exceed the configured maximum WAL size (1M in this test), which makes the logger roll over to a new WAL
+ */
+
+ conn.tableOperations().create(otherTable);
+ bw = conn.createBatchWriter(otherTable, null);
+ // 500k
+ byte[] bigValue = new byte[1024 * 500];
+ Arrays.fill(bigValue, (byte) 1);
+ // 500k * 50
+ for (int i = 0; i < 50; i++) {
+ Mutation m = new Mutation(Integer.toString(i));
+ m.put(empty, empty, bigValue);
+ bw.addMutation(m);
+ if (i % 10 == 0) {
+ bw.flush();
+ }
+ }
+
+ bw.close();
+
+ conn.tableOperations().flush(otherTable, null, null, true);
+
+ // Get the tservers which the master deems active
+ final ClientContext context = new ClientContext(conn.getInstance(), new Credentials("root", new PasswordToken(ConfigurableMacBase.ROOT_PASSWORD)),
+ getClientConfig());
+ List<String> tservers = MasterClient.execute(context, new ClientExecReturn<List<String>,MasterClientService.Client>() {
+ @Override
+ public List<String> execute(MasterClientService.Client client) throws Exception {
+ return client.getActiveTservers(Tracer.traceInfo(), context.rpcCreds());
+ }
+ });
+
+ Assert.assertEquals("Expected only one active tservers", 1, tservers.size());
+
+ HostAndPort tserver = HostAndPort.fromString(tservers.get(0));
+
+ // Get the active WALs from that server
+ log.info("Fetching active WALs from {}", tserver);
+
+ Client client = ThriftUtil.getTServerClient(tserver, context);
+ List<String> activeWalsForTserver = client.getActiveLogs(Tracer.traceInfo(), context.rpcCreds());
+
+ log.info("Active wals: {}", activeWalsForTserver);
+
+ Assert.assertEquals("Expected to find only one active WAL", 1, activeWalsForTserver.size());
+
+ String activeWal = new Path(activeWalsForTserver.get(0)).toString();
+
+ Assert.assertNotEquals("Current active WAL on tserver should not be the original WAL we saw", walName, activeWal);
+
+ log.info("Ensuring that replication status does get closed after WAL is no longer in use by Tserver");
+
+ do {
+ Map<String,Status> replicationStatuses = getMetadataStatusForTable(table);
+
+ log.info("Got replication status messages {}", replicationStatuses);
+ Assert.assertEquals("Did not expect to find additional status records", 1, replicationStatuses.size());
+
+ status = replicationStatuses.values().iterator().next();
+ log.info("Current status: {}", ProtobufUtil.toString(status));
+
+ if (status.getClosed()) {
+ return;
+ }
+
+ log.info("Status is not yet closed, waiting for garbage collector to close it");
+
+ Thread.sleep(2000);
+ } while (true);
+ }
+}
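
The do/while loop that closes the test above is the general pattern for waiting on the garbage collector: scan the ~repl section of the metadata table, parse each protobuf Status, and sleep until getClosed() flips to true. A minimal sketch of that loop, assuming a configured Connector and at least one existing ~repl entry (the class and helper names are illustrative, not Accumulo API):

    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.MetadataTable;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema;
    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.accumulo.server.replication.proto.Replication.Status;

    public class WaitForClosedSketch {
      static void waitUntilClosed(Connector conn) throws Exception {
        while (true) {
          boolean allClosed = true;
          Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
          s.setRange(MetadataSchema.ReplicationSection.getRange());
          for (Entry<Key,Value> entry : s) {
            // Each ~repl entry stores a serialized replication Status
            Status status = Status.parseFrom(entry.getValue().get());
            allClosed &= status.getClosed();
          }
          if (allClosed)
            return;
          // The GC is what closes unused WALs; give it time for another cycle
          Thread.sleep(2000);
        }
      }
    }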
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/ConditionalWriterIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/ConditionalWriterIT.java b/test/src/test/java/org/apache/accumulo/test/ConditionalWriterIT.java
deleted file mode 100644
index 74d3593..0000000
--- a/test/src/test/java/org/apache/accumulo/test/ConditionalWriterIT.java
+++ /dev/null
@@ -1,1349 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Random;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
-import java.util.UUID;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.accumulo.cluster.AccumuloCluster;
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.ConditionalWriter;
-import org.apache.accumulo.core.client.ConditionalWriter.Result;
-import org.apache.accumulo.core.client.ConditionalWriter.Status;
-import org.apache.accumulo.core.client.ConditionalWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IsolatedScanner;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.RowIterator;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableDeletedException;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.TableOfflineException;
-import org.apache.accumulo.core.client.admin.NewTableConfiguration;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.data.ArrayByteSequence;
-import org.apache.accumulo.core.data.ByteSequence;
-import org.apache.accumulo.core.data.Condition;
-import org.apache.accumulo.core.data.ConditionalMutation;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.LongCombiner.Type;
-import org.apache.accumulo.core.iterators.user.SummingCombiner;
-import org.apache.accumulo.core.iterators.user.VersioningIterator;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.security.SystemPermission;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.trace.DistributedTrace;
-import org.apache.accumulo.core.trace.Span;
-import org.apache.accumulo.core.trace.Trace;
-import org.apache.accumulo.core.util.FastFormat;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.test.functional.BadIterator;
-import org.apache.accumulo.test.functional.SlowIterator;
-import org.apache.accumulo.tracer.TraceDump;
-import org.apache.accumulo.tracer.TraceDump.Printer;
-import org.apache.accumulo.tracer.TraceServer;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterables;
-
-/**
- *
- */
-public class ConditionalWriterIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(ConditionalWriterIT.class);
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60;
- }
-
- public static long abs(long l) {
- l = Math.abs(l); // abs(Long.MIN_VALUE) == Long.MIN_VALUE...
- if (l < 0)
- return 0;
- return l;
- }
-
- @Before
- public void deleteUsers() throws Exception {
- Connector conn = getConnector();
- Set<String> users = conn.securityOperations().listLocalUsers();
- ClusterUser user = getUser(0);
- if (users.contains(user.getPrincipal())) {
- conn.securityOperations().dropLocalUser(user.getPrincipal());
- }
- }
-
- @Test
- public void testBasic() throws Exception {
-
- Connector conn = getConnector();
- String tableName = getUniqueNames(1)[0];
-
- conn.tableOperations().create(tableName);
-
- ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig());
-
- // mutation conditional on column tx:seq not existing
- ConditionalMutation cm0 = new ConditionalMutation("99006", new Condition("tx", "seq"));
- cm0.put("name", "last", "doe");
- cm0.put("name", "first", "john");
- cm0.put("tx", "seq", "1");
- Assert.assertEquals(Status.ACCEPTED, cw.write(cm0).getStatus());
- Assert.assertEquals(Status.REJECTED, cw.write(cm0).getStatus());
-
- // mutation conditional on column tx:seq being 1
- ConditionalMutation cm1 = new ConditionalMutation("99006", new Condition("tx", "seq").setValue("1"));
- cm1.put("name", "last", "Doe");
- cm1.put("tx", "seq", "2");
- Assert.assertEquals(Status.ACCEPTED, cw.write(cm1).getStatus());
-
- // test condition where value differs
- ConditionalMutation cm2 = new ConditionalMutation("99006", new Condition("tx", "seq").setValue("1"));
- cm2.put("name", "last", "DOE");
- cm2.put("tx", "seq", "2");
- Assert.assertEquals(Status.REJECTED, cw.write(cm2).getStatus());
-
- // test condition where column does not exist
- ConditionalMutation cm3 = new ConditionalMutation("99006", new Condition("txtypo", "seq").setValue("1"));
- cm3.put("name", "last", "deo");
- cm3.put("tx", "seq", "2");
- Assert.assertEquals(Status.REJECTED, cw.write(cm3).getStatus());
-
- // test two conditions, where one should fail
- ConditionalMutation cm4 = new ConditionalMutation("99006", new Condition("tx", "seq").setValue("2"), new Condition("name", "last").setValue("doe"));
- cm4.put("name", "last", "deo");
- cm4.put("tx", "seq", "3");
- Assert.assertEquals(Status.REJECTED, cw.write(cm4).getStatus());
-
- // test two conditions, where one should fail
- ConditionalMutation cm5 = new ConditionalMutation("99006", new Condition("tx", "seq").setValue("1"), new Condition("name", "last").setValue("Doe"));
- cm5.put("name", "last", "deo");
- cm5.put("tx", "seq", "3");
- Assert.assertEquals(Status.REJECTED, cw.write(cm5).getStatus());
-
- // ensure rejected mutations did not write
- Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
- scanner.fetchColumn(new Text("name"), new Text("last"));
- scanner.setRange(new Range("99006"));
- Entry<Key,Value> entry = Iterables.getOnlyElement(scanner);
- Assert.assertEquals("Doe", entry.getValue().toString());
-
- // test w/ two conditions that are met
- ConditionalMutation cm6 = new ConditionalMutation("99006", new Condition("tx", "seq").setValue("2"), new Condition("name", "last").setValue("Doe"));
- cm6.put("name", "last", "DOE");
- cm6.put("tx", "seq", "3");
- Assert.assertEquals(Status.ACCEPTED, cw.write(cm6).getStatus());
-
- entry = Iterables.getOnlyElement(scanner);
- Assert.assertEquals("DOE", entry.getValue().toString());
-
- // test a conditional mutation that deletes
- ConditionalMutation cm7 = new ConditionalMutation("99006", new Condition("tx", "seq").setValue("3"));
- cm7.putDelete("name", "last");
- cm7.putDelete("name", "first");
- cm7.putDelete("tx", "seq");
- Assert.assertEquals(Status.ACCEPTED, cw.write(cm7).getStatus());
-
- Assert.assertFalse("Did not expect to find any results", scanner.iterator().hasNext());
-
- // add the row back
- Assert.assertEquals(Status.ACCEPTED, cw.write(cm0).getStatus());
- Assert.assertEquals(Status.REJECTED, cw.write(cm0).getStatus());
-
- entry = Iterables.getOnlyElement(scanner);
- Assert.assertEquals("doe", entry.getValue().toString());
- }
-
- @Test
- public void testFields() throws Exception {
-
- Connector conn = getConnector();
- String tableName = getUniqueNames(1)[0];
-
- String user = null;
- ClientConfiguration clientConf = cluster.getClientConfig();
- final boolean saslEnabled = clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false);
-
- ClusterUser user1 = getUser(0);
- user = user1.getPrincipal();
- if (saslEnabled) {
- // The token is pointless for kerberos
- conn.securityOperations().createLocalUser(user, null);
- } else {
- conn.securityOperations().createLocalUser(user, new PasswordToken(user1.getPassword()));
- }
-
- Authorizations auths = new Authorizations("A", "B");
-
- conn.securityOperations().changeUserAuthorizations(user, auths);
- conn.securityOperations().grantSystemPermission(user, SystemPermission.CREATE_TABLE);
-
- conn = conn.getInstance().getConnector(user, user1.getToken());
-
- conn.tableOperations().create(tableName);
-
- ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig().setAuthorizations(auths));
-
- ColumnVisibility cva = new ColumnVisibility("A");
- ColumnVisibility cvb = new ColumnVisibility("B");
-
- ConditionalMutation cm0 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cva));
- cm0.put("name", "last", cva, "doe");
- cm0.put("name", "first", cva, "john");
- cm0.put("tx", "seq", cva, "1");
- Assert.assertEquals(Status.ACCEPTED, cw.write(cm0).getStatus());
-
- Scanner scanner = conn.createScanner(tableName, auths);
- scanner.setRange(new Range("99006"));
- // TODO verify all columns
- scanner.fetchColumn(new Text("tx"), new Text("seq"));
- Entry<Key,Value> entry = Iterables.getOnlyElement(scanner);
- Assert.assertEquals("1", entry.getValue().toString());
- long ts = entry.getKey().getTimestamp();
-
- // test wrong colf
- ConditionalMutation cm1 = new ConditionalMutation("99006", new Condition("txA", "seq").setVisibility(cva).setValue("1"));
- cm1.put("name", "last", cva, "Doe");
- cm1.put("name", "first", cva, "John");
- cm1.put("tx", "seq", cva, "2");
- Assert.assertEquals(Status.REJECTED, cw.write(cm1).getStatus());
-
- // test wrong colq
- ConditionalMutation cm2 = new ConditionalMutation("99006", new Condition("tx", "seqA").setVisibility(cva).setValue("1"));
- cm2.put("name", "last", cva, "Doe");
- cm2.put("name", "first", cva, "John");
- cm2.put("tx", "seq", cva, "2");
- Assert.assertEquals(Status.REJECTED, cw.write(cm2).getStatus());
-
- // test wrong colv
- ConditionalMutation cm3 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvb).setValue("1"));
- cm3.put("name", "last", cva, "Doe");
- cm3.put("name", "first", cva, "John");
- cm3.put("tx", "seq", cva, "2");
- Assert.assertEquals(Status.REJECTED, cw.write(cm3).getStatus());
-
- // test wrong timestamp
- ConditionalMutation cm4 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cva).setTimestamp(ts + 1).setValue("1"));
- cm4.put("name", "last", cva, "Doe");
- cm4.put("name", "first", cva, "John");
- cm4.put("tx", "seq", cva, "2");
- Assert.assertEquals(Status.REJECTED, cw.write(cm4).getStatus());
-
- // test wrong timestamp
- ConditionalMutation cm5 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cva).setTimestamp(ts - 1).setValue("1"));
- cm5.put("name", "last", cva, "Doe");
- cm5.put("name", "first", cva, "John");
- cm5.put("tx", "seq", cva, "2");
- Assert.assertEquals(Status.REJECTED, cw.write(cm5).getStatus());
-
- // ensure no updates were made
- entry = Iterables.getOnlyElement(scanner);
- Assert.assertEquals("1", entry.getValue().toString());
-
- // set all columns correctly
- ConditionalMutation cm6 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cva).setTimestamp(ts).setValue("1"));
- cm6.put("name", "last", cva, "Doe");
- cm6.put("name", "first", cva, "John");
- cm6.put("tx", "seq", cva, "2");
- Assert.assertEquals(Status.ACCEPTED, cw.write(cm6).getStatus());
-
- entry = Iterables.getOnlyElement(scanner);
- Assert.assertEquals("2", entry.getValue().toString());
-
- }
-
- @Test
- public void testBadColVis() throws Exception {
- // test when a user sets a col vis in a condition that can never be seen
-
- Connector conn = getConnector();
- String tableName = getUniqueNames(1)[0];
-
- conn.tableOperations().create(tableName);
-
- Authorizations auths = new Authorizations("A", "B");
-
- conn.securityOperations().changeUserAuthorizations(getAdminPrincipal(), auths);
-
- Authorizations filteredAuths = new Authorizations("A");
-
- ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig().setAuthorizations(filteredAuths));
-
- ColumnVisibility cva = new ColumnVisibility("A");
- ColumnVisibility cvb = new ColumnVisibility("B");
- ColumnVisibility cvc = new ColumnVisibility("C");
-
- // User has authorization, but didn't include it in the writer
- ConditionalMutation cm0 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvb));
- cm0.put("name", "last", cva, "doe");
- cm0.put("name", "first", cva, "john");
- cm0.put("tx", "seq", cva, "1");
- Assert.assertEquals(Status.INVISIBLE_VISIBILITY, cw.write(cm0).getStatus());
-
- ConditionalMutation cm1 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvb).setValue("1"));
- cm1.put("name", "last", cva, "doe");
- cm1.put("name", "first", cva, "john");
- cm1.put("tx", "seq", cva, "1");
- Assert.assertEquals(Status.INVISIBLE_VISIBILITY, cw.write(cm1).getStatus());
-
- // User does not have the authorization
- ConditionalMutation cm2 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvc));
- cm2.put("name", "last", cva, "doe");
- cm2.put("name", "first", cva, "john");
- cm2.put("tx", "seq", cva, "1");
- Assert.assertEquals(Status.INVISIBLE_VISIBILITY, cw.write(cm2).getStatus());
-
- ConditionalMutation cm3 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvc).setValue("1"));
- cm3.put("name", "last", cva, "doe");
- cm3.put("name", "first", cva, "john");
- cm3.put("tx", "seq", cva, "1");
- Assert.assertEquals(Status.INVISIBLE_VISIBILITY, cw.write(cm3).getStatus());
-
- // if any visibility is bad, good visibilities don't override
- ConditionalMutation cm4 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvb), new Condition("tx", "seq").setVisibility(cva));
-
- cm4.put("name", "last", cva, "doe");
- cm4.put("name", "first", cva, "john");
- cm4.put("tx", "seq", cva, "1");
- Assert.assertEquals(Status.INVISIBLE_VISIBILITY, cw.write(cm4).getStatus());
-
- ConditionalMutation cm5 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvb).setValue("1"), new Condition("tx", "seq")
- .setVisibility(cva).setValue("1"));
- cm5.put("name", "last", cva, "doe");
- cm5.put("name", "first", cva, "john");
- cm5.put("tx", "seq", cva, "1");
- Assert.assertEquals(Status.INVISIBLE_VISIBILITY, cw.write(cm5).getStatus());
-
- ConditionalMutation cm6 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvb).setValue("1"),
- new Condition("tx", "seq").setVisibility(cva));
- cm6.put("name", "last", cva, "doe");
- cm6.put("name", "first", cva, "john");
- cm6.put("tx", "seq", cva, "1");
- Assert.assertEquals(Status.INVISIBLE_VISIBILITY, cw.write(cm6).getStatus());
-
- ConditionalMutation cm7 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvb), new Condition("tx", "seq").setVisibility(cva)
- .setValue("1"));
- cm7.put("name", "last", cva, "doe");
- cm7.put("name", "first", cva, "john");
- cm7.put("tx", "seq", cva, "1");
- Assert.assertEquals(Status.INVISIBLE_VISIBILITY, cw.write(cm7).getStatus());
-
- cw.close();
-
- // test passing auths that exceed users configured auths
-
- Authorizations exceedingAuths = new Authorizations("A", "B", "D");
- ConditionalWriter cw2 = conn.createConditionalWriter(tableName, new ConditionalWriterConfig().setAuthorizations(exceedingAuths));
-
- ConditionalMutation cm8 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvb), new Condition("tx", "seq").setVisibility(cva)
- .setValue("1"));
- cm8.put("name", "last", cva, "doe");
- cm8.put("name", "first", cva, "john");
- cm8.put("tx", "seq", cva, "1");
-
- try {
- Status status = cw2.write(cm8).getStatus();
- Assert.fail("Writing mutation with Authorizations the user doesn't have should fail. Got status: " + status);
- } catch (AccumuloSecurityException ase) {
- // expected, check specific failure?
- } finally {
- cw2.close();
- }
- }
-
- @Test
- public void testConstraints() throws Exception {
- // ensure constraint violations are properly reported
-
- Connector conn = getConnector();
- String tableName = getUniqueNames(1)[0];
-
- conn.tableOperations().create(tableName);
- conn.tableOperations().addConstraint(tableName, AlphaNumKeyConstraint.class.getName());
- conn.tableOperations().clone(tableName, tableName + "_clone", true, new HashMap<String,String>(), new HashSet<String>());
-
- Scanner scanner = conn.createScanner(tableName + "_clone", new Authorizations());
-
- ConditionalWriter cw = conn.createConditionalWriter(tableName + "_clone", new ConditionalWriterConfig());
-
- ConditionalMutation cm0 = new ConditionalMutation("99006+", new Condition("tx", "seq"));
- cm0.put("tx", "seq", "1");
-
- Assert.assertEquals(Status.VIOLATED, cw.write(cm0).getStatus());
- Assert.assertFalse("Should find no results in the table is mutation result was violated", scanner.iterator().hasNext());
-
- ConditionalMutation cm1 = new ConditionalMutation("99006", new Condition("tx", "seq"));
- cm1.put("tx", "seq", "1");
-
- Assert.assertEquals(Status.ACCEPTED, cw.write(cm1).getStatus());
- Assert.assertTrue("Accepted result should be returned when reading table", scanner.iterator().hasNext());
-
- cw.close();
- }
-
- @Test
- public void testIterators() throws Exception {
-
- Connector conn = getConnector();
- String tableName = getUniqueNames(1)[0];
-
- conn.tableOperations().create(tableName, new NewTableConfiguration().withoutDefaultIterators());
-
- BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
-
- Mutation m = new Mutation("ACCUMULO-1000");
- m.put("count", "comments", "1");
- bw.addMutation(m);
- bw.addMutation(m);
- bw.addMutation(m);
-
- m = new Mutation("ACCUMULO-1001");
- m.put("count2", "comments", "1");
- bw.addMutation(m);
- bw.addMutation(m);
-
- m = new Mutation("ACCUMULO-1002");
- m.put("count2", "comments", "1");
- bw.addMutation(m);
- bw.addMutation(m);
-
- bw.close();
-
- IteratorSetting iterConfig = new IteratorSetting(10, SummingCombiner.class);
- SummingCombiner.setEncodingType(iterConfig, Type.STRING);
- SummingCombiner.setColumns(iterConfig, Collections.singletonList(new IteratorSetting.Column("count")));
-
- IteratorSetting iterConfig2 = new IteratorSetting(10, SummingCombiner.class);
- SummingCombiner.setEncodingType(iterConfig2, Type.STRING);
- SummingCombiner.setColumns(iterConfig2, Collections.singletonList(new IteratorSetting.Column("count2", "comments")));
-
- IteratorSetting iterConfig3 = new IteratorSetting(5, VersioningIterator.class);
- VersioningIterator.setMaxVersions(iterConfig3, 1);
-
- Scanner scanner = conn.createScanner(tableName, new Authorizations());
- scanner.addScanIterator(iterConfig);
- scanner.setRange(new Range("ACCUMULO-1000"));
- scanner.fetchColumn(new Text("count"), new Text("comments"));
-
- Entry<Key,Value> entry = Iterables.getOnlyElement(scanner);
- Assert.assertEquals("3", entry.getValue().toString());
-
- ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig());
-
- ConditionalMutation cm0 = new ConditionalMutation("ACCUMULO-1000", new Condition("count", "comments").setValue("3"));
- cm0.put("count", "comments", "1");
- Assert.assertEquals(Status.REJECTED, cw.write(cm0).getStatus());
- entry = Iterables.getOnlyElement(scanner);
- Assert.assertEquals("3", entry.getValue().toString());
-
- ConditionalMutation cm1 = new ConditionalMutation("ACCUMULO-1000", new Condition("count", "comments").setIterators(iterConfig).setValue("3"));
- cm1.put("count", "comments", "1");
- Assert.assertEquals(Status.ACCEPTED, cw.write(cm1).getStatus());
- entry = Iterables.getOnlyElement(scanner);
- Assert.assertEquals("4", entry.getValue().toString());
-
- ConditionalMutation cm2 = new ConditionalMutation("ACCUMULO-1000", new Condition("count", "comments").setValue("4"));
- cm2.put("count", "comments", "1");
- Assert.assertEquals(Status.REJECTED, cw.write(cm2).getStatus());
- entry = Iterables.getOnlyElement(scanner);
- Assert.assertEquals("4", entry.getValue().toString());
-
- // run test with multiple iterators passed in same batch and condition with two iterators
-
- ConditionalMutation cm3 = new ConditionalMutation("ACCUMULO-1000", new Condition("count", "comments").setIterators(iterConfig).setValue("4"));
- cm3.put("count", "comments", "1");
-
- ConditionalMutation cm4 = new ConditionalMutation("ACCUMULO-1001", new Condition("count2", "comments").setIterators(iterConfig2).setValue("2"));
- cm4.put("count2", "comments", "1");
-
- ConditionalMutation cm5 = new ConditionalMutation("ACCUMULO-1002", new Condition("count2", "comments").setIterators(iterConfig2, iterConfig3).setValue("2"));
- cm5.put("count2", "comments", "1");
-
- Iterator<Result> results = cw.write(Arrays.asList(cm3, cm4, cm5).iterator());
- Map<String,Status> actual = new HashMap<String,Status>();
-
- while (results.hasNext()) {
- Result result = results.next();
- String k = new String(result.getMutation().getRow());
- Assert.assertFalse("Did not expect to see multiple resultus for the row: " + k, actual.containsKey(k));
- actual.put(k, result.getStatus());
- }
-
- Map<String,Status> expected = new HashMap<String,Status>();
- expected.put("ACCUMULO-1000", Status.ACCEPTED);
- expected.put("ACCUMULO-1001", Status.ACCEPTED);
- expected.put("ACCUMULO-1002", Status.REJECTED);
-
- Assert.assertEquals(expected, actual);
-
- // TODO test w/ table that has iterators configured
-
- cw.close();
- }
-
- @Test
- public void testBatch() throws Exception {
-
- Connector conn = getConnector();
- String tableName = getUniqueNames(1)[0];
-
- conn.tableOperations().create(tableName);
-
- conn.securityOperations().changeUserAuthorizations(getAdminPrincipal(), new Authorizations("A", "B"));
-
- ColumnVisibility cvab = new ColumnVisibility("A|B");
-
- ArrayList<ConditionalMutation> mutations = new ArrayList<ConditionalMutation>();
-
- ConditionalMutation cm0 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvab));
- cm0.put("name", "last", cvab, "doe");
- cm0.put("name", "first", cvab, "john");
- cm0.put("tx", "seq", cvab, "1");
- mutations.add(cm0);
-
- ConditionalMutation cm1 = new ConditionalMutation("59056", new Condition("tx", "seq").setVisibility(cvab));
- cm1.put("name", "last", cvab, "doe");
- cm1.put("name", "first", cvab, "jane");
- cm1.put("tx", "seq", cvab, "1");
- mutations.add(cm1);
-
- ConditionalMutation cm2 = new ConditionalMutation("19059", new Condition("tx", "seq").setVisibility(cvab));
- cm2.put("name", "last", cvab, "doe");
- cm2.put("name", "first", cvab, "jack");
- cm2.put("tx", "seq", cvab, "1");
- mutations.add(cm2);
-
- ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig().setAuthorizations(new Authorizations("A")));
- Iterator<Result> results = cw.write(mutations.iterator());
- int count = 0;
- while (results.hasNext()) {
- Result result = results.next();
- Assert.assertEquals(Status.ACCEPTED, result.getStatus());
- count++;
- }
-
- Assert.assertEquals(3, count);
-
- Scanner scanner = conn.createScanner(tableName, new Authorizations("A"));
- scanner.fetchColumn(new Text("tx"), new Text("seq"));
-
- for (String row : new String[] {"99006", "59056", "19059"}) {
- scanner.setRange(new Range(row));
- Entry<Key,Value> entry = Iterables.getOnlyElement(scanner);
- Assert.assertEquals("1", entry.getValue().toString());
- }
-
- TreeSet<Text> splits = new TreeSet<Text>();
- splits.add(new Text("7"));
- splits.add(new Text("3"));
- conn.tableOperations().addSplits(tableName, splits);
-
- mutations.clear();
-
- ConditionalMutation cm3 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvab).setValue("1"));
- cm3.put("name", "last", cvab, "Doe");
- cm3.put("tx", "seq", cvab, "2");
- mutations.add(cm3);
-
- ConditionalMutation cm4 = new ConditionalMutation("59056", new Condition("tx", "seq").setVisibility(cvab));
- cm4.put("name", "last", cvab, "Doe");
- cm4.put("tx", "seq", cvab, "1");
- mutations.add(cm4);
-
- ConditionalMutation cm5 = new ConditionalMutation("19059", new Condition("tx", "seq").setVisibility(cvab).setValue("2"));
- cm5.put("name", "last", cvab, "Doe");
- cm5.put("tx", "seq", cvab, "3");
- mutations.add(cm5);
-
- results = cw.write(mutations.iterator());
- int accepted = 0;
- int rejected = 0;
- while (results.hasNext()) {
- Result result = results.next();
- if (new String(result.getMutation().getRow()).equals("99006")) {
- Assert.assertEquals(Status.ACCEPTED, result.getStatus());
- accepted++;
- } else {
- Assert.assertEquals(Status.REJECTED, result.getStatus());
- rejected++;
- }
- }
-
- Assert.assertEquals("Expected only one accepted conditional mutation", 1, accepted);
- Assert.assertEquals("Expected two rejected conditional mutations", 2, rejected);
-
- for (String row : new String[] {"59056", "19059"}) {
- scanner.setRange(new Range(row));
- Entry<Key,Value> entry = Iterables.getOnlyElement(scanner);
- Assert.assertEquals("1", entry.getValue().toString());
- }
-
- scanner.setRange(new Range("99006"));
- Entry<Key,Value> entry = Iterables.getOnlyElement(scanner);
- Assert.assertEquals("2", entry.getValue().toString());
-
- scanner.clearColumns();
- scanner.fetchColumn(new Text("name"), new Text("last"));
- entry = Iterables.getOnlyElement(scanner);
- Assert.assertEquals("Doe", entry.getValue().toString());
-
- cw.close();
- }
-
- @Test
- public void testBigBatch() throws Exception {
-
- Connector conn = getConnector();
- String tableName = getUniqueNames(1)[0];
-
- conn.tableOperations().create(tableName);
- conn.tableOperations().addSplits(tableName, nss("2", "4", "6"));
-
- UtilWaitThread.sleep(2000);
-
- int num = 100;
-
- ArrayList<byte[]> rows = new ArrayList<byte[]>(num);
- ArrayList<ConditionalMutation> cml = new ArrayList<ConditionalMutation>(num);
-
- Random r = new Random();
- byte[] e = new byte[0];
-
- for (int i = 0; i < num; i++) {
- rows.add(FastFormat.toZeroPaddedString(abs(r.nextLong()), 16, 16, e));
- }
-
- for (int i = 0; i < num; i++) {
- ConditionalMutation cm = new ConditionalMutation(rows.get(i), new Condition("meta", "seq"));
-
- cm.put("meta", "seq", "1");
- cm.put("meta", "tx", UUID.randomUUID().toString());
-
- cml.add(cm);
- }
-
- ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig());
-
- Iterator<Result> results = cw.write(cml.iterator());
-
- int count = 0;
-
- // TODO check got each row back
- while (results.hasNext()) {
- Result result = results.next();
- Assert.assertEquals(Status.ACCEPTED, result.getStatus());
- count++;
- }
-
- Assert.assertEquals("Did not receive the expected number of results", num, count);
-
- ArrayList<ConditionalMutation> cml2 = new ArrayList<ConditionalMutation>(num);
-
- for (int i = 0; i < num; i++) {
- ConditionalMutation cm = new ConditionalMutation(rows.get(i), new Condition("meta", "seq").setValue("1"));
-
- cm.put("meta", "seq", "2");
- cm.put("meta", "tx", UUID.randomUUID().toString());
-
- cml2.add(cm);
- }
-
- count = 0;
-
- results = cw.write(cml2.iterator());
-
- while (results.hasNext()) {
- Result result = results.next();
- Assert.assertEquals(Status.ACCEPTED, result.getStatus());
- count++;
- }
-
- Assert.assertEquals("Did not receive the expected number of results", num, count);
-
- cw.close();
- }
-
- @Test
- public void testBatchErrors() throws Exception {
-
- Connector conn = getConnector();
- String tableName = getUniqueNames(1)[0];
-
- conn.tableOperations().create(tableName);
- conn.tableOperations().addConstraint(tableName, AlphaNumKeyConstraint.class.getName());
- conn.tableOperations().clone(tableName, tableName + "_clone", true, new HashMap<String,String>(), new HashSet<String>());
-
- conn.securityOperations().changeUserAuthorizations(getAdminPrincipal(), new Authorizations("A", "B"));
-
- ColumnVisibility cvaob = new ColumnVisibility("A|B");
- ColumnVisibility cvaab = new ColumnVisibility("A&B");
-
- switch ((new Random()).nextInt(3)) {
- case 1:
- conn.tableOperations().addSplits(tableName, nss("6"));
- break;
- case 2:
- conn.tableOperations().addSplits(tableName, nss("2", "95"));
- break;
- }
-
- ArrayList<ConditionalMutation> mutations = new ArrayList<ConditionalMutation>();
-
- ConditionalMutation cm0 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvaob));
- cm0.put("name+", "last", cvaob, "doe");
- cm0.put("name", "first", cvaob, "john");
- cm0.put("tx", "seq", cvaob, "1");
- mutations.add(cm0);
-
- ConditionalMutation cm1 = new ConditionalMutation("59056", new Condition("tx", "seq").setVisibility(cvaab));
- cm1.put("name", "last", cvaab, "doe");
- cm1.put("name", "first", cvaab, "jane");
- cm1.put("tx", "seq", cvaab, "1");
- mutations.add(cm1);
-
- ConditionalMutation cm2 = new ConditionalMutation("19059", new Condition("tx", "seq").setVisibility(cvaob));
- cm2.put("name", "last", cvaob, "doe");
- cm2.put("name", "first", cvaob, "jack");
- cm2.put("tx", "seq", cvaob, "1");
- mutations.add(cm2);
-
- ConditionalMutation cm3 = new ConditionalMutation("90909", new Condition("tx", "seq").setVisibility(cvaob).setValue("1"));
- cm3.put("name", "last", cvaob, "doe");
- cm3.put("name", "first", cvaob, "john");
- cm3.put("tx", "seq", cvaob, "2");
- mutations.add(cm3);
-
- ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig().setAuthorizations(new Authorizations("A")));
- Iterator<Result> results = cw.write(mutations.iterator());
- HashSet<String> rows = new HashSet<String>();
- while (results.hasNext()) {
- Result result = results.next();
- String row = new String(result.getMutation().getRow());
- if (row.equals("19059")) {
- Assert.assertEquals(Status.ACCEPTED, result.getStatus());
- } else if (row.equals("59056")) {
- Assert.assertEquals(Status.INVISIBLE_VISIBILITY, result.getStatus());
- } else if (row.equals("99006")) {
- Assert.assertEquals(Status.VIOLATED, result.getStatus());
- } else if (row.equals("90909")) {
- Assert.assertEquals(Status.REJECTED, result.getStatus());
- }
- rows.add(row);
- }
-
- Assert.assertEquals(4, rows.size());
-
- Scanner scanner = conn.createScanner(tableName, new Authorizations("A"));
- scanner.fetchColumn(new Text("tx"), new Text("seq"));
-
- Entry<Key,Value> entry = Iterables.getOnlyElement(scanner);
- Assert.assertEquals("1", entry.getValue().toString());
-
- cw.close();
- }
-
- @Test
- public void testSameRow() throws Exception {
- // test multiple mutations for same row in same batch
-
- Connector conn = getConnector();
- String tableName = getUniqueNames(1)[0];
-
- conn.tableOperations().create(tableName);
-
- ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig());
-
- ConditionalMutation cm1 = new ConditionalMutation("r1", new Condition("tx", "seq"));
- cm1.put("tx", "seq", "1");
- cm1.put("data", "x", "a");
-
- Assert.assertEquals(Status.ACCEPTED, cw.write(cm1).getStatus());
-
- ConditionalMutation cm2 = new ConditionalMutation("r1", new Condition("tx", "seq").setValue("1"));
- cm2.put("tx", "seq", "2");
- cm2.put("data", "x", "b");
-
- ConditionalMutation cm3 = new ConditionalMutation("r1", new Condition("tx", "seq").setValue("1"));
- cm3.put("tx", "seq", "2");
- cm3.put("data", "x", "c");
-
- ConditionalMutation cm4 = new ConditionalMutation("r1", new Condition("tx", "seq").setValue("1"));
- cm4.put("tx", "seq", "2");
- cm4.put("data", "x", "d");
-
- Iterator<Result> results = cw.write(Arrays.asList(cm2, cm3, cm4).iterator());
-
- int accepted = 0;
- int rejected = 0;
- int total = 0;
-
- while (results.hasNext()) {
- Status status = results.next().getStatus();
- if (status == Status.ACCEPTED)
- accepted++;
- if (status == Status.REJECTED)
- rejected++;
- total++;
- }
-
- Assert.assertEquals("Expected one accepted result", 1, accepted);
- Assert.assertEquals("Expected two rejected results", 2, rejected);
- Assert.assertEquals("Expected three total results", 3, total);
-
- cw.close();
- }
-
- private static class Stats {
-
- ByteSequence row = null;
- int seq;
- long sum;
- int data[] = new int[10];
-
- public Stats(Iterator<Entry<Key,Value>> iterator) {
- while (iterator.hasNext()) {
- Entry<Key,Value> entry = iterator.next();
-
- if (row == null)
- row = entry.getKey().getRowData();
-
- String cf = entry.getKey().getColumnFamilyData().toString();
- String cq = entry.getKey().getColumnQualifierData().toString();
-
- if (cf.equals("data")) {
- data[Integer.parseInt(cq)] = Integer.parseInt(entry.getValue().toString());
- } else if (cf.equals("meta")) {
- if (cq.equals("sum")) {
- sum = Long.parseLong(entry.getValue().toString());
- } else if (cq.equals("seq")) {
- seq = Integer.parseInt(entry.getValue().toString());
- }
- }
- }
-
- long sum2 = 0;
-
- for (int datum : data) {
- sum2 += datum;
- }
-
- Assert.assertEquals(sum2, sum);
- }
-
- public Stats(ByteSequence row) {
- this.row = row;
- for (int i = 0; i < data.length; i++) {
- this.data[i] = 0;
- }
- this.seq = -1;
- this.sum = 0;
- }
-
- void set(int index, int value) {
- sum -= data[index];
- sum += value;
- data[index] = value;
- }
-
- ConditionalMutation toMutation() {
- Condition cond = new Condition("meta", "seq");
- if (seq >= 0)
- cond.setValue(seq + "");
-
- ConditionalMutation cm = new ConditionalMutation(row, cond);
-
- cm.put("meta", "seq", (seq + 1) + "");
- cm.put("meta", "sum", (sum) + "");
-
- for (int i = 0; i < data.length; i++) {
- cm.put("data", i + "", data[i] + "");
- }
-
- return cm;
- }
-
- @Override
- public String toString() {
- return row + " " + seq + " " + sum;
- }
- }
-
- private static class MutatorTask implements Runnable {
- String table;
- ArrayList<ByteSequence> rows;
- ConditionalWriter cw;
- Connector conn;
- AtomicBoolean failed;
-
- public MutatorTask(String table, Connector conn, ArrayList<ByteSequence> rows, ConditionalWriter cw, AtomicBoolean failed) {
- this.table = table;
- this.rows = rows;
- this.conn = conn;
- this.cw = cw;
- this.failed = failed;
- }
-
- @Override
- public void run() {
- try {
- Random rand = new Random();
-
- Scanner scanner = new IsolatedScanner(conn.createScanner(table, Authorizations.EMPTY));
-
- for (int i = 0; i < 20; i++) {
- int numRows = rand.nextInt(10) + 1;
-
- ArrayList<ByteSequence> changes = new ArrayList<ByteSequence>(numRows);
- ArrayList<ConditionalMutation> mutations = new ArrayList<ConditionalMutation>();
-
- for (int j = 0; j < numRows; j++)
- changes.add(rows.get(rand.nextInt(rows.size())));
-
- for (ByteSequence row : changes) {
- scanner.setRange(new Range(row.toString()));
- Stats stats = new Stats(scanner.iterator());
- stats.set(rand.nextInt(10), rand.nextInt(Integer.MAX_VALUE));
- mutations.add(stats.toMutation());
- }
-
- ArrayList<ByteSequence> changed = new ArrayList<ByteSequence>(numRows);
- Iterator<Result> results = cw.write(mutations.iterator());
- while (results.hasNext()) {
- Result result = results.next();
- changed.add(new ArrayByteSequence(result.getMutation().getRow()));
- }
-
- Collections.sort(changes);
- Collections.sort(changed);
-
- Assert.assertEquals(changes, changed);
-
- }
-
- } catch (Exception e) {
- log.error("{}", e.getMessage(), e);
- failed.set(true);
- }
- }
- }
-
- @Test
- public void testThreads() throws Exception {
- // test multiple threads using a single conditional writer
-
- String table = getUniqueNames(1)[0];
- Connector conn = getConnector();
-
- conn.tableOperations().create(table);
-
- Random rand = new Random();
-
- switch (rand.nextInt(3)) {
- case 1:
- conn.tableOperations().addSplits(table, nss("4"));
- break;
- case 2:
- conn.tableOperations().addSplits(table, nss("3", "5"));
- break;
- }
-
- ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig());
-
- ArrayList<ByteSequence> rows = new ArrayList<ByteSequence>();
-
- for (int i = 0; i < 1000; i++) {
- rows.add(new ArrayByteSequence(FastFormat.toZeroPaddedString(abs(rand.nextLong()), 16, 16, new byte[0])));
- }
-
- ArrayList<ConditionalMutation> mutations = new ArrayList<ConditionalMutation>();
-
- for (ByteSequence row : rows)
- mutations.add(new Stats(row).toMutation());
-
- ArrayList<ByteSequence> rows2 = new ArrayList<ByteSequence>();
- Iterator<Result> results = cw.write(mutations.iterator());
- while (results.hasNext()) {
- Result result = results.next();
- Assert.assertEquals(Status.ACCEPTED, result.getStatus());
- rows2.add(new ArrayByteSequence(result.getMutation().getRow()));
- }
-
- Collections.sort(rows);
- Collections.sort(rows2);
-
- Assert.assertEquals(rows, rows2);
-
- AtomicBoolean failed = new AtomicBoolean(false);
-
- ExecutorService tp = Executors.newFixedThreadPool(5);
- for (int i = 0; i < 5; i++) {
- tp.submit(new MutatorTask(table, conn, rows, cw, failed));
- }
-
- tp.shutdown();
-
- while (!tp.isTerminated()) {
- tp.awaitTermination(1, TimeUnit.MINUTES);
- }
-
- Assert.assertFalse("A MutatorTask failed with an exception", failed.get());
-
- Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
-
- RowIterator rowIter = new RowIterator(scanner);
-
- while (rowIter.hasNext()) {
- Iterator<Entry<Key,Value>> row = rowIter.next();
- new Stats(row);
- }
- }
-
- private SortedSet<Text> nss(String... splits) {
- TreeSet<Text> ret = new TreeSet<Text>();
- for (String split : splits)
- ret.add(new Text(split));
-
- return ret;
- }
-
- @Test
- public void testSecurity() throws Exception {
- // test against a table the user does not have read and/or write permissions for
- Connector conn = getConnector();
- String user = null;
- ClientConfiguration clientConf = cluster.getClientConfig();
- final boolean saslEnabled = clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false);
-
- // Create a new user
- ClusterUser user1 = getUser(0);
- user = user1.getPrincipal();
- if (saslEnabled) {
- conn.securityOperations().createLocalUser(user, null);
- } else {
- conn.securityOperations().createLocalUser(user, new PasswordToken(user1.getPassword()));
- }
-
- String[] tables = getUniqueNames(3);
- String table1 = tables[0], table2 = tables[1], table3 = tables[2];
-
- // Create three tables
- conn.tableOperations().create(table1);
- conn.tableOperations().create(table2);
- conn.tableOperations().create(table3);
-
- // Grant R on table1, W on table2, R/W on table3
- conn.securityOperations().grantTablePermission(user, table1, TablePermission.READ);
- conn.securityOperations().grantTablePermission(user, table2, TablePermission.WRITE);
- conn.securityOperations().grantTablePermission(user, table3, TablePermission.READ);
- conn.securityOperations().grantTablePermission(user, table3, TablePermission.WRITE);
-
- // Login as the user
- Connector conn2 = conn.getInstance().getConnector(user, user1.getToken());
-
- ConditionalMutation cm1 = new ConditionalMutation("r1", new Condition("tx", "seq"));
- cm1.put("tx", "seq", "1");
- cm1.put("data", "x", "a");
-
- ConditionalWriter cw1 = conn2.createConditionalWriter(table1, new ConditionalWriterConfig());
- ConditionalWriter cw2 = conn2.createConditionalWriter(table2, new ConditionalWriterConfig());
- ConditionalWriter cw3 = conn2.createConditionalWriter(table3, new ConditionalWriterConfig());
-
- // Should be able to conditional-update a table we have R/W on
- Assert.assertEquals(Status.ACCEPTED, cw3.write(cm1).getStatus());
-
- // Conditional-update to a table we only have read on should fail
- try {
- Status status = cw1.write(cm1).getStatus();
- Assert.fail("Expected exception writing conditional mutation to table the user doesn't have write access to, Got status: " + status);
- } catch (AccumuloSecurityException ase) {
-
- }
-
- // Conditional-update to a table we only have write on should fail
- try {
- Status status = cw2.write(cm1).getStatus();
- Assert.fail("Expected exception writing conditional mutation to table the user doesn't have read access to. Got status: " + status);
- } catch (AccumuloSecurityException ase) {
-
- }
- }
-
- @Test
- public void testTimeout() throws Exception {
- Connector conn = getConnector();
-
- String table = getUniqueNames(1)[0];
-
- conn.tableOperations().create(table);
-
- ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig().setTimeout(3, TimeUnit.SECONDS));
-
- ConditionalMutation cm1 = new ConditionalMutation("r1", new Condition("tx", "seq"));
- cm1.put("tx", "seq", "1");
- cm1.put("data", "x", "a");
-
- Assert.assertEquals(Status.ACCEPTED, cw.write(cm1).getStatus());
-
- IteratorSetting is = new IteratorSetting(5, SlowIterator.class);
- SlowIterator.setSeekSleepTime(is, 5000);
-
- ConditionalMutation cm2 = new ConditionalMutation("r1", new Condition("tx", "seq").setValue("1").setIterators(is));
- cm2.put("tx", "seq", "2");
- cm2.put("data", "x", "b");
-
- Assert.assertEquals(Status.UNKNOWN, cw.write(cm2).getStatus());
-
- Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
-
- for (Entry<Key,Value> entry : scanner) {
- String cf = entry.getKey().getColumnFamilyData().toString();
- String cq = entry.getKey().getColumnQualifierData().toString();
- String val = entry.getValue().toString();
-
- if (cf.equals("tx") && cq.equals("seq"))
- Assert.assertEquals("Unexpected value in tx:seq", "1", val);
- else if (cf.equals("data") && cq.equals("x"))
- Assert.assertEquals("Unexpected value in data:x", "a", val);
- else
- Assert.fail("Saw unexpected column family and qualifier: " + entry);
- }
-
- ConditionalMutation cm3 = new ConditionalMutation("r1", new Condition("tx", "seq").setValue("1"));
- cm3.put("tx", "seq", "2");
- cm3.put("data", "x", "b");
-
- Assert.assertEquals(Status.ACCEPTED, cw.write(cm3).getStatus());
-
- cw.close();
- }
-
- @Test
- public void testDeleteTable() throws Exception {
- String table = getUniqueNames(1)[0];
- Connector conn = getConnector();
-
- try {
- conn.createConditionalWriter(table, new ConditionalWriterConfig());
- Assert.fail("Creating conditional writer for table that doesn't exist should fail");
- } catch (TableNotFoundException e) {}
-
- conn.tableOperations().create(table);
-
- ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig());
-
- conn.tableOperations().delete(table);
-
- ConditionalMutation cm1 = new ConditionalMutation("r1", new Condition("tx", "seq"));
- cm1.put("tx", "seq", "1");
- cm1.put("data", "x", "a");
-
- Result result = cw.write(cm1);
-
- try {
- Status status = result.getStatus();
- Assert.fail("Expected exception writing conditional mutation to deleted table. Got status: " + status);
- } catch (AccumuloException ae) {
- Assert.assertEquals(TableDeletedException.class, ae.getCause().getClass());
- }
- }
-
- @Test
- public void testOffline() throws Exception {
- String table = getUniqueNames(1)[0];
- Connector conn = getConnector();
-
- conn.tableOperations().create(table);
-
- ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig());
-
- conn.tableOperations().offline(table, true);
-
- ConditionalMutation cm1 = new ConditionalMutation("r1", new Condition("tx", "seq"));
- cm1.put("tx", "seq", "1");
- cm1.put("data", "x", "a");
-
- Result result = cw.write(cm1);
-
- try {
- Status status = result.getStatus();
- Assert.fail("Expected exception writing conditional mutation to offline table. Got status: " + status);
- } catch (AccumuloException ae) {
- Assert.assertEquals(TableOfflineException.class, ae.getCause().getClass());
- }
-
- cw.close();
-
- try {
- conn.createConditionalWriter(table, new ConditionalWriterConfig());
- Assert.fail("Expected exception creating conditional writer to offline table");
- } catch (TableOfflineException e) {}
- }
-
- @Test
- public void testError() throws Exception {
- String table = getUniqueNames(1)[0];
- Connector conn = getConnector();
-
- conn.tableOperations().create(table);
-
- ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig());
-
- IteratorSetting iterSetting = new IteratorSetting(5, BadIterator.class);
-
- ConditionalMutation cm1 = new ConditionalMutation("r1", new Condition("tx", "seq").setIterators(iterSetting));
- cm1.put("tx", "seq", "1");
- cm1.put("data", "x", "a");
-
- Result result = cw.write(cm1);
-
- try {
- Status status = result.getStatus();
- Assert.fail("Expected exception using iterator which throws an error, Got status: " + status);
- } catch (AccumuloException ae) {
-
- }
-
- cw.close();
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void testNoConditions() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException {
- String table = getUniqueNames(1)[0];
- Connector conn = getConnector();
-
- conn.tableOperations().create(table);
-
- ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig());
-
- ConditionalMutation cm1 = new ConditionalMutation("r1");
- cm1.put("tx", "seq", "1");
- cm1.put("data", "x", "a");
-
- cw.write(cm1);
- }
-
- @Test
- public void testTrace() throws Exception {
- // Need to add a getClientConfig() to AccumuloCluster
- Assume.assumeTrue(getClusterType() == ClusterType.MINI);
- Process tracer = null;
- Connector conn = getConnector();
- AccumuloCluster cluster = getCluster();
- MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
- if (!conn.tableOperations().exists("trace")) {
- tracer = mac.exec(TraceServer.class);
- while (!conn.tableOperations().exists("trace")) {
- UtilWaitThread.sleep(1000);
- }
- }
-
- String tableName = getUniqueNames(1)[0];
- conn.tableOperations().create(tableName);
-
- DistributedTrace.enable("localhost", "testTrace", mac.getClientConfig());
- UtilWaitThread.sleep(1000);
- Span root = Trace.on("traceTest");
- ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig());
-
- // mutation conditional on column tx:seq not existing
- ConditionalMutation cm0 = new ConditionalMutation("99006", new Condition("tx", "seq"));
- cm0.put("name", "last", "doe");
- cm0.put("name", "first", "john");
- cm0.put("tx", "seq", "1");
- Assert.assertEquals(Status.ACCEPTED, cw.write(cm0).getStatus());
- root.stop();
-
- final Scanner scanner = conn.createScanner("trace", Authorizations.EMPTY);
- scanner.setRange(new Range(new Text(Long.toHexString(root.traceId()))));
- loop: while (true) {
- final StringBuffer finalBuffer = new StringBuffer();
- int traceCount = TraceDump.printTrace(scanner, new Printer() {
- @Override
- public void print(final String line) {
- try {
- finalBuffer.append(line).append("\n");
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
- }
- });
- String traceOutput = finalBuffer.toString();
- log.info("Trace output:" + traceOutput);
- if (traceCount > 0) {
- int lastPos = 0;
- for (String part : "traceTest, startScan,startConditionalUpdate,conditionalUpdate,Check conditions,apply conditional mutations".split(",")) {
- log.info("Looking in trace output for '" + part + "'");
- int pos = traceOutput.indexOf(part);
- if (-1 == pos) {
- log.info("Trace output doesn't contain '" + part + "'");
- Thread.sleep(1000);
- break loop;
- }
- assertTrue("Did not find '" + part + "' in output", pos > 0);
- assertTrue("'" + part + "' occurred earlier than the previous element unexpectedly", pos > lastPos);
- lastPos = pos;
- }
- break;
- } else {
- log.info("Ignoring trace output as traceCount not greater than zero: " + traceCount);
- Thread.sleep(1000);
- }
- }
- if (tracer != null) {
- tracer.destroy();
- }
- }
-}
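Since the deleted ConditionalWriterIT was the main in-tree illustration of the ConditionalWriter API, here is a minimal standalone sketch of the check-and-set pattern it exercised. The table name and the assumption of a live Connector are illustrative, not part of the removed test.

    import org.apache.accumulo.core.client.ConditionalWriter;
    import org.apache.accumulo.core.client.ConditionalWriter.Status;
    import org.apache.accumulo.core.client.ConditionalWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.data.Condition;
    import org.apache.accumulo.core.data.ConditionalMutation;

    class ConditionalWriterSketch {
      // Seed a row only if tx:seq does not exist yet; "mytable" is a placeholder.
      static void seedRow(Connector conn) throws Exception {
        ConditionalWriter cw = conn.createConditionalWriter("mytable", new ConditionalWriterConfig());
        try {
          // A Condition with no value requires the column to be absent.
          ConditionalMutation cm = new ConditionalMutation("r1", new Condition("tx", "seq"));
          cm.put("tx", "seq", "1");
          cm.put("data", "x", "a");
          if (cw.write(cm).getStatus() != Status.ACCEPTED) {
            // REJECTED: another writer seeded the row first; UNKNOWN: the outcome
            // is uncertain, so re-read the row before retrying.
          }
        } finally {
          cw.close();
        }
      }
    }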
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/ConfigurableMajorCompactionIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/ConfigurableMajorCompactionIT.java b/test/src/test/java/org/apache/accumulo/test/ConfigurableMajorCompactionIT.java
deleted file mode 100644
index bc45dda..0000000
--- a/test/src/test/java/org/apache/accumulo/test/ConfigurableMajorCompactionIT.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.fate.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.accumulo.tserver.compaction.CompactionPlan;
-import org.apache.accumulo.tserver.compaction.CompactionStrategy;
-import org.apache.accumulo.tserver.compaction.MajorCompactionRequest;
-import org.apache.accumulo.tserver.compaction.WriteParameters;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class ConfigurableMajorCompactionIT extends ConfigurableMacBase {
-
- @Override
- public int defaultTimeoutSeconds() {
- return 30;
- }
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- Map<String,String> siteConfig = new HashMap<String,String>();
- siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1s");
- cfg.setSiteConfig(siteConfig);
- }
-
- public static class TestCompactionStrategy extends CompactionStrategy {
-
- @Override
- public boolean shouldCompact(MajorCompactionRequest request) throws IOException {
- return request.getFiles().size() == 5;
- }
-
- @Override
- public CompactionPlan getCompactionPlan(MajorCompactionRequest request) throws IOException {
- CompactionPlan plan = new CompactionPlan();
- plan.inputFiles.addAll(request.getFiles().keySet());
- plan.writeParameters = new WriteParameters();
- plan.writeParameters.setBlockSize(1024 * 1024);
- plan.writeParameters.setCompressType("none");
- plan.writeParameters.setHdfsBlockSize(1024 * 1024);
- plan.writeParameters.setIndexBlockSize(10);
- plan.writeParameters.setReplication(7);
- return plan;
- }
- }
-
- @Test
- public void test() throws Exception {
- Connector conn = getConnector();
- String tableName = getUniqueNames(1)[0];
- conn.tableOperations().create(tableName);
- conn.tableOperations().setProperty(tableName, Property.TABLE_COMPACTION_STRATEGY.getKey(), TestCompactionStrategy.class.getName());
- writeFile(conn, tableName);
- writeFile(conn, tableName);
- writeFile(conn, tableName);
- writeFile(conn, tableName);
- UtilWaitThread.sleep(2 * 1000);
- assertEquals(4, countFiles(conn));
- writeFile(conn, tableName);
- int count = countFiles(conn);
- assertTrue(count == 1 || count == 5);
- while (count != 1) {
- UtilWaitThread.sleep(250);
- count = countFiles(conn);
- }
- }
-
- private int countFiles(Connector conn) throws Exception {
- Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(MetadataSchema.TabletsSection.getRange());
- s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
- return Iterators.size(s.iterator());
- }
-
- private void writeFile(Connector conn, String tableName) throws Exception {
- BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
- Mutation m = new Mutation("row");
- m.put("cf", "cq", "value");
- bw.addMutation(m);
- bw.close();
- conn.tableOperations().flush(tableName, null, null, true);
- }
-
-}
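The deleted test wires its strategy in through a single table property; below is a minimal sketch of the same hook, assuming a live Connector and a strategy class already on the tablet server classpath (the class name shown is a placeholder).

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.conf.Property;

    class CompactionStrategySketch {
      static void enable(Connector conn, String tableName) throws Exception {
        // The tablet server instantiates this class when planning major compactions;
        // "com.example.MyStrategy" stands in for any CompactionStrategy subclass.
        conn.tableOperations().setProperty(tableName,
            Property.TABLE_COMPACTION_STRATEGY.getKey(), "com.example.MyStrategy");
      }
    }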
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/CreateTableWithNewTableConfigIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/CreateTableWithNewTableConfigIT.java b/test/src/test/java/org/apache/accumulo/test/CreateTableWithNewTableConfigIT.java
deleted file mode 100644
index b80bcb7..0000000
--- a/test/src/test/java/org/apache/accumulo/test/CreateTableWithNewTableConfigIT.java
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.admin.NewTableConfiguration;
-import org.apache.accumulo.core.client.admin.TimeType;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.SharedMiniClusterBase;
-import org.junit.Assert;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterators;
-
-/**
- *
- */
-public class CreateTableWithNewTableConfigIT extends SharedMiniClusterBase {
- static private final Logger log = LoggerFactory.getLogger(CreateTableWithNewTableConfigIT.class);
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 30;
- }
-
- public int numProperties(Connector connector, String tableName) throws AccumuloException, TableNotFoundException {
- return Iterators.size(connector.tableOperations().getProperties(tableName).iterator());
- }
-
- public int compareProperties(Connector connector, String tableNameOrig, String tableName, String changedProp) throws AccumuloException,
- TableNotFoundException {
- int countOrig = 0;
- for (Entry<String,String> orig : connector.tableOperations().getProperties(tableNameOrig)) {
- countOrig++;
- boolean inNew = false; // reset per property so later misses are still detected
- for (Entry<String,String> entry : connector.tableOperations().getProperties(tableName)) {
- if (entry.equals(orig)) {
- inNew = true;
- break;
- } else if (entry.getKey().equals(orig.getKey()) && !entry.getKey().equals(changedProp))
- Assert.fail("Property " + orig.getKey() + " has different value than deprecated method");
- }
- if (!inNew)
- Assert.fail("Original property missing after using the new create method");
- }
- return countOrig;
- }
-
- public boolean checkTimeType(Connector connector, String tableName, TimeType expectedTimeType) throws TableNotFoundException {
- final Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- String tableID = connector.tableOperations().tableIdMap().get(tableName) + "<";
- for (Entry<Key,Value> entry : scanner) {
- Key k = entry.getKey();
-
- if (k.getRow().toString().equals(tableID) && k.getColumnQualifier().toString().equals(ServerColumnFamily.TIME_COLUMN.getColumnQualifier().toString())) {
- if (expectedTimeType == TimeType.MILLIS && entry.getValue().toString().charAt(0) == 'M')
- return true;
- if (expectedTimeType == TimeType.LOGICAL && entry.getValue().toString().charAt(0) == 'L')
- return true;
- }
- }
- return false;
- }
-
- @SuppressWarnings("deprecation")
- @Test
- public void tableNameOnly() throws Exception {
- log.info("Starting tableNameOnly");
-
- // Create a table with the initial properties
- Connector connector = getConnector();
- String tableName = getUniqueNames(2)[0];
- connector.tableOperations().create(tableName, new NewTableConfiguration());
-
- String tableNameOrig = "original";
- connector.tableOperations().create(tableNameOrig, true);
-
- int countNew = numProperties(connector, tableName);
- int countOrig = compareProperties(connector, tableNameOrig, tableName, null);
-
- Assert.assertEquals("Extra properties using the new create method", countOrig, countNew);
- Assert.assertTrue("Wrong TimeType", checkTimeType(connector, tableName, TimeType.MILLIS));
- }
-
- @SuppressWarnings("deprecation")
- @Test
- public void tableNameAndLimitVersion() throws Exception {
- log.info("Starting tableNameAndLimitVersion");
-
- // Create a table with the initial properties
- Connector connector = getConnector();
- String tableName = getUniqueNames(2)[0];
- boolean limitVersion = false;
- connector.tableOperations().create(tableName, new NewTableConfiguration().withoutDefaultIterators());
-
- String tableNameOrig = "originalWithLimitVersion";
- connector.tableOperations().create(tableNameOrig, limitVersion);
-
- int countNew = numProperties(connector, tableName);
- int countOrig = compareProperties(connector, tableNameOrig, tableName, null);
-
- Assert.assertEquals("Extra properties using the new create method", countOrig, countNew);
- Assert.assertTrue("Wrong TimeType", checkTimeType(connector, tableName, TimeType.MILLIS));
- }
-
- @SuppressWarnings("deprecation")
- @Test
- public void tableNameLimitVersionAndTimeType() throws Exception {
- log.info("Starting tableNameLimitVersionAndTimeType");
-
- // Create a table with the initial properties
- Connector connector = getConnector();
- String tableName = getUniqueNames(2)[0];
- boolean limitVersion = false;
- TimeType tt = TimeType.LOGICAL;
- connector.tableOperations().create(tableName, new NewTableConfiguration().withoutDefaultIterators().setTimeType(tt));
-
- String tableNameOrig = "originalWithLimitVersionAndTimeType";
- connector.tableOperations().create(tableNameOrig, limitVersion, tt);
-
- int countNew = numProperties(connector, tableName);
- int countOrig = compareProperties(connector, tableNameOrig, tableName, null);
-
- Assert.assertEquals("Extra properties using the new create method", countOrig, countNew);
- Assert.assertTrue("Wrong TimeType", checkTimeType(connector, tableName, tt));
- }
-
- @SuppressWarnings("deprecation")
- @Test
- public void addCustomPropAndChangeExisting() throws Exception {
- log.info("Starting addCustomPropAndChangeExisting");
-
- // Create and populate initial properties map for creating table 1
- Map<String,String> properties = new HashMap<String,String>();
- String propertyName = Property.TABLE_SPLIT_THRESHOLD.getKey();
- String volume = "10K";
- properties.put(propertyName, volume);
-
- String propertyName2 = "table.custom.testProp";
- String volume2 = "Test property";
- properties.put(propertyName2, volume2);
-
- // Create a table with the initial properties
- Connector connector = getConnector();
- String tableName = getUniqueNames(2)[0];
- connector.tableOperations().create(tableName, new NewTableConfiguration().setProperties(properties));
-
- String tableNameOrig = "originalWithTableName";
- connector.tableOperations().create(tableNameOrig, true);
-
- int countNew = numProperties(connector, tableName);
- int countOrig = compareProperties(connector, tableNameOrig, tableName, propertyName);
-
- for (Entry<String,String> entry : connector.tableOperations().getProperties(tableName)) {
- if (entry.getKey().equals(Property.TABLE_SPLIT_THRESHOLD.getKey()))
- Assert.assertTrue("TABLE_SPLIT_THRESHOLD has been changed", entry.getValue().equals("10K"));
- if (entry.getKey().equals("table.custom.testProp"))
- Assert.assertTrue("table.custom.testProp has been changed", entry.getValue().equals("Test property"));
- }
-
- Assert.assertEquals("Extra properties using the new create method", countOrig + 1, countNew);
- Assert.assertTrue("Wrong TimeType", checkTimeType(connector, tableName, TimeType.MILLIS));
-
- }
-}
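The deleted test compares the deprecated create(name, limitVersion, timeType) overloads against the fluent NewTableConfiguration API; here is a minimal sketch of the replacement path it validates (table name and property values mirror the test and are illustrative).

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.admin.NewTableConfiguration;
    import org.apache.accumulo.core.client.admin.TimeType;
    import org.apache.accumulo.core.conf.Property;

    class NewTableConfigSketch {
      static void create(Connector conn) throws Exception {
        Map<String,String> props = new HashMap<String,String>();
        props.put(Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
        props.put("table.custom.testProp", "Test property");
        // One fluent call covers what previously took several deprecated overloads.
        conn.tableOperations().create("example",
            new NewTableConfiguration().withoutDefaultIterators().setTimeType(TimeType.LOGICAL).setProperties(props));
      }
    }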
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/DumpConfigIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/DumpConfigIT.java b/test/src/test/java/org/apache/accumulo/test/DumpConfigIT.java
deleted file mode 100644
index 5cc37a5..0000000
--- a/test/src/test/java/org/apache/accumulo/test/DumpConfigIT.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.util.Collections;
-
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.util.Admin;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.accumulo.test.functional.FunctionalTestUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-public class DumpConfigIT extends ConfigurableMacBase {
-
- @Rule
- public TemporaryFolder folder = new TemporaryFolder(new File(System.getProperty("user.dir") + "/target"));
-
- @Override
- public int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setSiteConfig(Collections.singletonMap(Property.TABLE_FILE_BLOCK_SIZE.getKey(), "1234567"));
- }
-
- @Test
- public void test() throws Exception {
- File siteFileBackup = new File(folder.getRoot(), "accumulo-site.xml.bak");
- assertFalse(siteFileBackup.exists());
- assertEquals(0, exec(Admin.class, new String[] {"dumpConfig", "-a", "-d", folder.getRoot().getPath()}).waitFor());
- assertTrue(siteFileBackup.exists());
- String site = FunctionalTestUtils.readAll(new FileInputStream(siteFileBackup));
- assertTrue(site.contains(Property.TABLE_FILE_BLOCK_SIZE.getKey()));
- assertTrue(site.contains("1234567"));
- String meta = FunctionalTestUtils.readAll(new FileInputStream(new File(folder.getRoot(), MetadataTable.NAME + ".cfg")));
- assertTrue(meta.contains(Property.TABLE_FILE_REPLICATION.getKey()));
- String systemPerm = FunctionalTestUtils.readAll(new FileInputStream(new File(folder.getRoot(), "root_user.cfg")));
- assertTrue(systemPerm.contains("grant System.ALTER_USER -s -u root"));
- assertTrue(systemPerm.contains("grant Table.READ -t " + MetadataTable.NAME + " -u root"));
- assertFalse(systemPerm.contains("grant Table.DROP -t " + MetadataTable.NAME + " -u root"));
- }
-}
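The test drives Admin's dumpConfig through the test harness; operationally the same dump can be produced from the command line. A sketch via ProcessBuilder, assuming the standard accumulo launch script is on the PATH:

    import java.io.File;

    class DumpConfigSketch {
      static void dump(File outputDir) throws Exception {
        // Writes accumulo-site.xml.bak, per-table .cfg files, and per-user
        // permission files (e.g. root_user.cfg) into outputDir.
        Process p = new ProcessBuilder("accumulo", "admin", "dumpConfig", "-a", "-d", outputDir.getPath())
            .inheritIO().start();
        p.waitFor();
      }
    }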
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/ExistingMacIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/ExistingMacIT.java b/test/src/test/java/org/apache/accumulo/test/ExistingMacIT.java
deleted file mode 100644
index 52d2086..0000000
--- a/test/src/test/java/org/apache/accumulo/test/ExistingMacIT.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Collection;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.minicluster.impl.ProcessReference;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.Assert;
-import org.junit.Test;
-
-public class ExistingMacIT extends ConfigurableMacBase {
- @Override
- public int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
-
- // use raw local file system so walogs sync and flush will work
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- private void createEmptyConfig(File confFile) throws IOException {
- Configuration conf = new Configuration(false);
- OutputStream hcOut = new FileOutputStream(confFile);
- conf.writeXml(hcOut);
- hcOut.close();
- }
-
- @Test
- public void testExistingInstance() throws Exception {
-
- Connector conn = getCluster().getConnector("root", new PasswordToken(ROOT_PASSWORD));
-
- conn.tableOperations().create("table1");
-
- BatchWriter bw = conn.createBatchWriter("table1", new BatchWriterConfig());
-
- Mutation m1 = new Mutation("00081");
- m1.put("math", "sqroot", "9");
- m1.put("math", "sq", "6560");
-
- bw.addMutation(m1);
- bw.close();
-
- conn.tableOperations().flush("table1", null, null, true);
- // TODO use constants
- conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
- conn.tableOperations().flush(RootTable.NAME, null, null, true);
-
- Set<Entry<ServerType,Collection<ProcessReference>>> procs = getCluster().getProcesses().entrySet();
- for (Entry<ServerType,Collection<ProcessReference>> entry : procs) {
- if (entry.getKey() == ServerType.ZOOKEEPER)
- continue;
- for (ProcessReference pr : entry.getValue())
- getCluster().killProcess(entry.getKey(), pr);
- }
-
- // TODO clean out zookeeper? following sleep waits for ephemeral nodes to go away
- UtilWaitThread.sleep(10000);
-
- File hadoopConfDir = createTestDir(ExistingMacIT.class.getSimpleName() + "_hadoop_conf");
- FileUtils.deleteQuietly(hadoopConfDir);
- assertTrue(hadoopConfDir.mkdirs());
- createEmptyConfig(new File(hadoopConfDir, "core-site.xml"));
- createEmptyConfig(new File(hadoopConfDir, "hdfs-site.xml"));
-
- File testDir2 = createTestDir(ExistingMacIT.class.getSimpleName() + "_2");
- FileUtils.deleteQuietly(testDir2);
-
- MiniAccumuloConfigImpl macConfig2 = new MiniAccumuloConfigImpl(testDir2, "notused");
- macConfig2.useExistingInstance(new File(getCluster().getConfig().getConfDir(), "accumulo-site.xml"), hadoopConfDir);
-
- MiniAccumuloClusterImpl accumulo2 = new MiniAccumuloClusterImpl(macConfig2);
- accumulo2.start();
-
- conn = accumulo2.getConnector("root", new PasswordToken(ROOT_PASSWORD));
-
- Scanner scanner = conn.createScanner("table1", Authorizations.EMPTY);
-
- int sum = 0;
- for (Entry<Key,Value> entry : scanner) {
- sum += Integer.parseInt(entry.getValue().toString());
- }
-
- Assert.assertEquals(6569, sum);
-
- accumulo2.stop();
- }
-
- @Test
- public void testExistingRunningInstance() throws Exception {
- final String table = getUniqueNames(1)[0];
- Connector conn = getConnector();
- // Ensure that a master and tserver are up so the existing instance check won't fail.
- conn.tableOperations().create(table);
- BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
- Mutation m = new Mutation("foo");
- m.put("cf", "cq", "value");
- bw.addMutation(m);
- bw.close();
-
- File hadoopConfDir = createTestDir(ExistingMacIT.class.getSimpleName() + "_hadoop_conf_2");
- FileUtils.deleteQuietly(hadoopConfDir);
- assertTrue(hadoopConfDir.mkdirs());
- createEmptyConfig(new File(hadoopConfDir, "core-site.xml"));
- createEmptyConfig(new File(hadoopConfDir, "hdfs-site.xml"));
-
- File testDir2 = createTestDir(ExistingMacIT.class.getSimpleName() + "_3");
- FileUtils.deleteQuietly(testDir2);
-
- MiniAccumuloConfigImpl macConfig2 = new MiniAccumuloConfigImpl(testDir2, "notused");
- macConfig2.useExistingInstance(new File(getCluster().getConfig().getConfDir(), "accumulo-site.xml"), hadoopConfDir);
-
- System.out.println("conf " + new File(getCluster().getConfig().getConfDir(), "accumulo-site.xml"));
-
- MiniAccumuloClusterImpl accumulo2 = new MiniAccumuloClusterImpl(macConfig2);
- try {
- accumulo2.start();
- Assert.fail("A 2nd MAC instance should not be able to start over an existing MAC instance");
- } catch (RuntimeException e) {
- // TODO check message or throw more explicit exception
- }
- }
-}
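Both tests hinge on MiniAccumuloConfigImpl.useExistingInstance(); a minimal sketch of attaching a second MAC to an already-initialized instance follows (all paths are illustrative, and the root password argument is unused in this mode, as the tests' "notused" value suggests).

    import java.io.File;

    import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
    import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;

    class ExistingInstanceSketch {
      static MiniAccumuloClusterImpl attach() throws Exception {
        MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(new File("/tmp/mac2"), "notused");
        // Point the new cluster at the existing instance's site config and Hadoop conf dir.
        cfg.useExistingInstance(new File("/path/to/conf/accumulo-site.xml"), new File("/path/to/hadoop-conf"));
        MiniAccumuloClusterImpl mac = new MiniAccumuloClusterImpl(cfg);
        mac.start(); // fails if the existing instance's servers are still running
        return mac;
      }
    }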
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/FileArchiveIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/FileArchiveIT.java b/test/src/test/java/org/apache/accumulo/test/FileArchiveIT.java
deleted file mode 100644
index 8e51984..0000000
--- a/test/src/test/java/org/apache/accumulo/test/FileArchiveIT.java
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.Assert;
-import org.junit.Test;
-
-import com.google.common.collect.Iterables;
-
-/**
- * Tests that files are archived instead of deleted when configured.
- */
-public class FileArchiveIT extends ConfigurableMacBase {
-
- @Override
- public int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
- cfg.setProperty(Property.GC_FILE_ARCHIVE, "true");
- cfg.setProperty(Property.GC_CYCLE_DELAY, "1s");
- cfg.setProperty(Property.GC_CYCLE_START, "1s");
- }
-
- @Test
- public void testUnusuedFilesAreArchived() throws Exception {
- final Connector conn = getConnector();
- final String tableName = getUniqueNames(1)[0];
-
- conn.tableOperations().create(tableName);
-
- final String tableId = conn.tableOperations().tableIdMap().get(tableName);
- Assert.assertNotNull("Could not get table ID", tableId);
-
- BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
- Mutation m = new Mutation("row");
- m.put("", "", "value");
- bw.addMutation(m);
- bw.close();
-
- // Compact memory to disk
- conn.tableOperations().compact(tableName, null, null, true, true);
-
- Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
- s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
-
- Entry<Key,Value> entry = Iterables.getOnlyElement(s);
- final String file = entry.getKey().getColumnQualifier().toString();
- final Path p = new Path(file);
-
- // Then force another to make an unreferenced file
- conn.tableOperations().compact(tableName, null, null, true, true);
-
- log.info("File for table: " + file);
-
- FileSystem fs = getCluster().getFileSystem();
- int i = 0;
- while (fs.exists(p)) {
- i++;
- Thread.sleep(1000);
- if (0 == i % 10) {
- log.info("Waited " + i + " iterations, file still exists");
- }
- }
-
- log.info("File was removed");
-
- String filePath = p.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
-
- log.info("File relative to accumulo dir: " + filePath);
-
- Path fileArchiveDir = new Path(getCluster().getConfig().getAccumuloDir().toString(), ServerConstants.FILE_ARCHIVE_DIR);
-
- Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
-
- // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
- Path archivedFile = new Path(fileArchiveDir, filePath.substring(1));
-
- Assert.assertTrue("File doesn't exists in archive directory: " + archivedFile, fs.exists(archivedFile));
- }
-
- @Test
- public void testDeletedTableIsArchived() throws Exception {
- final Connector conn = getConnector();
- final String tableName = getUniqueNames(1)[0];
-
- conn.tableOperations().create(tableName);
-
- final String tableId = conn.tableOperations().tableIdMap().get(tableName);
- Assert.assertNotNull("Could not get table ID", tableId);
-
- BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
- Mutation m = new Mutation("row");
- m.put("", "", "value");
- bw.addMutation(m);
- bw.close();
-
- // Compact memory to disk
- conn.tableOperations().compact(tableName, null, null, true, true);
-
- Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
- s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
-
- Entry<Key,Value> entry = Iterables.getOnlyElement(s);
- final String file = entry.getKey().getColumnQualifier().toString();
- final Path p = new Path(file);
-
- conn.tableOperations().delete(tableName);
-
- log.info("File for table: " + file);
-
- FileSystem fs = getCluster().getFileSystem();
- int i = 0;
- while (fs.exists(p)) {
- i++;
- Thread.sleep(1000);
- if (0 == i % 10) {
- log.info("Waited " + i + " iterations, file still exists");
- }
- }
-
- log.info("File was removed");
-
- String filePath = p.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
-
- log.info("File relative to accumulo dir: " + filePath);
-
- Path fileArchiveDir = new Path(getCluster().getConfig().getAccumuloDir().toString(), ServerConstants.FILE_ARCHIVE_DIR);
-
- Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
-
- // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
- Path archivedFile = new Path(fileArchiveDir, filePath.substring(1));
-
- Assert.assertTrue("File doesn't exists in archive directory: " + archivedFile, fs.exists(archivedFile));
- }
-
- @Test
- public void testUnusuedFilesAndDeletedTable() throws Exception {
- final Connector conn = getConnector();
- final String tableName = getUniqueNames(1)[0];
-
- conn.tableOperations().create(tableName);
-
- final String tableId = conn.tableOperations().tableIdMap().get(tableName);
- Assert.assertNotNull("Could not get table ID", tableId);
-
- BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
- Mutation m = new Mutation("row");
- m.put("", "", "value");
- bw.addMutation(m);
- bw.close();
-
- // Compact memory to disk
- conn.tableOperations().compact(tableName, null, null, true, true);
-
- Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
- s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
-
- Entry<Key,Value> entry = Iterables.getOnlyElement(s);
- final String file = entry.getKey().getColumnQualifier().toString();
- final Path p = new Path(file);
-
- // Then force another to make an unreferenced file
- conn.tableOperations().compact(tableName, null, null, true, true);
-
- log.info("File for table: " + file);
-
- FileSystem fs = getCluster().getFileSystem();
- int i = 0;
- while (fs.exists(p)) {
- i++;
- Thread.sleep(1000);
- if (0 == i % 10) {
- log.info("Waited " + i + " iterations, file still exists");
- }
- }
-
- log.info("File was removed");
-
- String filePath = p.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
-
- log.info("File relative to accumulo dir: " + filePath);
-
- Path fileArchiveDir = new Path(getCluster().getConfig().getAccumuloDir().toString(), ServerConstants.FILE_ARCHIVE_DIR);
-
- Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
-
- // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
- Path archivedFile = new Path(fileArchiveDir, filePath.substring(1));
-
- Assert.assertTrue("File doesn't exists in archive directory: " + archivedFile, fs.exists(archivedFile));
-
- // Offline the table so we can be sure there is a single file
- conn.tableOperations().offline(tableName, true);
-
- // Capture the file the metadata table currently references
- s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
- s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
-
- entry = Iterables.getOnlyElement(s);
- final String finalFile = entry.getKey().getColumnQualifier().toString();
- final Path finalPath = new Path(finalFile);
-
- conn.tableOperations().delete(tableName);
-
- log.info("File for table: " + finalPath);
-
- i = 0;
- while (fs.exists(finalPath)) {
- i++;
- Thread.sleep(1000);
- if (0 == i % 10) {
- log.info("Waited " + i + " iterations, file still exists");
- }
- }
-
- log.info("File was removed");
-
- String finalFilePath = finalPath.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
-
- log.info("File relative to accumulo dir: " + finalFilePath);
-
- Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
-
- // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
- Path finalArchivedFile = new Path(fileArchiveDir, finalFilePath.substring(1));
-
- Assert.assertTrue("File doesn't exists in archive directory: " + finalArchivedFile, fs.exists(finalArchivedFile));
- }
-
-}
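All three cases depend only on the garbage-collector properties set in configure(); here is a minimal sketch of that hook in isolation, with the same values the test uses.

    import org.apache.accumulo.core.conf.Property;
    import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;

    class FileArchiveConfigSketch {
      static void configure(MiniAccumuloConfigImpl cfg) {
        // With GC_FILE_ARCHIVE enabled, the GC moves unreferenced files into the
        // file archive directory instead of deleting them.
        cfg.setProperty(Property.GC_FILE_ARCHIVE, "true");
        cfg.setProperty(Property.GC_CYCLE_DELAY, "1s");
        cfg.setProperty(Property.GC_CYCLE_START, "1s");
      }
    }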
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/TableOperationsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/TableOperationsIT.java b/test/src/test/java/org/apache/accumulo/test/TableOperationsIT.java
deleted file mode 100644
index 789b089..0000000
--- a/test/src/test/java/org/apache/accumulo/test/TableOperationsIT.java
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.admin.DiskUsage;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.constraints.DefaultKeySizeConstraint;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.PartialKey;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.test.functional.BadIterator;
-import org.apache.hadoop.io.Text;
-import org.apache.thrift.TException;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-
-public class TableOperationsIT extends AccumuloClusterHarness {
-
- static TabletClientService.Client client;
-
- private Connector connector;
-
- @Override
- public int defaultTimeoutSeconds() {
- return 30;
- }
-
- @Before
- public void setup() throws Exception {
- connector = getConnector();
- }
-
- @Test
- public void getDiskUsageErrors() throws TableExistsException, AccumuloException, AccumuloSecurityException, TableNotFoundException, TException {
- String tableName = getUniqueNames(1)[0];
- connector.tableOperations().create(tableName);
- List<DiskUsage> diskUsage = connector.tableOperations().getDiskUsage(Collections.singleton(tableName));
- assertEquals(1, diskUsage.size());
- assertEquals(0, (long) diskUsage.get(0).getUsage());
- assertEquals(tableName, diskUsage.get(0).getTables().iterator().next());
-
- connector.securityOperations().revokeTablePermission(getAdminPrincipal(), tableName, TablePermission.READ);
- try {
- connector.tableOperations().getDiskUsage(Collections.singleton(tableName));
- fail("Should throw securityexception");
- } catch (AccumuloSecurityException e) {}
-
- connector.tableOperations().delete(tableName);
- try {
- connector.tableOperations().getDiskUsage(Collections.singleton(tableName));
- fail("Should throw tablenotfound");
- } catch (TableNotFoundException e) {}
- }
-
- @Test
- public void getDiskUsage() throws TableExistsException, AccumuloException, AccumuloSecurityException, TableNotFoundException, TException {
- final String[] names = getUniqueNames(2);
- String tableName = names[0];
- connector.tableOperations().create(tableName);
-
- // verify 0 disk usage
- List<DiskUsage> diskUsages = connector.tableOperations().getDiskUsage(Collections.singleton(tableName));
- assertEquals(1, diskUsages.size());
- assertEquals(1, diskUsages.get(0).getTables().size());
- assertEquals(Long.valueOf(0), diskUsages.get(0).getUsage());
- assertEquals(tableName, diskUsages.get(0).getTables().first());
-
- // add some data
- BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
- Mutation m = new Mutation("a");
- m.put("b", "c", new Value("abcde".getBytes()));
- bw.addMutation(m);
- bw.flush();
- bw.close();
-
- connector.tableOperations().compact(tableName, new Text("A"), new Text("z"), true, true);
-
- // verify we have usage
- diskUsages = connector.tableOperations().getDiskUsage(Collections.singleton(tableName));
- assertEquals(1, diskUsages.size());
- assertEquals(1, diskUsages.get(0).getTables().size());
- assertTrue(diskUsages.get(0).getUsage() > 0);
- assertEquals(tableName, diskUsages.get(0).getTables().first());
-
- String newTable = names[1];
-
- // clone table
- connector.tableOperations().clone(tableName, newTable, false, null, null);
-
- // verify tables are exactly the same
- Set<String> tables = new HashSet<String>();
- tables.add(tableName);
- tables.add(newTable);
- diskUsages = connector.tableOperations().getDiskUsage(tables);
- assertEquals(1, diskUsages.size());
- assertEquals(2, diskUsages.get(0).getTables().size());
- assertTrue(diskUsages.get(0).getUsage() > 0);
-
- connector.tableOperations().compact(tableName, new Text("A"), new Text("z"), true, true);
- connector.tableOperations().compact(newTable, new Text("A"), new Text("z"), true, true);
-
- // verify tables have differences
- diskUsages = connector.tableOperations().getDiskUsage(tables);
- assertEquals(2, diskUsages.size());
- assertEquals(1, diskUsages.get(0).getTables().size());
- assertEquals(1, diskUsages.get(1).getTables().size());
- assertTrue(diskUsages.get(0).getUsage() > 0);
- assertTrue(diskUsages.get(1).getUsage() > 0);
-
- connector.tableOperations().delete(tableName);
- }
-
- @Test
- public void createTable() throws TableExistsException, AccumuloException, AccumuloSecurityException, TableNotFoundException {
- String tableName = getUniqueNames(1)[0];
- connector.tableOperations().create(tableName);
- Iterable<Map.Entry<String,String>> itrProps = connector.tableOperations().getProperties(tableName);
- Map<String,String> props = propsToMap(itrProps);
- assertEquals(DefaultKeySizeConstraint.class.getName(), props.get(Property.TABLE_CONSTRAINT_PREFIX.toString() + "1"));
- connector.tableOperations().delete(tableName);
- }
-
- @Test
- public void createMergeClonedTable() throws Exception {
- String[] names = getUniqueNames(2);
- String originalTable = names[0];
- TableOperations tops = connector.tableOperations();
-
- TreeSet<Text> splits = Sets.newTreeSet(Arrays.asList(new Text("a"), new Text("b"), new Text("c"), new Text("d")));
-
- tops.create(originalTable);
- tops.addSplits(originalTable, splits);
-
- BatchWriter bw = connector.createBatchWriter(originalTable, new BatchWriterConfig());
- for (Text row : splits) {
- Mutation m = new Mutation(row);
- for (int i = 0; i < 10; i++) {
- for (int j = 0; j < 10; j++) {
- m.put(Integer.toString(i), Integer.toString(j), Integer.toString(i + j));
- }
- }
-
- bw.addMutation(m);
- }
-
- bw.close();
-
- String clonedTable = names[1];
-
- tops.clone(originalTable, clonedTable, true, null, null);
- tops.merge(clonedTable, null, new Text("b"));
-
- Map<String,Integer> rowCounts = Maps.newHashMap();
- Scanner s = connector.createScanner(clonedTable, new Authorizations());
- for (Entry<Key,Value> entry : s) {
- final Key key = entry.getKey();
- String row = key.getRow().toString();
- String cf = key.getColumnFamily().toString(), cq = key.getColumnQualifier().toString();
- String value = entry.getValue().toString();
-
- if (rowCounts.containsKey(row)) {
- rowCounts.put(row, rowCounts.get(row) + 1);
- } else {
- rowCounts.put(row, 1);
- }
-
- Assert.assertEquals(Integer.parseInt(cf) + Integer.parseInt(cq), Integer.parseInt(value));
- }
-
- Collection<Text> clonedSplits = tops.listSplits(clonedTable);
- Set<Text> expectedSplits = Sets.newHashSet(new Text("b"), new Text("c"), new Text("d"));
- for (Text clonedSplit : clonedSplits) {
- Assert.assertTrue("Encountered unexpected split on the cloned table: " + clonedSplit, expectedSplits.remove(clonedSplit));
- }
-
- Assert.assertTrue("Did not find all expected splits on the cloned table: " + expectedSplits, expectedSplits.isEmpty());
- }
-
- private Map<String,String> propsToMap(Iterable<Map.Entry<String,String>> props) {
- Map<String,String> map = new HashMap<String,String>();
- for (Map.Entry<String,String> prop : props) {
- map.put(prop.getKey(), prop.getValue());
- }
- return map;
- }
-
- @Test
- public void testCompactEmptyTableWithGeneratorIterator() throws TableExistsException, AccumuloException, AccumuloSecurityException, TableNotFoundException {
- String tableName = getUniqueNames(1)[0];
- connector.tableOperations().create(tableName);
-
- List<IteratorSetting> list = new ArrayList<>();
- list.add(new IteratorSetting(15, HardListIterator.class));
- connector.tableOperations().compact(tableName, null, null, list, true, true);
-
- Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
- Map<Key,Value> actual = new TreeMap<>(COMPARE_KEY_TO_COLQ); // only compare row, colF, colQ
- for (Map.Entry<Key,Value> entry : scanner)
- actual.put(entry.getKey(), entry.getValue());
- assertEquals(HardListIterator.allEntriesToInject, actual);
- connector.tableOperations().delete(tableName);
- }
-
- /** Compare only the row, column family and column qualifier. */
- static class KeyRowColFColQComparator implements Comparator<Key> {
- @Override
- public int compare(Key k1, Key k2) {
- return k1.compareTo(k2, PartialKey.ROW_COLFAM_COLQUAL);
- }
- }
-
- static final KeyRowColFColQComparator COMPARE_KEY_TO_COLQ = new KeyRowColFColQComparator();
-
- @Test
- public void testCompactEmptyTableWithGeneratorIterator_Splits() throws TableExistsException, AccumuloException, AccumuloSecurityException,
- TableNotFoundException {
- String tableName = getUniqueNames(1)[0];
- connector.tableOperations().create(tableName);
- SortedSet<Text> splitset = new TreeSet<>();
- splitset.add(new Text("f"));
- connector.tableOperations().addSplits(tableName, splitset);
-
- List<IteratorSetting> list = new ArrayList<>();
- list.add(new IteratorSetting(15, HardListIterator.class));
- connector.tableOperations().compact(tableName, null, null, list, true, true);
-
- Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
- Map<Key,Value> actual = new TreeMap<>(COMPARE_KEY_TO_COLQ); // only compare row, colF, colQ
- for (Map.Entry<Key,Value> entry : scanner)
- actual.put(entry.getKey(), entry.getValue());
- assertEquals(HardListIterator.allEntriesToInject, actual);
- connector.tableOperations().delete(tableName);
- }
-
- @Test
- public void testCompactEmptyTableWithGeneratorIterator_Splits_Cancel() throws TableExistsException, AccumuloException, AccumuloSecurityException,
- TableNotFoundException {
- String tableName = getUniqueNames(1)[0];
- connector.tableOperations().create(tableName);
- SortedSet<Text> splitset = new TreeSet<>();
- splitset.add(new Text("f"));
- connector.tableOperations().addSplits(tableName, splitset);
-
- List<IteratorSetting> list = new ArrayList<>();
- list.add(new IteratorSetting(15, HardListIterator.class));
- connector.tableOperations().compact(tableName, null, null, list, true, false); // don't block
- connector.tableOperations().cancelCompaction(tableName);
- // depending on timing, compaction will finish or be canceled
-
- Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
- Map<Key,Value> actual = new TreeMap<>(COMPARE_KEY_TO_COLQ); // only compare row, colF, colQ
- for (Map.Entry<Key,Value> entry : scanner)
- actual.put(entry.getKey(), entry.getValue());
- switch (actual.size()) {
- case 3:
- // Compaction cancel didn't happen in time
- assertEquals(HardListIterator.allEntriesToInject, actual);
- break;
- case 2:
- // Compacted the first tablet (-inf, f)
- assertEquals(HardListIterator.allEntriesToInject.headMap(new Key("f")), actual);
- break;
- case 1:
- // Compacted the second tablet [f, +inf)
- assertEquals(HardListIterator.allEntriesToInject.tailMap(new Key("f")), actual);
- break;
- case 0:
- // Cancelled the compaction before it ran. No generated entries.
- break;
- default:
- Assert.fail("Unexpected number of entries");
- break;
- }
- connector.tableOperations().delete(tableName);
- }
-
- @Test
- public void testCompactEmptyTableWithGeneratorIterator_Splits_Partial() throws TableExistsException, AccumuloException, AccumuloSecurityException,
- TableNotFoundException {
- String tableName = getUniqueNames(1)[0];
- connector.tableOperations().create(tableName);
- Text splitRow = new Text("f");
- SortedSet<Text> splitset = new TreeSet<>();
- splitset.add(splitRow);
- connector.tableOperations().addSplits(tableName, splitset);
-
- List<IteratorSetting> list = new ArrayList<>();
- list.add(new IteratorSetting(15, HardListIterator.class));
- // compact the second tablet, not the first
- connector.tableOperations().compact(tableName, splitRow, null, list, true, true);
-
- Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
- Map<Key,Value> actual = new TreeMap<>(COMPARE_KEY_TO_COLQ); // only compare row, colF, colQ
- for (Map.Entry<Key,Value> entry : scanner)
- actual.put(entry.getKey(), entry.getValue());
- // only expect the entries in the second tablet
- assertEquals(HardListIterator.allEntriesToInject.tailMap(new Key(splitRow)), actual);
- connector.tableOperations().delete(tableName);
- }
-
- /** Test recovery from a bad major compaction iterator via compaction cancel. */
- @Test
- public void testCompactEmptyTablesWithBadIterator_FailsAndCancel() throws TableExistsException, AccumuloException, AccumuloSecurityException,
- TableNotFoundException {
- String tableName = getUniqueNames(1)[0];
- connector.tableOperations().create(tableName);
-
- List<IteratorSetting> list = new ArrayList<>();
- list.add(new IteratorSetting(15, BadIterator.class));
- connector.tableOperations().compact(tableName, null, null, list, true, false); // don't block
- UtilWaitThread.sleep(2000); // give the compaction time to start
- connector.tableOperations().cancelCompaction(tableName);
-
- Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
- Map<Key,Value> actual = new TreeMap<>();
- for (Map.Entry<Key,Value> entry : scanner)
- actual.put(entry.getKey(), entry.getValue());
- assertTrue("Should be empty. Actual is " + actual, actual.isEmpty());
- connector.tableOperations().delete(tableName);
- }
-
-}
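
A note on HardListIterator, which the tests above reference but which is defined elsewhere in the test module: it is a generator-style iterator that ignores its source and emits a fixed, pre-sorted set of entries, which is why compacting an empty table materializes data. The following is a minimal sketch of that idea, not the actual class; the name FixedEntriesIterator and the entry contents are illustrative, and the framework is assumed to call seek() before any read, as it normally does.

import static java.nio.charset.StandardCharsets.UTF_8;

import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;

public class FixedEntriesIterator implements SortedKeyValueIterator<Key,Value> {

  // the fixed entries to inject; the contents here are illustrative
  private static final SortedMap<Key,Value> ENTRIES;
  static {
    SortedMap<Key,Value> m = new TreeMap<>();
    m.put(new Key("a1", "cf", "cq"), new Value("v1".getBytes(UTF_8)));
    m.put(new Key("m1", "cf", "cq"), new Value("v2".getBytes(UTF_8)));
    m.put(new Key("z1", "cf", "cq"), new Value("v3".getBytes(UTF_8)));
    ENTRIES = Collections.unmodifiableSortedMap(m);
  }

  private Iterator<Map.Entry<Key,Value>> iter;
  private Map.Entry<Key,Value> top;
  private Range range;

  @Override
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) {
    // the source is ignored entirely; only the fixed entries are emitted
  }

  @Override
  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) {
    this.range = range;
    iter = ENTRIES.entrySet().iterator();
    next();
    // skip entries before the requested range; combined with the end-of-range
    // check in hasTop(), this is what makes each tablet's compaction emit only
    // the entries that fall within its extent
    while (top != null && range.beforeStartKey(top.getKey()))
      next();
  }

  @Override
  public boolean hasTop() {
    return top != null && !range.afterEndKey(top.getKey());
  }

  @Override
  public void next() {
    top = iter.hasNext() ? iter.next() : null;
  }

  @Override
  public Key getTopKey() {
    return top.getKey();
  }

  @Override
  public Value getTopValue() {
    return top.getValue();
  }

  @Override
  public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
    return new FixedEntriesIterator(); // stateless apart from iteration position
  }
}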
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/TabletServerGivesUpIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/TabletServerGivesUpIT.java b/test/src/test/java/org/apache/accumulo/test/TabletServerGivesUpIT.java
deleted file mode 100644
index 06bf394..0000000
--- a/test/src/test/java/org/apache/accumulo/test/TabletServerGivesUpIT.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.TreeSet;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-// ACCUMULO-2480
-public class TabletServerGivesUpIT extends ConfigurableMacBase {
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.useMiniDFS(true);
- cfg.setNumTservers(1);
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
- }
-
- @Test(timeout = 30 * 1000)
- public void test() throws Exception {
- final Connector conn = this.getConnector();
- // Yes, there's a tabletserver
- assertEquals(1, conn.instanceOperations().getTabletServers().size());
- final String tableName = getUniqueNames(1)[0];
- conn.tableOperations().create(tableName);
- // Kill dfs
- cluster.getMiniDfs().shutdown();
- // ask the tserver to do something
- final AtomicReference<Exception> ex = new AtomicReference<>();
- Thread splitter = new Thread() {
- @Override
- public void run() {
- try {
- TreeSet<Text> splits = new TreeSet<>();
- splits.add(new Text("X"));
- conn.tableOperations().addSplits(tableName, splits);
- } catch (Exception e) {
- ex.set(e);
- }
- }
- };
- splitter.start();
- // wait for the tserver to give up on writing to the WAL
- while (conn.instanceOperations().getTabletServers().size() == 1) {
- UtilWaitThread.sleep(1000);
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/TotalQueuedIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/TotalQueuedIT.java b/test/src/test/java/org/apache/accumulo/test/TotalQueuedIT.java
deleted file mode 100644
index bf2e7f1..0000000
--- a/test/src/test/java/org/apache/accumulo/test/TotalQueuedIT.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertTrue;
-
-import java.util.Random;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.master.thrift.TabletServerStatus;
-import org.apache.accumulo.core.rpc.ThriftUtil;
-import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.MemoryUnit;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.AccumuloServerContext;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
-import com.google.common.net.HostAndPort;
-
-// see ACCUMULO-1950
-public class TotalQueuedIT extends ConfigurableMacBase {
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setNumTservers(1);
- cfg.setDefaultMemory(cfg.getDefaultMemory() * 2, MemoryUnit.BYTE);
- cfg.useMiniDFS();
- }
-
- int SMALL_QUEUE_SIZE = 100000;
- int LARGE_QUEUE_SIZE = SMALL_QUEUE_SIZE * 10;
- static final long N = 1000000;
-
- @Test(timeout = 4 * 60 * 1000)
- public void test() throws Exception {
- Random random = new Random();
- Connector c = getConnector();
- c.instanceOperations().setProperty(Property.TSERV_TOTAL_MUTATION_QUEUE_MAX.getKey(), "" + SMALL_QUEUE_SIZE);
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "9999");
- c.tableOperations().setProperty(tableName, Property.TABLE_FILE_MAX.getKey(), "999");
- UtilWaitThread.sleep(1000);
- // get an idea of how fast the syncs occur
- byte row[] = new byte[250];
- BatchWriterConfig cfg = new BatchWriterConfig();
- cfg.setMaxWriteThreads(10);
- cfg.setMaxLatency(1, TimeUnit.SECONDS);
- cfg.setMaxMemory(1024 * 1024);
- long realSyncs = getSyncs();
- BatchWriter bw = c.createBatchWriter(tableName, cfg);
- long now = System.currentTimeMillis();
- long bytesSent = 0;
- for (int i = 0; i < N; i++) {
- random.nextBytes(row);
- Mutation m = new Mutation(row);
- m.put("", "", "");
- bw.addMutation(m);
- bytesSent += m.estimatedMemoryUsed();
- }
- bw.close();
- long diff = System.currentTimeMillis() - now;
- double secs = diff / 1000.;
- double syncs = (double) bytesSent / SMALL_QUEUE_SIZE;
- double syncsPerSec = syncs / secs;
- System.out.println(String.format("Sent %d bytes in %f secs approximately %d syncs (%f syncs per sec)", bytesSent, secs, ((long) syncs), syncsPerSec));
- long update = getSyncs();
- System.out.println("Syncs " + (update - realSyncs));
- realSyncs = update;
-
- // Now with a much bigger total queue
- c.instanceOperations().setProperty(Property.TSERV_TOTAL_MUTATION_QUEUE_MAX.getKey(), "" + LARGE_QUEUE_SIZE);
- c.tableOperations().flush(tableName, null, null, true);
- UtilWaitThread.sleep(1000);
- bw = c.createBatchWriter(tableName, cfg);
- now = System.currentTimeMillis();
- bytesSent = 0;
- for (int i = 0; i < N; i++) {
- random.nextBytes(row);
- Mutation m = new Mutation(row);
- m.put("", "", "");
- bw.addMutation(m);
- bytesSent += m.estimatedMemoryUsed();
- }
- bw.close();
- diff = System.currentTimeMillis() - now;
- secs = diff / 1000.;
- syncs = (double) bytesSent / LARGE_QUEUE_SIZE;
- syncsPerSec = syncs / secs;
- System.out.println(String.format("Sent %d bytes in %f secs approximately %d syncs (%f syncs per sec)", bytesSent, secs, ((long) syncs), syncsPerSec));
- update = getSyncs();
- System.out.println("Syncs " + (update - realSyncs));
- assertTrue(update - realSyncs < realSyncs);
- }
-
- private long getSyncs() throws Exception {
- Connector c = getConnector();
- ServerConfigurationFactory confFactory = new ServerConfigurationFactory(c.getInstance());
- AccumuloServerContext context = new AccumuloServerContext(confFactory);
- // only one tablet server is configured, so return the first server's sync count
- for (String address : c.instanceOperations().getTabletServers()) {
- TabletClientService.Client client = ThriftUtil.getTServerClient(HostAndPort.fromString(address), context);
- TabletServerStatus status = client.getTabletServerStatus(null, context.rpcCreds());
- return status.syncs;
- }
- return 0;
- }
-
-}
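
The sync estimate above is simple arithmetic: the tablet server syncs the write-ahead log roughly once each time the mutation queue fills, so syncs ≈ bytesSent / queueSize. For example, if the million mutations here amount to roughly 300 MB, the 100 KB queue predicts on the order of 3,000 syncs while the 1 MB queue predicts around 300, which is why the final assertion requires the second run to record fewer syncs than the first.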
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/TracerRecoversAfterOfflineTableIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/TracerRecoversAfterOfflineTableIT.java b/test/src/test/java/org/apache/accumulo/test/TracerRecoversAfterOfflineTableIT.java
deleted file mode 100644
index 1c6e3df..0000000
--- a/test/src/test/java/org/apache/accumulo/test/TracerRecoversAfterOfflineTableIT.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertTrue;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.trace.DistributedTrace;
-import org.apache.accumulo.core.trace.Span;
-import org.apache.accumulo.core.trace.Trace;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.accumulo.tracer.TraceDump;
-import org.apache.accumulo.tracer.TraceDump.Printer;
-import org.apache.accumulo.tracer.TraceServer;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-/**
- *
- */
-public class TracerRecoversAfterOfflineTableIT extends ConfigurableMacBase {
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
- cfg.setNumTservers(1);
- }
-
- @Override
- public int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Test
- public void test() throws Exception {
- Process tracer = null;
- Connector conn = getConnector();
- if (!conn.tableOperations().exists("trace")) {
- MiniAccumuloClusterImpl mac = cluster;
- tracer = mac.exec(TraceServer.class);
- while (!conn.tableOperations().exists("trace")) {
- UtilWaitThread.sleep(1000);
- }
- UtilWaitThread.sleep(5000);
- }
-
- log.info("Taking table offline");
- conn.tableOperations().offline("trace", true);
-
- String tableName = getUniqueNames(1)[0];
- conn.tableOperations().create(tableName);
-
- log.info("Start a distributed trace span");
-
- DistributedTrace.enable("localhost", "testTrace", getClientConfig());
- Span root = Trace.on("traceTest");
- BatchWriter bw = conn.createBatchWriter(tableName, null);
- Mutation m = new Mutation("m");
- m.put("a", "b", "c");
- bw.addMutation(m);
- bw.close();
- root.stop();
-
- log.info("Bringing trace table back online");
- conn.tableOperations().online("trace", true);
-
- log.info("Trace table is online, should be able to find trace");
-
- final Scanner scanner = conn.createScanner("trace", Authorizations.EMPTY);
- scanner.setRange(new Range(new Text(Long.toHexString(root.traceId()))));
- while (true) {
- final StringBuffer finalBuffer = new StringBuffer();
- int traceCount = TraceDump.printTrace(scanner, new Printer() {
- @Override
- public void print(final String line) {
- try {
- finalBuffer.append(line).append("\n");
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
- }
- });
- String traceOutput = finalBuffer.toString();
- log.info("Trace output:" + traceOutput);
- if (traceCount > 0) {
- int lastPos = 0;
- for (String part : "traceTest,close,binMutations".split(",")) {
- log.info("Looking in trace output for '" + part + "'");
- int pos = traceOutput.indexOf(part);
- assertTrue("Did not find '" + part + "' in output", pos > 0);
- assertTrue("'" + part + "' occurred earlier than the previous element unexpectedly", pos > lastPos);
- lastPos = pos;
- }
- break;
- } else {
- log.info("Ignoring trace output as traceCount not greater than zero: " + traceCount);
- Thread.sleep(1000);
- }
- }
- if (tracer != null) {
- tracer.destroy();
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/TransportCachingIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/TransportCachingIT.java b/test/src/test/java/org/apache/accumulo/test/TransportCachingIT.java
deleted file mode 100644
index 9cc3dc0..0000000
--- a/test/src/test/java/org/apache/accumulo/test/TransportCachingIT.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static com.google.common.base.Charsets.UTF_8;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.impl.ClientContext;
-import org.apache.accumulo.core.client.impl.Credentials;
-import org.apache.accumulo.core.client.impl.ThriftTransportKey;
-import org.apache.accumulo.core.client.impl.ThriftTransportPool;
-import org.apache.accumulo.core.conf.DefaultConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.util.ServerServices;
-import org.apache.accumulo.core.util.ServerServices.Service;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.ZooCache;
-import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.thrift.transport.TTransport;
-import org.apache.thrift.transport.TTransportException;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Test that {@link ThriftTransportPool} actually adheres to the cachedConnection argument
- */
-public class TransportCachingIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(TransportCachingIT.class);
-
- @Test
- public void testCachedTransport() {
- Connector conn = getConnector();
- Instance instance = conn.getInstance();
- ClientConfiguration clientConf = cluster.getClientConfig();
- ClientContext context = new ClientContext(instance, new Credentials(getAdminPrincipal(), getAdminToken()), clientConf);
- long rpcTimeout = DefaultConfiguration.getTimeInMillis(Property.GENERAL_RPC_TIMEOUT.getDefaultValue());
-
- // create list of servers
- ArrayList<ThriftTransportKey> servers = new ArrayList<ThriftTransportKey>();
-
- // add tservers
- ZooCache zc = new ZooCacheFactory().getZooCache(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut());
- for (String tserver : zc.getChildren(ZooUtil.getRoot(instance) + Constants.ZTSERVERS)) {
- String path = ZooUtil.getRoot(instance) + Constants.ZTSERVERS + "/" + tserver;
- byte[] data = ZooUtil.getLockData(zc, path);
- if (data != null) {
- String strData = new String(data, UTF_8);
- if (!strData.equals("master"))
- servers.add(new ThriftTransportKey(new ServerServices(strData).getAddress(Service.TSERV_CLIENT), rpcTimeout, context));
- }
- }
-
- ThriftTransportPool pool = ThriftTransportPool.getInstance();
- TTransport first = null;
- while (null == first) {
- try {
- // Get a transport (cached or not)
- first = pool.getAnyTransport(servers, true).getSecond();
- } catch (TTransportException e) {
- log.warn("Failed to obtain transport to " + servers);
- }
- }
-
- assertNotNull(first);
- // Return it to unreserve it
- pool.returnTransport(first);
-
- TTransport second = null;
- while (null == second) {
- try {
- // Get a cached transport (should be the first)
- second = pool.getAnyTransport(servers, true).getSecond();
- } catch (TTransportException e) {
- log.warn("Failed obtain 2nd transport to " + servers);
- }
- }
-
- // We should get the same transport
- assertTrue("Expected the first and second to be the same instance", first == second);
- // Return the 2nd
- pool.returnTransport(second);
-
- TTransport third = null;
- while (null == third) {
- try {
- // Get a non-cached transport
- third = pool.getAnyTransport(servers, false).getSecond();
- } catch (TTransportException e) {
- log.warn("Failed obtain 2nd transport to " + servers);
- }
- }
-
- assertFalse("Expected second and third transport to be different instances", second == third);
- pool.returnTransport(third);
- }
-}
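
Condensed, the contract being verified is that cachedConnection=true may hand back a previously returned transport while cachedConnection=false must create a fresh one. A short sketch, assuming the same servers list built in the test:

ThriftTransportPool pool = ThriftTransportPool.getInstance();

TTransport a = pool.getAnyTransport(servers, true).getSecond();
pool.returnTransport(a); // unreserve it so it becomes eligible for reuse
TTransport b = pool.getAnyTransport(servers, true).getSecond();
// expected: b == a, the cached instance is handed back

pool.returnTransport(b);
TTransport c = pool.getAnyTransport(servers, false).getSecond();
// expected: c != b, a new transport is created even though one is cached
pool.returnTransport(c);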
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/UnusedWALIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/UnusedWALIT.java b/test/src/test/java/org/apache/accumulo/test/UnusedWALIT.java
deleted file mode 100644
index 281c358..0000000
--- a/test/src/test/java/org/apache/accumulo/test/UnusedWALIT.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.UUID;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.log.WalStateManager;
-import org.apache.accumulo.server.master.state.TServerInstance;
-import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-// When reviewing the changes for ACCUMULO-3423, kturner suggested:
-// "tablets will now have log references that contain no data,
-// so it may be marked with 3 WALs, the first with data, the 2nd without, a 3rd with data."
-// It would be useful to have an IT that will test this situation.
-public class UnusedWALIT extends ConfigurableMacBase {
-
- private ZooReaderWriter zk;
-
- @Override
- protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- final long logSize = 1024 * 1024 * 10;
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
- cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, Long.toString(logSize));
- cfg.setNumTservers(1);
- // use raw local file system so walogs sync and flush will work
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- hadoopCoreSite.set("fs.namenode.fs-limits.min-block-size", Long.toString(logSize));
- }
-
- @Test(timeout = 2 * 60 * 1000)
- public void test() throws Exception {
- // don't want this bad boy cleaning up walog entries
- getCluster().getClusterControl().stop(ServerType.GARBAGE_COLLECTOR);
-
- // make two tables
- String[] tableNames = getUniqueNames(2);
- String bigTable = tableNames[0];
- String lilTable = tableNames[1];
- Connector c = getConnector();
- c.tableOperations().create(bigTable);
- c.tableOperations().create(lilTable);
-
- Instance i = c.getInstance();
- zk = new ZooReaderWriter(i.getZooKeepers(), i.getZooKeepersSessionTimeOut(), "");
-
- // put some data in a log that should be replayed for both tables
- writeSomeData(c, bigTable, 0, 10, 0, 10);
- scanSomeData(c, bigTable, 0, 10, 0, 10);
- writeSomeData(c, lilTable, 0, 1, 0, 1);
- scanSomeData(c, lilTable, 0, 1, 0, 1);
- assertEquals(2, getWALCount(i, zk));
-
- // roll the logs by pushing data into bigTable
- writeSomeData(c, bigTable, 0, 3000, 0, 1000);
- assertEquals(3, getWALCount(i, zk));
-
- // put some data in the latest log
- writeSomeData(c, lilTable, 1, 10, 0, 10);
- scanSomeData(c, lilTable, 1, 10, 0, 10);
-
- // bounce the tserver
- getCluster().getClusterControl().stop(ServerType.TABLET_SERVER);
- getCluster().getClusterControl().start(ServerType.TABLET_SERVER);
-
- // wait for the metadata table to be online
- Iterators.size(c.createScanner(MetadataTable.NAME, Authorizations.EMPTY).iterator());
-
- // check our two sets of data in different logs
- scanSomeData(c, lilTable, 0, 1, 0, 1);
- scanSomeData(c, lilTable, 1, 10, 0, 10);
- }
-
- private void scanSomeData(Connector c, String table, int startRow, int rowCount, int startCol, int colCount) throws Exception {
- Scanner s = c.createScanner(table, Authorizations.EMPTY);
- s.setRange(new Range(Integer.toHexString(startRow), Integer.toHexString(startRow + rowCount)));
- int row = startRow;
- int col = startCol;
- for (Entry<Key,Value> entry : s) {
- assertEquals(row, Integer.parseInt(entry.getKey().getRow().toString(), 16));
- assertEquals(col++, Integer.parseInt(entry.getKey().getColumnQualifier().toString(), 16));
- if (col == startCol + colCount) {
- col = startCol;
- row++;
- if (row == startRow + rowCount) {
- break;
- }
- }
- }
- assertEquals(row, startRow + rowCount);
- }
-
- private int getWALCount(Instance i, ZooReaderWriter zk) throws Exception {
- WalStateManager wals = new WalStateManager(i, zk);
- int result = 0;
- for (Entry<TServerInstance,List<UUID>> entry : wals.getAllMarkers().entrySet()) {
- result += entry.getValue().size();
- }
- return result;
- }
-
- private void writeSomeData(Connector conn, String table, int startRow, int rowCount, int startCol, int colCount) throws Exception {
- BatchWriterConfig config = new BatchWriterConfig();
- config.setMaxMemory(10 * 1024 * 1024);
- BatchWriter bw = conn.createBatchWriter(table, config);
- for (int r = startRow; r < startRow + rowCount; r++) {
- Mutation m = new Mutation(Integer.toHexString(r));
- for (int c = startCol; c < startCol + colCount; c++) {
- m.put("", Integer.toHexString(c), "");
- }
- bw.addMutation(m);
- }
- bw.close();
- }
-
-}
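
Concretely, the writes above arrange the scenario from the review comment: lilTable's first mutation lands in the first WAL, the large bigTable write rolls the log while lilTable receives nothing, and lilTable's second mutation lands in the newest WAL. After the tablet server is bounced, both lilTable scans must still succeed, showing that recovery tolerates a referenced log that holds no data for the tablet.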
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/UserCompactionStrategyIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/UserCompactionStrategyIT.java b/test/src/test/java/org/apache/accumulo/test/UserCompactionStrategyIT.java
deleted file mode 100644
index fa9e642..0000000
--- a/test/src/test/java/org/apache/accumulo/test/UserCompactionStrategyIT.java
+++ /dev/null
@@ -1,296 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.accumulo.test;
-
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Random;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.admin.CompactionConfig;
-import org.apache.accumulo.core.client.admin.CompactionStrategyConfig;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.user.RegExFilter;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.test.functional.FunctionalTestUtils;
-import org.apache.accumulo.test.functional.SlowIterator;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Test;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-
-public class UserCompactionStrategyIT extends AccumuloClusterHarness {
-
- @Override
- public int defaultTimeoutSeconds() {
- return 3 * 60;
- }
-
- @Test
- public void testDropA() throws Exception {
- Connector c = getConnector();
-
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
-
- writeFlush(c, tableName, "a");
- writeFlush(c, tableName, "b");
- // create a file that starts with A containing rows 'a' and 'b'
- c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
-
- writeFlush(c, tableName, "c");
- writeFlush(c, tableName, "d");
-
- // drop files that start with A
- CompactionStrategyConfig csConfig = new CompactionStrategyConfig(TestCompactionStrategy.class.getName());
- csConfig.setOptions(ImmutableMap.of("dropPrefix", "A", "inputPrefix", "F"));
- c.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setCompactionStrategy(csConfig));
-
- Assert.assertEquals(ImmutableSet.of("c", "d"), getRows(c, tableName));
-
- // this compaction should not drop files starting with A
- c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
- c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
-
- Assert.assertEquals(ImmutableSet.of("c", "d"), getRows(c, tableName));
- }
-
- private void testDropNone(Map<String,String> options) throws Exception {
-
- Connector c = getConnector();
-
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
-
- writeFlush(c, tableName, "a");
- writeFlush(c, tableName, "b");
-
- CompactionStrategyConfig csConfig = new CompactionStrategyConfig(TestCompactionStrategy.class.getName());
- csConfig.setOptions(options);
- c.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setCompactionStrategy(csConfig));
-
- Assert.assertEquals(ImmutableSet.of("a", "b"), getRows(c, tableName));
- }
-
- @Test
- public void testDropNone() throws Exception {
- // test a compaction strategy that selects no files. In this case there is no work to do; ensure the compaction does not hang.
-
- testDropNone(ImmutableMap.of("inputPrefix", "Z"));
- }
-
- @Test
- public void testDropNone2() throws Exception {
- // test a compaction strategy that selects no files. This differs from testDropNone() in that shouldCompact() will return true and getCompactionPlan() will
- // return no work to do.
-
- testDropNone(ImmutableMap.of("inputPrefix", "Z", "shouldCompact", "true"));
- }
-
- @Test
- public void testPerTableClasspath() throws Exception {
- // Can't assume that a test-resource will be on the server's classpath
- Assume.assumeTrue(ClusterType.MINI == getClusterType());
-
- // test pertable classpath + user specified compaction strat
-
- final Connector c = getConnector();
- final String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- c.instanceOperations().setProperty(Property.VFS_CONTEXT_CLASSPATH_PROPERTY.getKey() + "context1",
- System.getProperty("user.dir") + "/src/test/resources/TestCompactionStrat.jar");
- c.tableOperations().setProperty(tableName, Property.TABLE_CLASSPATH.getKey(), "context1");
-
- c.tableOperations().addSplits(tableName, new TreeSet<Text>(Arrays.asList(new Text("efg"))));
-
- writeFlush(c, tableName, "a");
- writeFlush(c, tableName, "b");
-
- writeFlush(c, tableName, "h");
- writeFlush(c, tableName, "i");
-
- Assert.assertEquals(4, FunctionalTestUtils.countRFiles(c, tableName));
-
- // EfgCompactionStrat will only compact a tablet w/ end row of 'efg'. No other tablets are compacted.
- CompactionStrategyConfig csConfig = new CompactionStrategyConfig("org.apache.accumulo.test.EfgCompactionStrat");
- c.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setCompactionStrategy(csConfig));
-
- Assert.assertEquals(3, FunctionalTestUtils.countRFiles(c, tableName));
-
- c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
-
- Assert.assertEquals(2, FunctionalTestUtils.countRFiles(c, tableName));
- }
-
- @Test
- public void testIterators() throws Exception {
- // test compaction strategy + iterators
-
- Connector c = getConnector();
-
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
-
- writeFlush(c, tableName, "a");
- writeFlush(c, tableName, "b");
- // create a file that starts with A containing rows 'a' and 'b'
- c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
-
- writeFlush(c, tableName, "c");
- writeFlush(c, tableName, "d");
-
- Assert.assertEquals(3, FunctionalTestUtils.countRFiles(c, tableName));
-
- // drop files that start with A
- CompactionStrategyConfig csConfig = new CompactionStrategyConfig(TestCompactionStrategy.class.getName());
- csConfig.setOptions(ImmutableMap.of("inputPrefix", "F"));
-
- IteratorSetting iterConf = new IteratorSetting(21, "myregex", RegExFilter.class);
- RegExFilter.setRegexs(iterConf, "a|c", null, null, null, false);
-
- c.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setCompactionStrategy(csConfig).setIterators(Arrays.asList(iterConf)));
-
- // compaction strategy should only be applied to one file. If it were applied to both, then row 'b' would be dropped by the filter.
- Assert.assertEquals(ImmutableSet.of("a", "b", "c"), getRows(c, tableName));
-
- Assert.assertEquals(2, FunctionalTestUtils.countRFiles(c, tableName));
-
- c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
-
- // ensure that iterator is not applied
- Assert.assertEquals(ImmutableSet.of("a", "b", "c"), getRows(c, tableName));
-
- Assert.assertEquals(1, FunctionalTestUtils.countRFiles(c, tableName));
- }
-
- @Test
- public void testFileSize() throws Exception {
- Connector c = getConnector();
-
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
-
- // write random data because it is very unlikely to compress
- writeRandomValue(c, tableName, 1 << 16);
- writeRandomValue(c, tableName, 1 << 16);
-
- writeRandomValue(c, tableName, 1 << 9);
- writeRandomValue(c, tableName, 1 << 7);
- writeRandomValue(c, tableName, 1 << 6);
-
- Assert.assertEquals(5, FunctionalTestUtils.countRFiles(c, tableName));
-
- CompactionStrategyConfig csConfig = new CompactionStrategyConfig(SizeCompactionStrategy.class.getName());
- csConfig.setOptions(ImmutableMap.of("size", "" + (1 << 15)));
- c.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setCompactionStrategy(csConfig));
-
- Assert.assertEquals(3, FunctionalTestUtils.countRFiles(c, tableName));
-
- csConfig = new CompactionStrategyConfig(SizeCompactionStrategy.class.getName());
- csConfig.setOptions(ImmutableMap.of("size", "" + (1 << 17)));
- c.tableOperations().compact(tableName, new CompactionConfig().setWait(true).setCompactionStrategy(csConfig));
-
- Assert.assertEquals(1, FunctionalTestUtils.countRFiles(c, tableName));
-
- }
-
- @Test
- public void testConcurrent() throws Exception {
- // two compactions without iterators or strategy should be able to run concurrently
-
- Connector c = getConnector();
-
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
-
- // write random data because it is very unlikely to compress
- writeRandomValue(c, tableName, 1 << 16);
- writeRandomValue(c, tableName, 1 << 16);
-
- c.tableOperations().compact(tableName, new CompactionConfig().setWait(false));
- c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
-
- Assert.assertEquals(1, FunctionalTestUtils.countRFiles(c, tableName));
-
- writeRandomValue(c, tableName, 1 << 16);
-
- IteratorSetting iterConfig = new IteratorSetting(30, SlowIterator.class);
- SlowIterator.setSleepTime(iterConfig, 1000);
-
- long t1 = System.currentTimeMillis();
- c.tableOperations().compact(tableName, new CompactionConfig().setWait(false).setIterators(Arrays.asList(iterConfig)));
- try {
- // this compaction should fail because previous one set iterators
- c.tableOperations().compact(tableName, new CompactionConfig().setWait(true));
- if (System.currentTimeMillis() - t1 < 2000)
- Assert.fail("Expected compaction to fail because another concurrent compaction set iterators");
- } catch (AccumuloException e) {}
- }
-
- void writeRandomValue(Connector c, String tableName, int size) throws Exception {
- Random rand = new Random();
-
- byte data1[] = new byte[size];
- rand.nextBytes(data1);
-
- BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
-
- Mutation m1 = new Mutation("r" + rand.nextInt(909090));
- m1.put("data", "bl0b", new Value(data1));
-
- bw.addMutation(m1);
- bw.close();
- c.tableOperations().flush(tableName, null, null, true);
- }
-
- private Set<String> getRows(Connector c, String tableName) throws TableNotFoundException {
- Set<String> rows = new HashSet<String>();
- Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
-
- for (Entry<Key,Value> entry : scanner)
- rows.add(entry.getKey().getRowData().toString());
- return rows;
-
- }
-
- private void writeFlush(Connector conn, String tablename, String row) throws Exception {
- BatchWriter bw = conn.createBatchWriter(tablename, new BatchWriterConfig());
- Mutation m = new Mutation(row);
- m.put("", "", "");
- bw.addMutation(m);
- bw.close();
- conn.tableOperations().flush(tablename, null, null, true);
- }
-}
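
For reference, the SizeCompactionStrategy exercised in testFileSize() amounts to selecting only the files smaller than the configured "size" option. A sketch against the 1.7-era CompactionStrategy API; the package and member names are assumed from that API rather than copied from the test module:

import java.util.Map;
import java.util.Map.Entry;

import org.apache.accumulo.core.metadata.schema.DataFileValue;
import org.apache.accumulo.server.fs.FileRef;
import org.apache.accumulo.tserver.compaction.CompactionPlan;
import org.apache.accumulo.tserver.compaction.CompactionStrategy;
import org.apache.accumulo.tserver.compaction.MajorCompactionRequest;

public class SizeCompactionStrategy extends CompactionStrategy {

  private long size = 0;

  @Override
  public void init(Map<String,String> options) {
    // "size" is the threshold below which a file is selected for compaction
    size = Long.parseLong(options.get("size"));
  }

  @Override
  public boolean shouldCompact(MajorCompactionRequest request) {
    // compact only if at least one file is under the threshold
    for (DataFileValue dfv : request.getFiles().values())
      if (dfv.getSize() < size)
        return true;
    return false;
  }

  @Override
  public CompactionPlan getCompactionPlan(MajorCompactionRequest request) {
    // the plan's input is exactly the set of files under the threshold
    CompactionPlan plan = new CompactionPlan();
    for (Entry<FileRef,DataFileValue> entry : request.getFiles().entrySet())
      if (entry.getValue().getSize() < size)
        plan.inputFiles.add(entry.getKey());
    return plan;
  }
}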
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/UsersIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/UsersIT.java b/test/src/test/java/org/apache/accumulo/test/UsersIT.java
deleted file mode 100644
index 131f042..0000000
--- a/test/src/test/java/org/apache/accumulo/test/UsersIT.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.util.Set;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.security.SecurityErrorCode;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.junit.Test;
-
-public class UsersIT extends AccumuloClusterHarness {
-
- @Test
- public void testCreateExistingUser() throws Exception {
- ClusterUser user0 = getUser(0);
- Connector conn = getConnector();
- Set<String> currentUsers = conn.securityOperations().listLocalUsers();
-
- // Ensure that the user exists
- if (!currentUsers.contains(user0.getPrincipal())) {
- PasswordToken token = null;
- if (!getCluster().getClientConfig().getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- token = new PasswordToken(user0.getPassword());
- }
- conn.securityOperations().createLocalUser(user0.getPrincipal(), token);
- }
-
- try {
- conn.securityOperations().createLocalUser(user0.getPrincipal(), new PasswordToken("better_fail"));
- fail("Creating a user that already exists should throw an exception");
- } catch (AccumuloSecurityException e) {
- assertTrue("Expected USER_EXISTS error", SecurityErrorCode.USER_EXISTS == e.getSecurityErrorCode());
- String msg = e.getMessage();
- assertTrue("Error message didn't contain principal: '" + msg + "'", msg.contains(user0.getPrincipal()));
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/VerifySerialRecoveryIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/VerifySerialRecoveryIT.java b/test/src/test/java/org/apache/accumulo/test/VerifySerialRecoveryIT.java
deleted file mode 100644
index 6a90730..0000000
--- a/test/src/test/java/org/apache/accumulo/test/VerifySerialRecoveryIT.java
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.minicluster.impl.ProcessReference;
-import org.apache.accumulo.server.util.Admin;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.accumulo.test.functional.FunctionalTestUtils;
-import org.apache.accumulo.tserver.TabletServer;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class VerifySerialRecoveryIT extends ConfigurableMacBase {
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setNumTservers(1);
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "3s");
- cfg.setProperty(Property.TSERV_ASSIGNMENT_MAXCONCURRENT, "20");
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- @Test(timeout = 4 * 60 * 1000)
- public void testSerializedRecovery() throws Exception {
- // make a table with many splits
- String tableName = getUniqueNames(1)[0];
- Connector c = getConnector();
- c.tableOperations().create(tableName);
- SortedSet<Text> splits = new TreeSet<Text>();
- for (int i = 0; i < 200; i++) {
- splits.add(new Text(AssignmentThreadsIT.randomHex(8)));
- }
- c.tableOperations().addSplits(tableName, splits);
- // load data to give the recovery something to do
- BatchWriter bw = c.createBatchWriter(tableName, null);
- for (int i = 0; i < 50000; i++) {
- Mutation m = new Mutation(AssignmentThreadsIT.randomHex(8));
- m.put("", "", "");
- bw.addMutation(m);
- }
- bw.close();
- // kill the tserver
- for (ProcessReference ref : getCluster().getProcesses().get(ServerType.TABLET_SERVER))
- getCluster().killProcess(ServerType.TABLET_SERVER, ref);
- final Process ts = cluster.exec(TabletServer.class);
-
- // wait for recovery
- Iterators.size(c.createScanner(tableName, Authorizations.EMPTY).iterator());
- assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
- ts.waitFor();
- String result = FunctionalTestUtils.readAll(cluster, TabletServer.class, ts);
- for (String line : result.split("\n")) {
- System.out.println(line);
- }
- // walk through the output, verifying that only a single normal recovery was running at one time
- boolean started = false;
- int recoveries = 0;
- for (String line : result.split("\n")) {
- // ignore metadata tables
- if (line.contains("!0") || line.contains("+r"))
- continue;
- if (line.contains("Starting Write-Ahead Log")) {
- assertFalse(started);
- started = true;
- recoveries++;
- }
- if (line.contains("Write-Ahead Log recovery complete")) {
- assertTrue(started);
- started = false;
- }
- }
- assertFalse(started);
- assertTrue(recoveries > 0);
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/VolumeChooserIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/VolumeChooserIT.java b/test/src/test/java/org/apache/accumulo/test/VolumeChooserIT.java
deleted file mode 100644
index c2dee9f..0000000
--- a/test/src/test/java/org/apache/accumulo/test/VolumeChooserIT.java
+++ /dev/null
@@ -1,392 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.fs.PerTableVolumeChooser;
-import org.apache.accumulo.server.fs.PreferredVolumeChooser;
-import org.apache.accumulo.server.fs.RandomVolumeChooser;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-/**
- *
- */
-public class VolumeChooserIT extends ConfigurableMacBase {
-
- private static final Text EMPTY = new Text();
- private static final Value EMPTY_VALUE = new Value(new byte[] {});
- private File volDirBase;
- private Path v1, v2, v3, v4;
- private String[] rows = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",");
- private String namespace1;
- private String namespace2;
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 30;
- }
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- // Get 2 tablet servers
- cfg.setNumTservers(2);
- namespace1 = "ns_" + getUniqueNames(2)[0];
- namespace2 = "ns_" + getUniqueNames(2)[1];
-
- // Set the general volume chooser to the PerTableVolumeChooser so that different choosers can be specified
- Map<String,String> siteConfig = new HashMap<String,String>();
- siteConfig.put(Property.GENERAL_VOLUME_CHOOSER.getKey(), PerTableVolumeChooser.class.getName());
- cfg.setSiteConfig(siteConfig);
-
- // Set up 4 different volume paths
- File baseDir = cfg.getDir();
- volDirBase = new File(baseDir, "volumes");
- File v1f = new File(volDirBase, "v1");
- File v2f = new File(volDirBase, "v2");
- File v3f = new File(volDirBase, "v3");
- File v4f = new File(volDirBase, "v4");
- v1 = new Path("file://" + v1f.getAbsolutePath());
- v2 = new Path("file://" + v2f.getAbsolutePath());
- v3 = new Path("file://" + v3f.getAbsolutePath());
- v4 = new Path("file://" + v4f.getAbsolutePath());
-
- // Only add volumes 1, 2, and 4 to the list of instance volumes to have one volume that isn't in the options list when they are choosing
- cfg.setProperty(Property.INSTANCE_VOLUMES, v1.toString() + "," + v2.toString() + "," + v4.toString());
-
- // use raw local file system so walogs sync and flush will work
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
-
- super.configure(cfg, hadoopCoreSite);
-
- }
-
- public void addSplits(Connector connector, String tableName) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
- // Add 10 splits to the table
- SortedSet<Text> partitions = new TreeSet<Text>();
- for (String s : "b,e,g,j,l,o,q,t,v,y".split(","))
- partitions.add(new Text(s));
- connector.tableOperations().addSplits(tableName, partitions);
- }
-
- public void writeAndReadData(Connector connector, String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
- // Write some data to the table
- BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
- for (String s : rows) {
- Mutation m = new Mutation(new Text(s));
- m.put(EMPTY, EMPTY, EMPTY_VALUE);
- bw.addMutation(m);
- }
- bw.close();
-
- // Write the data to disk, read it back
- connector.tableOperations().flush(tableName, null, null, true);
- Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
- int i = 0;
- for (Entry<Key,Value> entry : scanner) {
- assertEquals("Data read is not data written", rows[i++], entry.getKey().getRow().toString());
- }
- }
-
- public void verifyVolumes(Connector connector, String tableName, Range tableRange, String vol) throws TableNotFoundException {
- // Verify the new files are written to the Volumes specified
- ArrayList<String> volumes = new ArrayList<String>();
- for (String s : vol.split(","))
- volumes.add(s);
-
- Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- scanner.setRange(tableRange);
- scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
- int fileCount = 0;
- for (Entry<Key,Value> entry : scanner) {
- boolean inVolume = false;
- for (String volume : volumes) {
- if (entry.getKey().getColumnQualifier().toString().contains(volume))
- inVolume = true;
- }
- assertTrue("Data not written to the correct volumes", inVolume);
- fileCount++;
- }
- assertEquals("Wrong number of files", 11, fileCount);
- }
-
- // Test that uses two tables with 10 split points each. They each use the PreferredVolumeChooser to choose volumes.
- @Test
- public void twoTablesPreferredVolumeChooser() throws Exception {
- log.info("Starting twoTablesPreferredVolumeChooser");
-
- // Create namespace
- Connector connector = getConnector();
- connector.namespaceOperations().create(namespace1);
-
- // Set properties on the namespace
- String propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
- String volume = PreferredVolumeChooser.class.getName();
- connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
-
- propertyName = "table.custom.preferredVolumes";
- volume = v2.toString();
- connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
-
- // Create table1 on namespace1
- String tableName = namespace1 + ".1";
- connector.tableOperations().create(tableName);
- String tableID = connector.tableOperations().tableIdMap().get(tableName);
-
- // Add 10 splits to the table
- addSplits(connector, tableName);
- // Write some data to the table
- writeAndReadData(connector, tableName);
- // Verify the new files are written to the Volumes specified
- verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), volume);
-
- connector.namespaceOperations().create(namespace2);
-
- // Set properties on the namespace
- propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
- volume = PreferredVolumeChooser.class.getName();
- connector.namespaceOperations().setProperty(namespace2, propertyName, volume);
-
- propertyName = "table.custom.preferredVolumes";
- volume = v1.toString();
- connector.namespaceOperations().setProperty(namespace2, propertyName, volume);
-
- // Create table2 on namespace2
- String tableName2 = namespace2 + ".1";
-
- connector.tableOperations().create(tableName2);
- String tableID2 = connector.tableOperations().tableIdMap().get(tableName2);
-
- // Add 10 splits to the table
- addSplits(connector, tableName2);
- // Write some data to the table
- writeAndReadData(connector, tableName2);
- // Verify the new files are written to the Volumes specified
- verifyVolumes(connector, tableName2, TabletsSection.getRange(tableID2), volume);
- }
-
- // Test that uses two tables with 10 split points each. They each use the RandomVolumeChooser to choose volumes.
- @Test
- public void twoTablesRandomVolumeChooser() throws Exception {
- log.info("Starting twoTablesRandomVolumeChooser()");
-
- // Create namespace
- Connector connector = getConnector();
- connector.namespaceOperations().create(namespace1);
-
- // Set properties on the namespace
- String propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
- String volume = RandomVolumeChooser.class.getName();
- connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
-
- // Create table1 on namespace1
- String tableName = namespace1 + ".1";
- connector.tableOperations().create(tableName);
- String tableID = connector.tableOperations().tableIdMap().get(tableName);
-
- // Add 10 splits to the table
- addSplits(connector, tableName);
- // Write some data to the table
- writeAndReadData(connector, tableName);
- // Verify the new files are written to the Volumes specified
-
- verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
-
- connector.namespaceOperations().create(namespace2);
-
- // Set properties on the namespace
- propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
- volume = RandomVolumeChooser.class.getName();
- connector.namespaceOperations().setProperty(namespace2, propertyName, volume);
-
- // Create table2 on namespace2
- String tableName2 = namespace2 + ".1";
- connector.tableOperations().create(tableName2);
- String tableID2 = connector.tableOperations().tableIdMap().get(tableName2);
-
- // Add 10 splits to the table
- addSplits(connector, tableName2);
- // Write some data to the table
- writeAndReadData(connector, tableName2);
- // Verify the new files are written to the Volumes specified
- verifyVolumes(connector, tableName2, TabletsSection.getRange(tableID2), v1.toString() + "," + v2.toString() + "," + v4.toString());
- }
-
- // Test that uses two tables with 10 split points each. The first uses the RandomVolumeChooser and the second uses the
- // PreferredVolumeChooser to choose volumes.
- @Test
- public void twoTablesDiffChoosers() throws Exception {
- log.info("Starting twoTablesDiffChoosers");
-
- // Create namespace
- Connector connector = getConnector();
- connector.namespaceOperations().create(namespace1);
-
- // Set properties on the namespace
- String propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
- String volume = RandomVolumeChooser.class.getName();
- connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
-
- // Create table1 on namespace1
- String tableName = namespace1 + ".1";
- connector.tableOperations().create(tableName);
- String tableID = connector.tableOperations().tableIdMap().get(tableName);
-
- // Add 10 splits to the table
- addSplits(connector, tableName);
- // Write some data to the table
- writeAndReadData(connector, tableName);
- // Verify the new files are written to the Volumes specified
-
- verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
-
- connector.namespaceOperations().create(namespace2);
-
- // Set properties on the namespace
- propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
- volume = PreferredVolumeChooser.class.getName();
- connector.namespaceOperations().setProperty(namespace2, propertyName, volume);
-
- propertyName = "table.custom.preferredVolumes";
- volume = v1.toString();
- connector.namespaceOperations().setProperty(namespace2, propertyName, volume);
-
- // Create table2 on namespace2
- String tableName2 = namespace2 + ".1";
- connector.tableOperations().create(tableName2);
- String tableID2 = connector.tableOperations().tableIdMap().get(tableName2);
-
- // Add 10 splits to the table
- addSplits(connector, tableName2);
- // Write some data to the table
- writeAndReadData(connector, tableName2);
- // Verify the new files are written to the Volumes specified
- verifyVolumes(connector, tableName2, TabletsSection.getRange(tableID2), volume);
- }
-
- // Test that uses one table with 10 split points. It uses the PreferredVolumeChooser, but no preferred volume is specified, so the volume
- // is chosen randomly from all instance volumes.
- @Test
- public void missingVolumePreferredVolumeChooser() throws Exception {
- log.info("Starting missingVolumePreferredVolumeChooser");
-
- // Create namespace
- Connector connector = getConnector();
- connector.namespaceOperations().create(namespace1);
-
- // Set properties on the namespace
- String propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
- String volume = PreferredVolumeChooser.class.getName();
- connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
-
- // Create table1 on namespace1
- String tableName = namespace1 + ".1";
- connector.tableOperations().create(tableName);
- String tableID = connector.tableOperations().tableIdMap().get(tableName);
-
- // Add 10 splits to the table
- addSplits(connector, tableName);
- // Write some data to the table
- writeAndReadData(connector, tableName);
- // Verify the new files are written to the Volumes specified
- verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
- }
-
- // Test that uses one table with 10 split points. It uses the PreferredVolumeChooser, but the preferred volume is not an instance volume, so the
- // volume is chosen randomly from all instance volumes.
- @Test
- public void notInstancePreferredVolumeChooser() throws Exception {
- log.info("Starting notInstancePreferredVolumeChooser");
-
- // Create namespace
- Connector connector = getConnector();
- connector.namespaceOperations().create(namespace1);
-
- // Set properties on the namespace
- String propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
- String volume = PreferredVolumeChooser.class.getName();
- connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
-
- propertyName = "table.custom.preferredVolumes";
- volume = v3.toString();
- connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
-
- // Create table1 on namespace1
- String tableName = namespace1 + ".1";
- connector.tableOperations().create(tableName);
- String tableID = connector.tableOperations().tableIdMap().get(tableName);
-
- // Add 10 splits to the table
- addSplits(connector, tableName);
- // Write some data to the table
- writeAndReadData(connector, tableName);
- // Verify the new files are written to the Volumes specified
- verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
- }
-
- // Test that uses one table with 10 split points. It does not specify a chooser, so the volume is chosen randomly from all instance volumes.
- @Test
- public void chooserNotSpecified() throws Exception {
- log.info("Starting chooserNotSpecified");
-
- // Create a table
- Connector connector = getConnector();
- String tableName = getUniqueNames(2)[0];
- connector.tableOperations().create(tableName);
- String tableID = connector.tableOperations().tableIdMap().get(tableName);
-
- // Add 10 splits to the table
- addSplits(connector, tableName);
- // Write some data to the table
- writeAndReadData(connector, tableName);
-
- // Verify the new files are written to the Volumes specified
- verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
- }
-
-}
[36/43] accumulo git commit: ACCUMULO-3871 move ITs into distro jar,
stop building test jar
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/VolumeChooserIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/VolumeChooserIT.java b/test/src/main/java/org/apache/accumulo/test/VolumeChooserIT.java
new file mode 100644
index 0000000..c2dee9f
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/VolumeChooserIT.java
@@ -0,0 +1,392 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.fs.PerTableVolumeChooser;
+import org.apache.accumulo.server.fs.PreferredVolumeChooser;
+import org.apache.accumulo.server.fs.RandomVolumeChooser;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+/**
+ * Tests that table and namespace volume chooser settings direct newly written files to the expected volumes.
+ */
+public class VolumeChooserIT extends ConfigurableMacBase {
+
+ private static final Text EMPTY = new Text();
+ private static final Value EMPTY_VALUE = new Value(new byte[] {});
+ private File volDirBase;
+ private Path v1, v2, v3, v4;
+ private String[] rows = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",");
+ private String namespace1;
+ private String namespace2;
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 30;
+ }
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ // Get 2 tablet servers
+ cfg.setNumTservers(2);
+ namespace1 = "ns_" + getUniqueNames(2)[0];
+ namespace2 = "ns_" + getUniqueNames(2)[1];
+
+ // Set the general volume chooser to the PerTableVolumeChooser so that different choosers can be specified
+ Map<String,String> siteConfig = new HashMap<String,String>();
+ siteConfig.put(Property.GENERAL_VOLUME_CHOOSER.getKey(), PerTableVolumeChooser.class.getName());
+ cfg.setSiteConfig(siteConfig);
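+ // With this setting the system-wide chooser delegates: for each new file, PerTableVolumeChooser looks up
+ // the chooser class named by the table.volume.chooser property of the table being written.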
+
+ // Set up 4 different volume paths
+ File baseDir = cfg.getDir();
+ volDirBase = new File(baseDir, "volumes");
+ File v1f = new File(volDirBase, "v1");
+ File v2f = new File(volDirBase, "v2");
+ File v3f = new File(volDirBase, "v3");
+ File v4f = new File(volDirBase, "v4");
+ v1 = new Path("file://" + v1f.getAbsolutePath());
+ v2 = new Path("file://" + v2f.getAbsolutePath());
+ v3 = new Path("file://" + v3f.getAbsolutePath());
+ v4 = new Path("file://" + v4f.getAbsolutePath());
+
+ // Only add volumes 1, 2, and 4 as instance volumes, leaving one volume (v3) that is not an option when the choosers run
+ cfg.setProperty(Property.INSTANCE_VOLUMES, v1.toString() + "," + v2.toString() + "," + v4.toString());
+
+ // use raw local file system so walogs sync and flush will work
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+
+ super.configure(cfg, hadoopCoreSite);
+
+ }
+
+ public void addSplits(Connector connector, String tableName) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
+ // Add 10 splits to the table
+ SortedSet<Text> partitions = new TreeSet<Text>();
+ for (String s : "b,e,g,j,l,o,q,t,v,y".split(","))
+ partitions.add(new Text(s));
+ connector.tableOperations().addSplits(tableName, partitions);
+ }
+
+ public void writeAndReadData(Connector connector, String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+ // Write some data to the table
+ BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
+ for (String s : rows) {
+ Mutation m = new Mutation(new Text(s));
+ m.put(EMPTY, EMPTY, EMPTY_VALUE);
+ bw.addMutation(m);
+ }
+ bw.close();
+
+ // Write the data to disk, read it back
+ connector.tableOperations().flush(tableName, null, null, true);
+ Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
+ int i = 0;
+ for (Entry<Key,Value> entry : scanner) {
+ assertEquals("Data read is not data written", rows[i++], entry.getKey().getRow().toString());
+ }
+ }
+
+ public void verifyVolumes(Connector connector, String tableName, Range tableRange, String vol) throws TableNotFoundException {
+ // Verify the new files are written to the Volumes specified
+ ArrayList<String> volumes = new ArrayList<String>();
+ for (String s : vol.split(","))
+ volumes.add(s);
+
+ Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ scanner.setRange(tableRange);
+ scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+ int fileCount = 0;
+ for (Entry<Key,Value> entry : scanner) {
+ boolean inVolume = false;
+ for (String volume : volumes) {
+ if (entry.getKey().getColumnQualifier().toString().contains(volume))
+ inVolume = true;
+ }
+ assertTrue("Data not written to the correct volumes", inVolume);
+ fileCount++;
+ }
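+ // The 10 split points create 11 tablets, and the single flush in writeAndReadData() should
+ // produce one file per tablet, hence the expected count of 11 below.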
+ assertEquals("Wrong number of files", 11, fileCount);
+ }
+
+ // Test that uses two tables with 10 split points each. They each use the PreferredVolumeChooser to choose volumes.
+ @Test
+ public void twoTablesPreferredVolumeChooser() throws Exception {
+ log.info("Starting twoTablesPreferredVolumeChooser");
+
+ // Create namespace
+ Connector connector = getConnector();
+ connector.namespaceOperations().create(namespace1);
+
+ // Set properties on the namespace
+ String propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
+ String volume = PreferredVolumeChooser.class.getName();
+ connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
+
+ propertyName = "table.custom.preferredVolumes";
+ volume = v2.toString();
+ connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
+
+ // Create table1 on namespace1
+ String tableName = namespace1 + ".1";
+ connector.tableOperations().create(tableName);
+ String tableID = connector.tableOperations().tableIdMap().get(tableName);
+
+ // Add 10 splits to the table
+ addSplits(connector, tableName);
+ // Write some data to the table
+ writeAndReadData(connector, tableName);
+ // Verify the new files are written to the Volumes specified
+ verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), volume);
+
+ connector.namespaceOperations().create(namespace2);
+
+ // Set properties on the namespace
+ propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
+ volume = PreferredVolumeChooser.class.getName();
+ connector.namespaceOperations().setProperty(namespace2, propertyName, volume);
+
+ propertyName = "table.custom.preferredVolumes";
+ volume = v1.toString();
+ connector.namespaceOperations().setProperty(namespace2, propertyName, volume);
+
+ // Create table2 on namespace2
+ String tableName2 = namespace2 + ".1";
+
+ connector.tableOperations().create(tableName2);
+ String tableID2 = connector.tableOperations().tableIdMap().get(tableName2);
+
+ // Add 10 splits to the table
+ addSplits(connector, tableName2);
+ // Write some data to the table
+ writeAndReadData(connector, tableName2);
+ // Verify the new files are written to the Volumes specified
+ verifyVolumes(connector, tableName2, TabletsSection.getRange(tableID2), volume);
+ }
+
+ // Test that uses two tables with 10 split points each. They each use the RandomVolumeChooser to choose volumes.
+ @Test
+ public void twoTablesRandomVolumeChooser() throws Exception {
+ log.info("Starting twoTablesRandomVolumeChooser()");
+
+ // Create namespace
+ Connector connector = getConnector();
+ connector.namespaceOperations().create(namespace1);
+
+ // Set properties on the namespace
+ String propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
+ String volume = RandomVolumeChooser.class.getName();
+ connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
+
+ // Create table1 on namespace1
+ String tableName = namespace1 + ".1";
+ connector.tableOperations().create(tableName);
+ String tableID = connector.tableOperations().tableIdMap().get(tableName);
+
+ // Add 10 splits to the table
+ addSplits(connector, tableName);
+ // Write some data to the table
+ writeAndReadData(connector, tableName);
+ // Verify the new files are written to the Volumes specified
+
+ verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
+
+ connector.namespaceOperations().create(namespace2);
+
+ // Set properties on the namespace
+ propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
+ volume = RandomVolumeChooser.class.getName();
+ connector.namespaceOperations().setProperty(namespace2, propertyName, volume);
+
+ // Create table2 on namespace2
+ String tableName2 = namespace2 + ".1";
+ connector.tableOperations().create(tableName2);
+ String tableID2 = connector.tableOperations().tableIdMap().get(tableName2);
+
+ // Add 10 splits to the table
+ addSplits(connector, tableName2);
+ // Write some data to the table
+ writeAndReadData(connector, tableName2);
+ // Verify the new files are written to the Volumes specified
+ verifyVolumes(connector, tableName2, TabletsSection.getRange(tableID2), v1.toString() + "," + v2.toString() + "," + v4.toString());
+ }
+
+ // Test that uses two tables with 10 split points each. The first uses the RandomVolumeChooser and the second uses the
+ // PreferredVolumeChooser to choose volumes.
+ @Test
+ public void twoTablesDiffChoosers() throws Exception {
+ log.info("Starting twoTablesDiffChoosers");
+
+ // Create namespace
+ Connector connector = getConnector();
+ connector.namespaceOperations().create(namespace1);
+
+ // Set properties on the namespace
+ String propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
+ String volume = RandomVolumeChooser.class.getName();
+ connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
+
+ // Create table1 on namespace1
+ String tableName = namespace1 + ".1";
+ connector.tableOperations().create(tableName);
+ String tableID = connector.tableOperations().tableIdMap().get(tableName);
+
+ // Add 10 splits to the table
+ addSplits(connector, tableName);
+ // Write some data to the table
+ writeAndReadData(connector, tableName);
+ // Verify the new files are written to the Volumes specified
+
+ verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
+
+ connector.namespaceOperations().create(namespace2);
+
+ // Set properties on the namespace
+ propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
+ volume = PreferredVolumeChooser.class.getName();
+ connector.namespaceOperations().setProperty(namespace2, propertyName, volume);
+
+ propertyName = "table.custom.preferredVolumes";
+ volume = v1.toString();
+ connector.namespaceOperations().setProperty(namespace2, propertyName, volume);
+
+ // Create table2 on namespace2
+ String tableName2 = namespace2 + ".1";
+ connector.tableOperations().create(tableName2);
+ String tableID2 = connector.tableOperations().tableIdMap().get(tableName2);
+
+ // Add 10 splits to the table
+ addSplits(connector, tableName2);
+ // Write some data to the table
+ writeAndReadData(connector, tableName2);
+ // Verify the new files are written to the Volumes specified
+ verifyVolumes(connector, tableName2, TabletsSection.getRange(tableID2), volume);
+ }
+
+ // Test that uses one table with 10 split points. It uses the PreferredVolumeChooser, but no preferred volume is specified, so the volume
+ // is chosen randomly from all instance volumes.
+ @Test
+ public void missingVolumePreferredVolumeChooser() throws Exception {
+ log.info("Starting missingVolumePreferredVolumeChooser");
+
+ // Create namespace
+ Connector connector = getConnector();
+ connector.namespaceOperations().create(namespace1);
+
+ // Set properties on the namespace
+ String propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
+ String volume = PreferredVolumeChooser.class.getName();
+ connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
+
+ // Create table1 on namespace1
+ String tableName = namespace1 + ".1";
+ connector.tableOperations().create(tableName);
+ String tableID = connector.tableOperations().tableIdMap().get(tableName);
+
+ // Add 10 splits to the table
+ addSplits(connector, tableName);
+ // Write some data to the table
+ writeAndReadData(connector, tableName);
+ // Verify the new files are written to the Volumes specified
+ verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
+ }
+
+ // Test that uses one table with 10 split points. It uses the PreferredVolumeChooser, but the preferred volume is not an instance volume, so the
+ // volume is chosen randomly from all instance volumes.
+ @Test
+ public void notInstancePreferredVolumeChooser() throws Exception {
+ log.info("Starting notInstancePreferredVolumeChooser");
+
+ // Create namespace
+ Connector connector = getConnector();
+ connector.namespaceOperations().create(namespace1);
+
+ // Set properties on the namespace
+ String propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
+ String volume = PreferredVolumeChooser.class.getName();
+ connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
+
+ propertyName = "table.custom.preferredVolumes";
+ volume = v3.toString();
+ connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
+
+ // Create table1 on namespace1
+ String tableName = namespace1 + ".1";
+ connector.tableOperations().create(tableName);
+ String tableID = connector.tableOperations().tableIdMap().get(tableName);
+
+ // Add 10 splits to the table
+ addSplits(connector, tableName);
+ // Write some data to the table
+ writeAndReadData(connector, tableName);
+ // Verify the new files are written to the Volumes specified
+ verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
+ }
+
+ // Test that uses one table with 10 split points. It does not specify a chooser, so the volume is chosen randomly from all instance volumes.
+ @Test
+ public void chooserNotSpecified() throws Exception {
+ log.info("Starting chooserNotSpecified");
+
+ // Create a table
+ Connector connector = getConnector();
+ String tableName = getUniqueNames(2)[0];
+ connector.tableOperations().create(tableName);
+ String tableID = connector.tableOperations().tableIdMap().get(tableName);
+
+ // Add 10 splits to the table
+ addSplits(connector, tableName);
+ // Write some data to the table
+ writeAndReadData(connector, tableName);
+
+ // Verify the new files are written to the Volumes specified
+ verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/VolumeIT.java b/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
new file mode 100644
index 0000000..c25370d
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
@@ -0,0 +1,568 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.admin.DiskUsage;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.fate.zookeeper.ZooReader;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.init.Initialize;
+import org.apache.accumulo.server.log.WalStateManager;
+import org.apache.accumulo.server.log.WalStateManager.WalState;
+import org.apache.accumulo.server.util.Admin;
+import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class VolumeIT extends ConfigurableMacBase {
+
+ private static final Text EMPTY = new Text();
+ private static final Value EMPTY_VALUE = new Value(new byte[] {});
+ private File volDirBase;
+ private Path v1, v2;
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 5 * 60;
+ }
+
+ @SuppressWarnings("deprecation")
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ File baseDir = cfg.getDir();
+ volDirBase = new File(baseDir, "volumes");
+ File v1f = new File(volDirBase, "v1");
+ File v2f = new File(volDirBase, "v2");
+ v1 = new Path("file://" + v1f.getAbsolutePath());
+ v2 = new Path("file://" + v2f.getAbsolutePath());
+
+ // Run MAC on two locations in the local file system
+ URI v1Uri = v1.toUri();
+ cfg.setProperty(Property.INSTANCE_DFS_DIR, v1Uri.getPath());
+ cfg.setProperty(Property.INSTANCE_DFS_URI, v1Uri.getScheme() + v1Uri.getHost());
+ cfg.setProperty(Property.INSTANCE_VOLUMES, v1.toString() + "," + v2.toString());
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
+
+ // use raw local file system so walogs sync and flush will work
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+
+ super.configure(cfg, hadoopCoreSite);
+ }
+
+ @Test
+ public void test() throws Exception {
+ // create a table
+ Connector connector = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ connector.tableOperations().create(tableName);
+ SortedSet<Text> partitions = new TreeSet<Text>();
+ // with some splits
+ for (String s : "d,m,t".split(","))
+ partitions.add(new Text(s));
+ connector.tableOperations().addSplits(tableName, partitions);
+ // scribble over the splits
+ BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
+ String[] rows = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",");
+ for (String s : rows) {
+ Mutation m = new Mutation(new Text(s));
+ m.put(EMPTY, EMPTY, EMPTY_VALUE);
+ bw.addMutation(m);
+ }
+ bw.close();
+ // write the data to disk, read it back
+ connector.tableOperations().flush(tableName, null, null, true);
+ Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
+ int i = 0;
+ for (Entry<Key,Value> entry : scanner) {
+ assertEquals(rows[i++], entry.getKey().getRow().toString());
+ }
+ // verify the new files are written to the different volumes
+ scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ scanner.setRange(new Range("1", "1<"));
+ scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+ int fileCount = 0;
+
+ for (Entry<Key,Value> entry : scanner) {
+ boolean inV1 = entry.getKey().getColumnQualifier().toString().contains(v1.toString());
+ boolean inV2 = entry.getKey().getColumnQualifier().toString().contains(v2.toString());
+ assertTrue(inV1 || inV2);
+ fileCount++;
+ }
+ assertEquals(4, fileCount);
+ List<DiskUsage> diskUsage = connector.tableOperations().getDiskUsage(Collections.singleton(tableName));
+ assertEquals(1, diskUsage.size());
+ long usage = diskUsage.get(0).getUsage().longValue();
+ System.out.println("usage " + usage);
+ assertTrue(usage > 700 && usage < 800);
+ }
+
+ private void verifyData(List<String> expected, Scanner createScanner) {
+
+ List<String> actual = new ArrayList<String>();
+
+ for (Entry<Key,Value> entry : createScanner) {
+ Key k = entry.getKey();
+ actual.add(k.getRow() + ":" + k.getColumnFamily() + ":" + k.getColumnQualifier() + ":" + entry.getValue());
+ }
+
+ Collections.sort(expected);
+ Collections.sort(actual);
+
+ Assert.assertEquals(expected, actual);
+ }
+
+ @Test
+ public void testRelativePaths() throws Exception {
+
+ List<String> expected = new ArrayList<String>();
+
+ Connector connector = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ connector.tableOperations().create(tableName, new NewTableConfiguration().withoutDefaultIterators());
+
+ String tableId = connector.tableOperations().tableIdMap().get(tableName);
+
+ SortedSet<Text> partitions = new TreeSet<Text>();
+ // with some splits
+ for (String s : "c,g,k,p,s,v".split(","))
+ partitions.add(new Text(s));
+
+ connector.tableOperations().addSplits(tableName, partitions);
+
+ BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
+
+ // create two files in each tablet
+
+ String[] rows = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",");
+ for (String s : rows) {
+ Mutation m = new Mutation(s);
+ m.put("cf1", "cq1", "1");
+ bw.addMutation(m);
+ expected.add(s + ":cf1:cq1:1");
+ }
+
+ bw.flush();
+ connector.tableOperations().flush(tableName, null, null, true);
+
+ for (String s : rows) {
+ Mutation m = new Mutation(s);
+ m.put("cf1", "cq1", "2");
+ bw.addMutation(m);
+ expected.add(s + ":cf1:cq1:2");
+ }
+
+ bw.close();
+ connector.tableOperations().flush(tableName, null, null, true);
+
+ verifyData(expected, connector.createScanner(tableName, Authorizations.EMPTY));
+
+ connector.tableOperations().offline(tableName, true);
+
+ connector.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
+
+ Scanner metaScanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ metaScanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+ metaScanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
+
+ BatchWriter mbw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+
+ for (Entry<Key,Value> entry : metaScanner) {
+ String cq = entry.getKey().getColumnQualifier().toString();
+ if (cq.startsWith(v1.toString())) {
+ Path path = new Path(cq);
+ String relPath = "/" + path.getParent().getName() + "/" + path.getName();
+ Mutation fileMut = new Mutation(entry.getKey().getRow());
+ fileMut.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
+ fileMut.put(entry.getKey().getColumnFamily().toString(), relPath, entry.getValue().toString());
+ mbw.addMutation(fileMut);
+ }
+ }
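+ // The loop above rewrote each v1 file entry to the two-level relative form "/<tablet dir>/<file>",
+ // which is how older Accumulo versions recorded files and which tablet servers must still resolve.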
+
+ mbw.close();
+
+ connector.tableOperations().online(tableName, true);
+
+ verifyData(expected, connector.createScanner(tableName, Authorizations.EMPTY));
+
+ connector.tableOperations().compact(tableName, null, null, true, true);
+
+ verifyData(expected, connector.createScanner(tableName, Authorizations.EMPTY));
+
+ for (Entry<Key,Value> entry : metaScanner) {
+ String cq = entry.getKey().getColumnQualifier().toString();
+ Path path = new Path(cq);
+ Assert.assertTrue("relative path not deleted " + path.toString(), path.depth() > 2);
+ }
+
+ }
+
+ @Test
+ public void testAddVolumes() throws Exception {
+
+ String[] tableNames = getUniqueNames(2);
+
+ // grab this before shutting down cluster
+ String uuid = new ZooKeeperInstance(cluster.getClientConfig()).getInstanceID();
+
+ verifyVolumesUsed(tableNames[0], false, v1, v2);
+
+ Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+ cluster.stop();
+
+ Configuration conf = new Configuration(false);
+ conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
+
+ File v3f = new File(volDirBase, "v3");
+ assertTrue(v3f.mkdir() || v3f.isDirectory());
+ Path v3 = new Path("file://" + v3f.getAbsolutePath());
+
+ conf.set(Property.INSTANCE_VOLUMES.getKey(), v1.toString() + "," + v2.toString() + "," + v3.toString());
+ BufferedOutputStream fos = new BufferedOutputStream(new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
+ conf.writeXml(fos);
+ fos.close();
+
+ // initialize volume
+ Assert.assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").waitFor());
+
+ // check that all volumes are initialized
+ for (Path volumePath : Arrays.asList(v1, v2, v3)) {
+ FileSystem fs = volumePath.getFileSystem(CachedConfiguration.getInstance());
+ Path vp = new Path(volumePath, ServerConstants.INSTANCE_ID_DIR);
+ FileStatus[] iids = fs.listStatus(vp);
+ Assert.assertEquals(1, iids.length);
+ Assert.assertEquals(uuid, iids[0].getPath().getName());
+ }
+
+ // start cluster and verify that new volume is used
+ cluster.start();
+
+ verifyVolumesUsed(tableNames[1], false, v1, v2, v3);
+ }
+
+ @Test
+ public void testNonConfiguredVolumes() throws Exception {
+
+ String[] tableNames = getUniqueNames(2);
+
+ // grab this before shutting down cluster
+ String uuid = new ZooKeeperInstance(cluster.getClientConfig()).getInstanceID();
+
+ verifyVolumesUsed(tableNames[0], false, v1, v2);
+
+ Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+ cluster.stop();
+
+ Configuration conf = new Configuration(false);
+ conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
+
+ File v3f = new File(volDirBase, "v3");
+ assertTrue(v3f.mkdir() || v3f.isDirectory());
+ Path v3 = new Path("file://" + v3f.getAbsolutePath());
+
+ conf.set(Property.INSTANCE_VOLUMES.getKey(), v2.toString() + "," + v3.toString());
+ BufferedOutputStream fos = new BufferedOutputStream(new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
+ conf.writeXml(fos);
+ fos.close();
+
+ // initialize volume
+ Assert.assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").waitFor());
+
+ // check that all volumes are initialized
+ for (Path volumePath : Arrays.asList(v1, v2, v3)) {
+ FileSystem fs = volumePath.getFileSystem(CachedConfiguration.getInstance());
+ Path vp = new Path(volumePath, ServerConstants.INSTANCE_ID_DIR);
+ FileStatus[] iids = fs.listStatus(vp);
+ Assert.assertEquals(1, iids.length);
+ Assert.assertEquals(uuid, iids[0].getPath().getName());
+ }
+
+ // start cluster and verify that new volume is used
+ cluster.start();
+
+ // Make sure we can still read the tables (tableNames[0] is very likely to have a file still on v1)
+ List<String> expected = new ArrayList<String>();
+ for (int i = 0; i < 100; i++) {
+ String row = String.format("%06d", i * 100 + 3);
+ expected.add(row + ":cf1:cq1:1");
+ }
+
+ verifyData(expected, getConnector().createScanner(tableNames[0], Authorizations.EMPTY));
+
+ // v1 should not have any data for tableNames[1]
+ verifyVolumesUsed(tableNames[1], false, v2, v3);
+ }
+
+ private void writeData(String tableName, Connector conn) throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException,
+ MutationsRejectedException {
+ TreeSet<Text> splits = new TreeSet<Text>();
+ for (int i = 1; i < 100; i++) {
+ splits.add(new Text(String.format("%06d", i * 100)));
+ }
+
+ conn.tableOperations().create(tableName);
+ conn.tableOperations().addSplits(tableName, splits);
+
+ BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+ for (int i = 0; i < 100; i++) {
+ String row = String.format("%06d", i * 100 + 3);
+ Mutation m = new Mutation(row);
+ m.put("cf1", "cq1", "1");
+ bw.addMutation(m);
+ }
+
+ bw.close();
+ }
+
+ private void verifyVolumesUsed(String tableName, boolean shouldExist, Path... paths) throws Exception {
+
+ Connector conn = getConnector();
+
+ List<String> expected = new ArrayList<String>();
+ for (int i = 0; i < 100; i++) {
+ String row = String.format("%06d", i * 100 + 3);
+ expected.add(row + ":cf1:cq1:1");
+ }
+
+ if (!conn.tableOperations().exists(tableName)) {
+ Assert.assertFalse(shouldExist);
+
+ writeData(tableName, conn);
+
+ verifyData(expected, conn.createScanner(tableName, Authorizations.EMPTY));
+
+ conn.tableOperations().flush(tableName, null, null, true);
+ }
+
+ verifyData(expected, conn.createScanner(tableName, Authorizations.EMPTY));
+
+ String tableId = conn.tableOperations().tableIdMap().get(tableName);
+ Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(metaScanner);
+ metaScanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+ metaScanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
+
+ int counts[] = new int[paths.length];
+
+ outer: for (Entry<Key,Value> entry : metaScanner) {
+ String cf = entry.getKey().getColumnFamily().toString();
+ String cq = entry.getKey().getColumnQualifier().toString();
+
+ String path;
+ if (cf.equals(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME.toString()))
+ path = cq;
+ else
+ path = entry.getValue().toString();
+
+ for (int i = 0; i < paths.length; i++) {
+ if (path.startsWith(paths[i].toString())) {
+ counts[i]++;
+ continue outer;
+ }
+ }
+
+ Assert.fail("Unexpected volume " + path);
+ }
+
+ Instance i = conn.getInstance();
+ ZooReaderWriter zk = new ZooReaderWriter(i.getZooKeepers(), i.getZooKeepersSessionTimeOut(), "");
+ WalStateManager wals = new WalStateManager(i, zk);
+ outer: for (Entry<Path,WalState> entry : wals.getAllState().entrySet()) {
+ for (Path path : paths) {
+ if (entry.getKey().toString().startsWith(path.toString())) {
+ continue outer;
+ }
+ }
+ Assert.fail("Unexpected volume " + entry.getKey());
+ }
+
+ // if a volume is chosen randomly for each tablet, then the probability that a volume will not be chosen for any tablet is ((num_volumes -
+ // 1)/num_volumes)^num_tablets. For 100 tablets and 3 volumes the probability that only 2 volumes would be chosen is 2.46e-18
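+ // Worked out: (2/3)^100 = exp(100 * ln(2/3)) ~= exp(-40.5) ~= 2.46e-18, so asserting below that
+ // every volume received at least one file is effectively deterministic.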
+
+ int sum = 0;
+ for (int count : counts) {
+ Assert.assertTrue(count > 0);
+ sum += count;
+ }
+
+ Assert.assertEquals(200, sum);
+
+ }
+
+ @Test
+ public void testRemoveVolumes() throws Exception {
+ String[] tableNames = getUniqueNames(2);
+
+ verifyVolumesUsed(tableNames[0], false, v1, v2);
+
+ Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+ cluster.stop();
+
+ Configuration conf = new Configuration(false);
+ conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
+
+ conf.set(Property.INSTANCE_VOLUMES.getKey(), v2.toString());
+ BufferedOutputStream fos = new BufferedOutputStream(new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
+ conf.writeXml(fos);
+ fos.close();
+
+ // start cluster and verify that the volume was decommissioned
+ cluster.start();
+
+ Connector conn = cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
+ conn.tableOperations().compact(tableNames[0], null, null, true, true);
+
+ verifyVolumesUsed(tableNames[0], true, v2);
+
+ // check that root tablet is not on volume 1
+ ZooReader zreader = new ZooReader(cluster.getZooKeepers(), 30000);
+ String zpath = ZooUtil.getRoot(new ZooKeeperInstance(cluster.getClientConfig())) + RootTable.ZROOT_TABLET_PATH;
+ String rootTabletDir = new String(zreader.getData(zpath, false, null), UTF_8);
+ Assert.assertTrue(rootTabletDir.startsWith(v2.toString()));
+
+ conn.tableOperations().clone(tableNames[0], tableNames[1], true, new HashMap<String,String>(), new HashSet<String>());
+
+ conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
+ conn.tableOperations().flush(RootTable.NAME, null, null, true);
+
+ verifyVolumesUsed(tableNames[0], true, v2);
+ verifyVolumesUsed(tableNames[1], true, v2);
+
+ }
+
+ private void testReplaceVolume(boolean cleanShutdown) throws Exception {
+ String[] tableNames = getUniqueNames(3);
+
+ verifyVolumesUsed(tableNames[0], false, v1, v2);
+
+ // write to 2nd table, but do not flush data to disk before shutdown
+ writeData(tableNames[1], cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD)));
+
+ if (cleanShutdown)
+ Assert.assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+
+ cluster.stop();
+
+ File v1f = new File(v1.toUri());
+ File v8f = new File(new File(v1.getParent().toUri()), "v8");
+ Assert.assertTrue("Failed to rename " + v1f + " to " + v8f, v1f.renameTo(v8f));
+ Path v8 = new Path(v8f.toURI());
+
+ File v2f = new File(v2.toUri());
+ File v9f = new File(new File(v2.getParent().toUri()), "v9");
+ Assert.assertTrue("Failed to rename " + v2f + " to " + v9f, v2f.renameTo(v9f));
+ Path v9 = new Path(v9f.toURI());
+
+ Configuration conf = new Configuration(false);
+ conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
+
+ conf.set(Property.INSTANCE_VOLUMES.getKey(), v8 + "," + v9);
+ conf.set(Property.INSTANCE_VOLUMES_REPLACEMENTS.getKey(), v1 + " " + v8 + "," + v2 + " " + v9);
+ BufferedOutputStream fos = new BufferedOutputStream(new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
+ conf.writeXml(fos);
+ fos.close();
+
+ // start cluster and verify that volumes were replaced
+ cluster.start();
+
+ verifyVolumesUsed(tableNames[0], true, v8, v9);
+ verifyVolumesUsed(tableNames[1], true, v8, v9);
+
+ // verify writes to new dir
+ getConnector().tableOperations().compact(tableNames[0], null, null, true, true);
+ getConnector().tableOperations().compact(tableNames[1], null, null, true, true);
+
+ verifyVolumesUsed(tableNames[0], true, v8, v9);
+ verifyVolumesUsed(tableNames[1], true, v8, v9);
+
+ // check that root tablet is not on volume 1 or 2
+ ZooReader zreader = new ZooReader(cluster.getZooKeepers(), 30000);
+ String zpath = ZooUtil.getRoot(new ZooKeeperInstance(cluster.getClientConfig())) + RootTable.ZROOT_TABLET_PATH;
+ String rootTabletDir = new String(zreader.getData(zpath, false, null), UTF_8);
+ Assert.assertTrue(rootTabletDir.startsWith(v8.toString()) || rootTabletDir.startsWith(v9.toString()));
+
+ getConnector().tableOperations().clone(tableNames[1], tableNames[2], true, new HashMap<String,String>(), new HashSet<String>());
+
+ getConnector().tableOperations().flush(MetadataTable.NAME, null, null, true);
+ getConnector().tableOperations().flush(RootTable.NAME, null, null, true);
+
+ verifyVolumesUsed(tableNames[0], true, v8, v9);
+ verifyVolumesUsed(tableNames[1], true, v8, v9);
+ verifyVolumesUsed(tableNames[2], true, v8, v9);
+ }
+
+ @Test
+ public void testCleanReplaceVolumes() throws Exception {
+ testReplaceVolume(true);
+ }
+
+ @Test
+ public void testDirtyReplaceVolumes() throws Exception {
+ testReplaceVolume(false);
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/WaitForBalanceIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/WaitForBalanceIT.java b/test/src/main/java/org/apache/accumulo/test/WaitForBalanceIT.java
new file mode 100644
index 0000000..249bf14
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/WaitForBalanceIT.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class WaitForBalanceIT extends ConfigurableMacBase {
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Test
+ public void test() throws Exception {
+ final Connector c = getConnector();
+ // ensure the metadata table is online
+ Iterators.size(c.createScanner(MetadataTable.NAME, Authorizations.EMPTY).iterator());
+ c.instanceOperations().waitForBalance();
+ assertTrue(isBalanced());
+ final String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ c.instanceOperations().waitForBalance();
+ final SortedSet<Text> partitionKeys = new TreeSet<Text>();
+ for (int i = 0; i < 1000; i++) {
+ partitionKeys.add(new Text("" + i));
+ }
+ c.tableOperations().addSplits(tableName, partitionKeys);
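+ // Right after adding 1000 splits the new tablets are expected to sit unevenly on the two
+ // servers, so the cluster should report as unbalanced until waitForBalance() returns.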
+ assertFalse(isBalanced());
+ c.instanceOperations().waitForBalance();
+ assertTrue(isBalanced());
+ }
+
+ private boolean isBalanced() throws Exception {
+ final Map<String,Integer> counts = new HashMap<String,Integer>();
+ int offline = 0;
+ final Connector c = getConnector();
+ for (String tableName : new String[] {MetadataTable.NAME, RootTable.NAME}) {
+ final Scanner s = c.createScanner(tableName, Authorizations.EMPTY);
+ s.setRange(MetadataSchema.TabletsSection.getRange());
+ s.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
+ MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(s);
+ String location = null;
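+ // Within a tablet's metadata row the "loc" entry sorts before the prev-row entry, so the
+ // prev-row column marks the end of each tablet while "location" still holds its server, if any.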
+ for (Entry<Key,Value> entry : s) {
+ Key key = entry.getKey();
+ if (key.getColumnFamily().equals(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME)) {
+ location = key.getColumnQualifier().toString();
+ } else if (MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
+ if (location == null) {
+ offline++;
+ } else {
+ Integer count = counts.get(location);
+ if (count == null)
+ count = 0;
+ counts.put(location, count + 1);
+ }
+ location = null;
+ }
+ }
+ }
+ // the replication table is expected to be offline for this test, so ignore it
+ if (offline > 1) {
+ System.out.println("Offline tablets " + offline);
+ return false;
+ }
+ int average = 0;
+ for (Integer i : counts.values()) {
+ average += i;
+ }
+ average /= counts.size();
+ System.out.println(counts);
+ int tablesCount = c.tableOperations().list().size();
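+ // Tolerance heuristic: a balancer that spreads each table independently can leave roughly one
+ // extra tablet per table on a server, so skew beyond the table count is treated as unbalanced.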
+ for (Entry<String,Integer> hostCount : counts.entrySet()) {
+ if (Math.abs(average - hostCount.getValue()) > tablesCount) {
+ System.out.println("Average " + average + " count " + hostCount.getKey() + ": " + hostCount.getValue());
+ return false;
+ }
+ }
+ return true;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/AccumuloInputFormatIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/AccumuloInputFormatIT.java b/test/src/main/java/org/apache/accumulo/test/functional/AccumuloInputFormatIT.java
new file mode 100644
index 0000000..118f053
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/AccumuloInputFormatIT.java
@@ -0,0 +1,210 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.lang.System.currentTimeMillis;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat;
+import org.apache.accumulo.core.client.mapreduce.impl.BatchInputSplit;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.ConfigurationCopy;
+import org.apache.accumulo.core.conf.DefaultConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.Job;
+import org.junit.Before;
+import org.junit.Test;
+
+public class AccumuloInputFormatIT extends AccumuloClusterHarness {
+
+ AccumuloInputFormat inputFormat;
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 4 * 60;
+ }
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setNumTservers(1);
+ }
+
+ @Before
+ public void before() {
+ inputFormat = new AccumuloInputFormat();
+ }
+
+ /**
+ * Tests several different paths through the getSplits() method by setting different properties and verifying the results.
+ */
+ @Test
+ public void testGetSplits() throws Exception {
+ Connector conn = getConnector();
+ String table = getUniqueNames(1)[0];
+ conn.tableOperations().create(table);
+ insertData(table, currentTimeMillis());
+
+ ClientConfiguration clientConf = cluster.getClientConfig();
+ AccumuloConfiguration clusterClientConf = new ConfigurationCopy(new DefaultConfiguration());
+
+ // Pass SSL and CredentialProvider options into the ClientConfiguration given to AccumuloInputFormat
+ boolean sslEnabled = Boolean.valueOf(clusterClientConf.get(Property.INSTANCE_RPC_SSL_ENABLED));
+ if (sslEnabled) {
+ ClientProperty[] sslProperties = new ClientProperty[] {ClientProperty.INSTANCE_RPC_SSL_ENABLED, ClientProperty.INSTANCE_RPC_SSL_CLIENT_AUTH,
+ ClientProperty.RPC_SSL_KEYSTORE_PATH, ClientProperty.RPC_SSL_KEYSTORE_TYPE, ClientProperty.RPC_SSL_KEYSTORE_PASSWORD,
+ ClientProperty.RPC_SSL_TRUSTSTORE_PATH, ClientProperty.RPC_SSL_TRUSTSTORE_TYPE, ClientProperty.RPC_SSL_TRUSTSTORE_PASSWORD,
+ ClientProperty.RPC_USE_JSSE, ClientProperty.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS};
+
+ for (ClientProperty prop : sslProperties) {
+ // The default value is returned if the property is not set, so we don't have to check whether the value is actually defined
+ clientConf.setProperty(prop, clusterClientConf.get(prop.getKey()));
+ }
+ }
+
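+ // configure a MapReduce job to read the test table through AccumuloInputFormat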
+ Job job = Job.getInstance();
+ AccumuloInputFormat.setInputTableName(job, table);
+ AccumuloInputFormat.setZooKeeperInstance(job, clientConf);
+ AccumuloInputFormat.setConnectorInfo(job, getAdminPrincipal(), getAdminToken());
+
+ // split table
+ TreeSet<Text> splitsToAdd = new TreeSet<Text>();
+ for (int i = 0; i < 10000; i += 1000)
+ splitsToAdd.add(new Text(String.format("%09d", i)));
+ conn.tableOperations().addSplits(table, splitsToAdd);
+ UtilWaitThread.sleep(500); // wait for splits to be propagated
+
+ // get splits without setting any range
+ Collection<Text> actualSplits = conn.tableOperations().listSplits(table);
+ List<InputSplit> splits = inputFormat.getSplits(job);
+ assertEquals(actualSplits.size() + 1, splits.size()); // No ranges set on the job so it'll start with -inf
+
+ // set ranges and get splits
+ List<Range> ranges = new ArrayList<Range>();
+ for (Text text : actualSplits)
+ ranges.add(new Range(text));
+ AccumuloInputFormat.setRanges(job, ranges);
+ splits = inputFormat.getSplits(job);
+ assertEquals(actualSplits.size(), splits.size());
+
+ // offline mode
+ AccumuloInputFormat.setOfflineTableScan(job, true);
+ try {
+ inputFormat.getSplits(job);
+ fail("An exception should have been thrown");
+ } catch (IOException e) {}
+
+ conn.tableOperations().offline(table, true);
+ splits = inputFormat.getSplits(job);
+ assertEquals(actualSplits.size(), splits.size());
+
+ // auto adjust ranges
+ ranges = new ArrayList<Range>();
+ for (int i = 0; i < 5; i++)
+ // overlapping ranges
+ ranges.add(new Range(String.format("%09d", i), String.format("%09d", i + 2)));
+ AccumuloInputFormat.setRanges(job, ranges);
+ splits = inputFormat.getSplits(job);
+ assertEquals(2, splits.size());
+
+ AccumuloInputFormat.setAutoAdjustRanges(job, false);
+ splits = inputFormat.getSplits(job);
+ assertEquals(ranges.size(), splits.size());
+
+ // BatchScan not available for offline scans
+ AccumuloInputFormat.setBatchScan(job, true);
+ // Reset auto-adjust ranges too
+ AccumuloInputFormat.setAutoAdjustRanges(job, true);
+
+ AccumuloInputFormat.setOfflineTableScan(job, true);
+ try {
+ inputFormat.getSplits(job);
+ fail("An exception should have been thrown");
+ } catch (IllegalArgumentException e) {}
+
+ conn.tableOperations().online(table, true);
+ AccumuloInputFormat.setOfflineTableScan(job, false);
+
+ // test for resumption of success
+ splits = inputFormat.getSplits(job);
+ assertEquals(2, splits.size());
+
+ // BatchScan not available with isolated iterators
+ AccumuloInputFormat.setScanIsolation(job, true);
+ try {
+ inputFormat.getSplits(job);
+ fail("An exception should have been thrown");
+ } catch (IllegalArgumentException e) {}
+ AccumuloInputFormat.setScanIsolation(job, false);
+
+ // test for resumption of success
+ splits = inputFormat.getSplits(job);
+ assertEquals(2, splits.size());
+
+ // BatchScan not available with local iterators
+ AccumuloInputFormat.setLocalIterators(job, true);
+ try {
+ inputFormat.getSplits(job);
+ fail("An exception should have been thrown");
+ } catch (IllegalArgumentException e) {}
+ AccumuloInputFormat.setLocalIterators(job, false);
+
+ // Check that we get back the correct type of split
+ conn.tableOperations().online(table);
+ splits = inputFormat.getSplits(job);
+ for (InputSplit split : splits)
+ assertTrue(split instanceof BatchInputSplit);
+
+ // We should divide along the tablet lines similar to when using `setAutoAdjustRanges(job, true)`
+ assertEquals(2, splits.size());
+ }
+
+ private void insertData(String tableName, long ts) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+ BatchWriter bw = getConnector().createBatchWriter(tableName, null);
+
+ for (int i = 0; i < 10000; i++) {
+ String row = String.format("%09d", i);
+
+ Mutation m = new Mutation(new Text(row));
+ m.put(new Text("cf1"), new Text("cq1"), ts, new Value(("" + i).getBytes()));
+ bw.addMutation(m);
+ }
+ bw.close();
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/AddSplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/AddSplitIT.java b/test/src/main/java/org/apache/accumulo/test/functional/AddSplitIT.java
new file mode 100644
index 0000000..4b4aeac
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/AddSplitIT.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class AddSplitIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Test
+ public void addSplitTest() throws Exception {
+
+ String tableName = getUniqueNames(1)[0];
+ Connector c = getConnector();
+ c.tableOperations().create(tableName);
+
+ insertData(tableName, 1L);
+
+ TreeSet<Text> splits = new TreeSet<Text>();
+ splits.add(new Text(String.format("%09d", 333)));
+ splits.add(new Text(String.format("%09d", 666)));
+
+ c.tableOperations().addSplits(tableName, splits);
+
+ UtilWaitThread.sleep(100);
+
+ Collection<Text> actualSplits = c.tableOperations().listSplits(tableName);
+
+ if (!splits.equals(new TreeSet<Text>(actualSplits))) {
+ throw new Exception(splits + " != " + actualSplits);
+ }
+
+ verifyData(tableName, 1L);
+ insertData(tableName, 2L);
+
+ // the splits set was deliberately not cleared; addSplits() should ignore the existing split points
+ // and still create the three additional ones
+
+ splits.add(new Text(String.format("%09d", 200)));
+ splits.add(new Text(String.format("%09d", 500)));
+ splits.add(new Text(String.format("%09d", 800)));
+
+ c.tableOperations().addSplits(tableName, splits);
+
+ UtilWaitThread.sleep(100);
+
+ actualSplits = c.tableOperations().listSplits(tableName);
+
+ if (!splits.equals(new TreeSet<Text>(actualSplits))) {
+ throw new Exception(splits + " != " + actualSplits);
+ }
+
+ verifyData(tableName, 2L);
+ }
+
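+ /**
+ * Scans the table and checks that rows 0..9999 are present in order, each with the expected timestamp and value.
+ */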
+ private void verifyData(String tableName, long ts) throws Exception {
+ Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
+
+ Iterator<Entry<Key,Value>> iter = scanner.iterator();
+
+ for (int i = 0; i < 10000; i++) {
+ if (!iter.hasNext()) {
+ throw new Exception("row " + i + " not found");
+ }
+
+ Entry<Key,Value> entry = iter.next();
+
+ String row = String.format("%09d", i);
+
+ if (!entry.getKey().getRow().equals(new Text(row))) {
+ throw new Exception("unexpected row " + entry.getKey() + " " + i);
+ }
+
+ if (entry.getKey().getTimestamp() != ts) {
+ throw new Exception("unexpected ts " + entry.getKey() + " " + ts);
+ }
+
+ if (Integer.parseInt(entry.getValue().toString()) != i) {
+ throw new Exception("unexpected value " + entry + " " + i);
+ }
+ }
+
+ if (iter.hasNext()) {
+ throw new Exception("found more than expected " + iter.next());
+ }
+
+ }
+
+ private void insertData(String tableName, long ts) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
+ BatchWriter bw = getConnector().createBatchWriter(tableName, null);
+
+ for (int i = 0; i < 10000; i++) {
+ String row = String.format("%09d", i);
+
+ Mutation m = new Mutation(new Text(row));
+ m.put(new Text("cf1"), new Text("cq1"), ts, new Value(Integer.toString(i).getBytes(UTF_8)));
+ bw.addMutation(m);
+ }
+
+ bw.close();
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/BackupMasterIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BackupMasterIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BackupMasterIT.java
new file mode 100644
index 0000000..d8979db
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BackupMasterIT.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.accumulo.fate.util.UtilWaitThread;
+import org.apache.accumulo.fate.zookeeper.ZooReaderWriter;
+import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
+import org.apache.accumulo.master.Master;
+import org.junit.Test;
+
+public class BackupMasterIT extends ConfigurableMacBase {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 120;
+ }
+
+ @Test
+ public void test() throws Exception {
+ // wait for master
+ UtilWaitThread.sleep(1000);
+ // create a backup
+ Process backup = exec(Master.class);
+ try {
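+ // connect to ZooKeeper directly so we can inspect and manipulate the master lock entries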
+ ZooReaderWriter writer = new ZooReaderWriter(cluster.getZooKeepers(), 30 * 1000, "digest", "accumulo:DONTTELL".getBytes());
+ String root = "/accumulo/" + getConnector().getInstance().getInstanceID();
+ List<String> children = Collections.emptyList();
+ // wait for 2 lock entries
+ do {
+ UtilWaitThread.sleep(100);
+ children = writer.getChildren(root + "/masters/lock");
+ } while (children.size() != 2);
+ Collections.sort(children);
+ // wait for the backup master to learn to be the backup
+ UtilWaitThread.sleep(1000);
+ // generate a false zookeeper event
+ String lockPath = root + "/masters/lock/" + children.get(0);
+ byte[] data = writer.getData(lockPath, null);
+ writer.getZooKeeper().setData(lockPath, data, -1);
+ // let it propagate
+ UtilWaitThread.sleep(500);
+ // kill the master by removing its lock
+ writer.recursiveDelete(lockPath, NodeMissingPolicy.FAIL);
+ // ensure the backup becomes the master
+ getConnector().tableOperations().create(getUniqueNames(1)[0]);
+ } finally {
+ backup.destroy();
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/BadIteratorMincIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BadIteratorMincIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BadIteratorMincIT.java
new file mode 100644
index 0000000..4c6fc00
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BadIteratorMincIT.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.Assert.assertEquals;
+
+import java.util.EnumSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class BadIteratorMincIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector c = getConnector();
+
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ IteratorSetting is = new IteratorSetting(30, BadIterator.class);
+ c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.minc));
+ BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+
+ Mutation m = new Mutation(new Text("r1"));
+ m.put(new Text("acf"), new Text(tableName), new Value("1".getBytes(UTF_8)));
+
+ bw.addMutation(m);
+ bw.close();
+
+ c.tableOperations().flush(tableName, null, null, false);
+ UtilWaitThread.sleep(1000);
+
+ // minc should fail, so there should be no files
+ FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 0, 0);
+
+ // try to scan table
+ Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
+ int count = Iterators.size(scanner.iterator());
+ assertEquals("Did not see expected # entries " + count, 1, count);
+
+ // remove the bad iterator
+ c.tableOperations().removeIterator(tableName, BadIterator.class.getSimpleName(), EnumSet.of(IteratorScope.minc));
+
+ UtilWaitThread.sleep(5000);
+
+ // minc should complete
+ FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 1, 1);
+
+ count = Iterators.size(scanner.iterator());
+
+ assertEquals("Did not see expected # entries " + count, 1, count);
+
+ // now try putting bad iterator back and deleting the table
+ c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.minc));
+ bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+ m = new Mutation(new Text("r2"));
+ m.put(new Text("acf"), new Text(tableName), new Value("1".getBytes(UTF_8)));
+ bw.addMutation(m);
+ bw.close();
+
+ // make sure property is given time to propagate
+ UtilWaitThread.sleep(500);
+
+ c.tableOperations().flush(tableName, null, null, false);
+
+ // make sure the flush has time to start
+ UtilWaitThread.sleep(1000);
+
+ // this should not hang
+ c.tableOperations().delete(tableName);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/BalanceAfterCommsFailureIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BalanceAfterCommsFailureIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BalanceAfterCommsFailureIT.java
new file mode 100644
index 0000000..ae470f6
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BalanceAfterCommsFailureIT.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.impl.ClientContext;
+import org.apache.accumulo.core.client.impl.Credentials;
+import org.apache.accumulo.core.client.impl.MasterClient;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.master.thrift.MasterClientService;
+import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
+import org.apache.accumulo.core.master.thrift.TableInfo;
+import org.apache.accumulo.core.master.thrift.TabletServerStatus;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.trace.Tracer;
+import org.apache.accumulo.fate.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.minicluster.impl.ProcessReference;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+import com.google.common.collect.Iterables;
+
+public class BalanceAfterCommsFailureIT extends ConfigurableMacBase {
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setProperty(Property.GENERAL_RPC_TIMEOUT, "2s");
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector c = this.getConnector();
+ c.tableOperations().create("test");
+ Collection<ProcessReference> tservers = getCluster().getProcesses().get(ServerType.TABLET_SERVER);
+ ArrayList<Integer> tserverPids = new ArrayList<Integer>(tservers.size());
+ for (ProcessReference tserver : tservers) {
+ Process p = tserver.getProcess();
+ if (!p.getClass().getName().equals("java.lang.UNIXProcess")) {
+ log.info("Found process that was not UNIXProcess, exiting test");
+ return;
+ }
+
+ Field f = p.getClass().getDeclaredField("pid");
+ f.setAccessible(true);
+ tserverPids.add(f.getInt(p));
+ }
+
+ for (int pid : tserverPids) {
+ assertEquals(0, Runtime.getRuntime().exec(new String[] {"kill", "-SIGSTOP", Integer.toString(pid)}).waitFor());
+ }
+ UtilWaitThread.sleep(20 * 1000);
+ for (int pid : tserverPids) {
+ assertEquals(0, Runtime.getRuntime().exec(new String[] {"kill", "-SIGCONT", Integer.toString(pid)}).waitFor());
+ }
+ SortedSet<Text> splits = new TreeSet<Text>();
+ for (String split : "a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")) {
+ splits.add(new Text(split));
+ }
+ c.tableOperations().addSplits("test", splits);
+ // Ensure all of the tablets are actually assigned
+ assertEquals(0, Iterables.size(c.createScanner("test", Authorizations.EMPTY)));
+ UtilWaitThread.sleep(30 * 1000);
+ checkBalance(c);
+ }
+
+ private void checkBalance(Connector c) throws Exception {
+ Credentials creds = new Credentials("root", new PasswordToken(ROOT_PASSWORD));
+ ClientContext context = new ClientContext(c.getInstance(), creds, getClientConfig());
+
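+ // poll the master up to 10 times, pausing 3 seconds between polls, until no tablets are unassigned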
+ MasterMonitorInfo stats = null;
+ int unassignedTablets = 1;
+ for (int i = 0; unassignedTablets > 0 && i < 10; i++) {
+ MasterClientService.Iface client = null;
+ try {
+ client = MasterClient.getConnectionWithRetry(context);
+ stats = client.getMasterStats(Tracer.traceInfo(), context.rpcCreds());
+ } finally {
+ if (client != null)
+ MasterClient.close(client);
+ }
+ unassignedTablets = stats.getUnassignedTablets();
+ if (unassignedTablets > 0) {
+ log.info("Found " + unassignedTablets + " unassigned tablets, sleeping 3 seconds for tablet assignment");
+ Thread.sleep(3000);
+ }
+ }
+
+ assertEquals("Unassigned tablets were not assigned within 30 seconds", 0, unassignedTablets);
+
+ List<Integer> counts = new ArrayList<Integer>();
+ for (TabletServerStatus server : stats.tServerInfo) {
+ int count = 0;
+ for (TableInfo table : server.tableMap.values()) {
+ count += table.onlineTablets;
+ }
+ counts.add(count);
+ }
+ assertTrue("Expected to have at least two TabletServers", counts.size() > 1);
+ for (int i = 1; i < counts.size(); i++) {
+ int diff = Math.abs(counts.get(0) - counts.get(i));
+ assertTrue("Expected difference in tablets to be less than or equal to " + counts.size() + " but was " + diff + ". Counts " + counts,
+ diff <= counts.size());
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/BalanceInPresenceOfOfflineTableIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BalanceInPresenceOfOfflineTableIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BalanceInPresenceOfOfflineTableIT.java
new file mode 100644
index 0000000..623d79b
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BalanceInPresenceOfOfflineTableIT.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Arrays;
+import java.util.Map;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.impl.ClientContext;
+import org.apache.accumulo.core.client.impl.Credentials;
+import org.apache.accumulo.core.client.impl.MasterClient;
+import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.master.thrift.MasterClientService;
+import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
+import org.apache.accumulo.core.master.thrift.TableInfo;
+import org.apache.accumulo.core.trace.Tracer;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.commons.lang.math.NumberUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.thrift.TException;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Start a new table, create many splits, and take the table offline before they can rebalance. Then verify that a different table can still balance.
+ */
+public class BalanceInPresenceOfOfflineTableIT extends AccumuloClusterHarness {
+
+ private static Logger log = LoggerFactory.getLogger(BalanceInPresenceOfOfflineTableIT.class);
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ Map<String,String> siteConfig = cfg.getSiteConfig();
+ siteConfig.put(Property.TSERV_MAXMEM.getKey(), "10K");
+ siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "0");
+ cfg.setSiteConfig(siteConfig);
+ // ensure we have two tservers
+ if (cfg.getNumTservers() < 2) {
+ cfg.setNumTservers(2);
+ }
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 10 * 60;
+ }
+
+ private static final int NUM_SPLITS = 200;
+
+ private String UNUSED_TABLE, TEST_TABLE;
+
+ private Connector connector;
+
+ @Before
+ public void setupTables() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException {
+ Connector conn = getConnector();
+ // Need at least two tservers
+ Assume.assumeTrue("Not enough tservers to run test", conn.instanceOperations().getTabletServers().size() >= 2);
+
+ // set up splits
+ final SortedSet<Text> splits = new TreeSet<Text>();
+ for (int i = 0; i < NUM_SPLITS; i++) {
+ splits.add(new Text(String.format("%08x", i * 1000)));
+ }
+
+ String[] names = getUniqueNames(2);
+ UNUSED_TABLE = names[0];
+ TEST_TABLE = names[1];
+
+ // load into a table we won't use
+ connector = getConnector();
+ connector.tableOperations().create(UNUSED_TABLE);
+ connector.tableOperations().addSplits(UNUSED_TABLE, splits);
+ // mark the table offline before it can rebalance.
+ connector.tableOperations().offline(UNUSED_TABLE);
+
+ // actual test table
+ connector.tableOperations().create(TEST_TABLE);
+ connector.tableOperations().setProperty(TEST_TABLE, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
+ }
+
+ @Test
+ public void test() throws Exception {
+ log.info("Test that balancing is not stopped by an offline table with outstanding migrations.");
+
+ log.debug("starting test ingestion");
+
+ TestIngest.Opts opts = new TestIngest.Opts();
+ VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+ ClientConfiguration conf = cluster.getClientConfig();
+ if (conf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ opts.updateKerberosCredentials(cluster.getClientConfig());
+ vopts.updateKerberosCredentials(cluster.getClientConfig());
+ } else {
+ opts.setPrincipal("root");
+ vopts.setPrincipal("root");
+ }
+ vopts.rows = opts.rows = 200000;
+ opts.setTableName(TEST_TABLE);
+ TestIngest.ingest(connector, opts, new BatchWriterOpts());
+ connector.tableOperations().flush(TEST_TABLE, null, null, true);
+ vopts.setTableName(TEST_TABLE);
+ VerifyIngest.verifyIngest(connector, vopts, new ScannerOpts());
+
+ log.debug("waiting for balancing, up to ~5 minutes to allow for migration cleanup.");
+ final long startTime = System.currentTimeMillis();
+ long currentWait = 10 * 1000;
+ boolean balancingWorked = false;
+
+ Credentials creds = new Credentials(getAdminPrincipal(), getAdminToken());
+ while (!balancingWorked && (System.currentTimeMillis() - startTime) < ((5 * 60 + 15) * 1000)) {
+ Thread.sleep(currentWait);
+ currentWait *= 2;
+
+ log.debug("fetch the list of tablets assigned to each tserver.");
+
+ MasterClientService.Iface client = null;
+ MasterMonitorInfo stats = null;
+ try {
+ Instance instance = new ZooKeeperInstance(cluster.getClientConfig());
+ client = MasterClient.getConnectionWithRetry(new ClientContext(instance, creds, cluster.getClientConfig()));
+ stats = client.getMasterStats(Tracer.traceInfo(), creds.toThrift(instance));
+ } catch (ThriftSecurityException exception) {
+ throw new AccumuloSecurityException(exception);
+ } catch (TException exception) {
+ throw new AccumuloException(exception);
+ } finally {
+ if (client != null) {
+ MasterClient.close(client);
+ }
+ }
+
+ if (stats.getTServerInfoSize() < 2) {
+ log.debug("we need >= 2 servers. sleeping for " + currentWait + "ms");
+ continue;
+ }
+ if (stats.getUnassignedTablets() != 0) {
+ log.debug("We shouldn't have unassigned tablets. sleeping for " + currentWait + "ms");
+ continue;
+ }
+
+ long[] tabletsPerServer = new long[stats.getTServerInfoSize()];
+ Arrays.fill(tabletsPerServer, 0L);
+ for (int i = 0; i < stats.getTServerInfoSize(); i++) {
+ for (Map.Entry<String,TableInfo> entry : stats.getTServerInfo().get(i).getTableMap().entrySet()) {
+ tabletsPerServer[i] += entry.getValue().getTablets();
+ }
+ }
+
+ if (tabletsPerServer[0] <= 10) {
+ log.debug("We should have > 10 tablets. sleeping for " + currentWait + "ms");
+ continue;
+ }
+ long min = NumberUtils.min(tabletsPerServer), max = NumberUtils.max(tabletsPerServer);
+ log.debug("Min=" + min + ", Max=" + max);
+ if ((min / ((double) max)) < 0.5) {
+ log.debug("ratio of min to max tablets per server should be roughly even. sleeping for " + currentWait + "ms");
+ continue;
+ }
+ balancingWorked = true;
+ }
+
+ Assert.assertTrue("did not properly balance", balancingWorked);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/BatchScanSplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BatchScanSplitIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BatchScanSplitIT.java
new file mode 100644
index 0000000..14295c4
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BatchScanSplitIT.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map.Entry;
+import java.util.Random;
+
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class BatchScanSplitIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(BatchScanSplitIT.class);
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setProperty(Property.TSERV_MAJC_DELAY, "0");
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+
+ int numRows = 1 << 18;
+
+ BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
+
+ for (int i = 0; i < numRows; i++) {
+ Mutation m = new Mutation(new Text(String.format("%09x", i)));
+ m.put(new Text("cf1"), new Text("cq1"), new Value(String.format("%016x", numRows - i).getBytes(UTF_8)));
+ bw.addMutation(m);
+ }
+
+ bw.close();
+
+ getConnector().tableOperations().flush(tableName, null, null, true);
+
+ getConnector().tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "4K");
+
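+ // wait for the lowered split threshold to take effect and produce at least two splits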
+ Collection<Text> splits = getConnector().tableOperations().listSplits(tableName);
+ while (splits.size() < 2) {
+ UtilWaitThread.sleep(1);
+ splits = getConnector().tableOperations().listSplits(tableName);
+ }
+
+ log.info("splits : " + splits);
+
+ Random random = new Random(19011230);
+ HashMap<Text,Value> expected = new HashMap<Text,Value>();
+ ArrayList<Range> ranges = new ArrayList<Range>();
+ for (int i = 0; i < 100; i++) {
+ int r = random.nextInt(numRows);
+ Text row = new Text(String.format("%09x", r));
+ expected.put(row, new Value(String.format("%016x", numRows - r).getBytes(UTF_8)));
+ ranges.add(new Range(row));
+ }
+
+ // logger.setLevel(Level.TRACE);
+
+ HashMap<Text,Value> found = new HashMap<Text,Value>();
+
+ for (int i = 0; i < 20; i++) {
+ BatchScanner bs = getConnector().createBatchScanner(tableName, Authorizations.EMPTY, 4);
+
+ found.clear();
+
+ long t1 = System.currentTimeMillis();
+
+ bs.setRanges(ranges);
+
+ for (Entry<Key,Value> entry : bs) {
+ found.put(entry.getKey().getRow(), entry.getValue());
+ }
+ bs.close();
+
+ long t2 = System.currentTimeMillis();
+
+ log.info(String.format("rate : %06.2f%n", ranges.size() / ((t2 - t1) / 1000.0)));
+
+ if (!found.equals(expected))
+ throw new Exception("Found and expected differ " + found + " " + expected);
+ }
+
+ splits = getConnector().tableOperations().listSplits(tableName);
+ log.info("splits : " + splits);
+ }
+
+}
[28/43] accumulo git commit: ACCUMULO-3871 move ITs into distro jar,
stop building test jar
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/TabletStateChangeIteratorIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/TabletStateChangeIteratorIT.java b/test/src/main/java/org/apache/accumulo/test/functional/TabletStateChangeIteratorIT.java
new file mode 100644
index 0000000..0efb1aa
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/TabletStateChangeIteratorIT.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchDeleter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.master.thrift.MasterState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.fate.zookeeper.ZooCache;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
+import org.apache.accumulo.server.master.state.CurrentState;
+import org.apache.accumulo.server.master.state.MergeInfo;
+import org.apache.accumulo.server.master.state.MetaDataTableScanner;
+import org.apache.accumulo.server.master.state.TServerInstance;
+import org.apache.accumulo.server.master.state.TabletStateChangeIterator;
+import org.apache.accumulo.server.zookeeper.ZooLock;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+import com.google.common.base.Predicate;
+import com.google.common.collect.Sets;
+
+/**
+ * Test to ensure that the {@link TabletStateChangeIterator} properly skips over tablet information in the metadata table when there is no work to be done on
+ * the tablet (see ACCUMULO-3580)
+ */
+public class TabletStateChangeIteratorIT extends SharedMiniClusterBase {
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Test
+ public void test() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException {
+ String[] tables = getUniqueNames(4);
+ final String t1 = tables[0];
+ final String t2 = tables[1];
+ final String t3 = tables[2];
+ final String cloned = tables[3];
+
+ // create some metadata
+ createTable(t1, true);
+ createTable(t2, false);
+ createTable(t3, true);
+
+ // examine a clone of the metadata table, so we can manipulate it
+ cloneMetadataTable(cloned);
+
+ assertEquals("No tables should need attention", 0, findTabletsNeedingAttention(cloned));
+
+ // test the assigned case (no location)
+ removeLocation(cloned, t3);
+ assertEquals("Should have one tablet without a loc", 1, findTabletsNeedingAttention(cloned));
+
+ // TODO test the cases where the assignment is to a dead tserver
+ // TODO test the cases where there are ongoing merges
+ // TODO test the bad tablet location state case (active split, inconsistent metadata)
+
+ // clean up
+ dropTables(t1, t2, t3);
+ }
+
+ private void removeLocation(String table, String tableNameToModify) throws TableNotFoundException, MutationsRejectedException {
+ String tableIdToModify = getConnector().tableOperations().tableIdMap().get(tableNameToModify);
+ BatchDeleter deleter = getConnector().createBatchDeleter(table, Authorizations.EMPTY, 1, new BatchWriterConfig());
+ deleter.setRanges(Collections.singleton(new KeyExtent(new Text(tableIdToModify), null, null).toMetadataRange()));
+ deleter.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
+ deleter.delete();
+ deleter.close();
+ }
+
+ private int findTabletsNeedingAttention(String table) throws TableNotFoundException {
+ int results = 0;
+ Scanner scanner = getConnector().createScanner(table, Authorizations.EMPTY);
+ MetaDataTableScanner.configureScanner(scanner, new State());
+ scanner.updateScanIteratorOption("tabletChange", "debug", "1");
+ for (Entry<Key,Value> e : scanner) {
+ if (e != null)
+ results++;
+ }
+ return results;
+ }
+
+ private void createTable(String t, boolean online) throws AccumuloSecurityException, AccumuloException, TableNotFoundException, TableExistsException {
+ Connector conn = getConnector();
+ conn.tableOperations().create(t);
+ conn.tableOperations().online(t, true);
+ if (!online) {
+ conn.tableOperations().offline(t, true);
+ }
+ }
+
+ private void cloneMetadataTable(String cloned) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, TableExistsException {
+ getConnector().tableOperations().clone(MetadataTable.NAME, cloned, true, null, null);
+ }
+
+ private void dropTables(String... tables) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+ for (String t : tables) {
+ getConnector().tableOperations().delete(t);
+ }
+ }
+
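+ /**
+ * Minimal CurrentState for the iterator: every live tserver is online, no merges, migrations, or shutdowns are in progress, and the master is NORMAL.
+ */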
+ private final class State implements CurrentState {
+
+ @Override
+ public Set<TServerInstance> onlineTabletServers() {
+ HashSet<TServerInstance> tservers = new HashSet<TServerInstance>();
+ for (String tserver : getConnector().instanceOperations().getTabletServers()) {
+ try {
+ String zPath = ZooUtil.getRoot(getConnector().getInstance()) + Constants.ZTSERVERS + "/" + tserver;
+ long sessionId = ZooLock.getSessionId(new ZooCache(getCluster().getZooKeepers(), getConnector().getInstance().getZooKeepersSessionTimeOut()), zPath);
+ tservers.add(new TServerInstance(tserver, sessionId));
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+ return tservers;
+ }
+
+ @Override
+ public Set<String> onlineTables() {
+ HashSet<String> onlineTables = new HashSet<String>(getConnector().tableOperations().tableIdMap().values());
+ return Sets.filter(onlineTables, new Predicate<String>() {
+ @Override
+ public boolean apply(String tableId) {
+ return Tables.getTableState(getConnector().getInstance(), tableId) == TableState.ONLINE;
+ }
+ });
+ }
+
+ @Override
+ public Collection<MergeInfo> merges() {
+ return Collections.emptySet();
+ }
+
+ @Override
+ public Collection<KeyExtent> migrations() {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public MasterState getMasterState() {
+ return MasterState.NORMAL;
+ }
+
+ @Override
+ public Set<TServerInstance> shutdownServers() {
+ return Collections.emptySet();
+ }
+
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/TimeoutIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/TimeoutIT.java b/test/src/main/java/org/apache/accumulo/test/functional/TimeoutIT.java
new file mode 100644
index 0000000..ffadd22
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/TimeoutIT.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.fail;
+
+import java.util.Collections;
+import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.TimedOutException;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.junit.Test;
+
+/**
+ * Verifies that BatchWriter and BatchScanner operations fail with a timeout when their configured timeouts are exceeded.
+ */
+public class TimeoutIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 75;
+ }
+
+ @Test
+ public void run() throws Exception {
+ Connector conn = getConnector();
+ String[] tableNames = getUniqueNames(2);
+ testBatchWriterTimeout(conn, tableNames[0]);
+ testBatchScannerTimeout(conn, tableNames[1]);
+ }
+
+ public void testBatchWriterTimeout(Connector conn, String tableName) throws Exception {
+ conn.tableOperations().create(tableName);
+ conn.tableOperations().addConstraint(tableName, SlowConstraint.class.getName());
+
+ // give constraint time to propagate through zookeeper
+ UtilWaitThread.sleep(1000);
+
+ BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig().setTimeout(3, TimeUnit.SECONDS));
+
+ Mutation mut = new Mutation("r1");
+ mut.put("cf1", "cq1", "v1");
+
+ bw.addMutation(mut);
+ try {
+ bw.close();
+ fail("batch writer did not timeout");
+ } catch (MutationsRejectedException mre) {
+ if (mre.getCause() instanceof TimedOutException)
+ return;
+ throw mre;
+ }
+ }
+
+ public void testBatchScannerTimeout(Connector conn, String tableName) throws Exception {
+ getConnector().tableOperations().create(tableName);
+
+ BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
+
+ Mutation m = new Mutation("r1");
+ m.put("cf1", "cq1", "v1");
+ m.put("cf1", "cq2", "v2");
+ m.put("cf1", "cq3", "v3");
+ m.put("cf1", "cq4", "v4");
+
+ bw.addMutation(m);
+ bw.close();
+
+ BatchScanner bs = getConnector().createBatchScanner(tableName, Authorizations.EMPTY, 2);
+ bs.setRanges(Collections.singletonList(new Range()));
+
+ // should not timeout
+ for (Entry<Key,Value> entry : bs) {
+ entry.getKey();
+ }
+
+ bs.setTimeout(5, TimeUnit.SECONDS);
+ IteratorSetting iterSetting = new IteratorSetting(100, SlowIterator.class);
+ iterSetting.addOption("sleepTime", "2000");
+ bs.addScanIterator(iterSetting);
+
+ try {
+ for (Entry<Key,Value> entry : bs) {
+ entry.getKey();
+ }
+ fail("batch scanner did not time out");
+ } catch (TimedOutException toe) {
+ // toe.printStackTrace();
+ }
+ bs.close();
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/VisibilityIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/VisibilityIT.java b/test/src/main/java/org/apache/accumulo/test/functional/VisibilityIT.java
new file mode 100644
index 0000000..3d6ad85
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/VisibilityIT.java
@@ -0,0 +1,323 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.core.util.ByteArraySet;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class VisibilityIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ Authorizations origAuths = null;
+
+ @Before
+ public void emptyAuths() throws Exception {
+ Connector c = getConnector();
+ origAuths = c.securityOperations().getUserAuthorizations(getAdminPrincipal());
+ }
+
+ @After
+ public void resetAuths() throws Exception {
+ Connector c = getConnector();
+ if (null != origAuths) {
+ c.securityOperations().changeUserAuthorizations(getAdminPrincipal(), origAuths);
+ }
+ }
+
+ @Test
+ public void run() throws Exception {
+ Connector c = getConnector();
+ String[] tableNames = getUniqueNames(2);
+ String table = tableNames[0];
+ c.tableOperations().create(table);
+ String table2 = tableNames[1];
+ c.tableOperations().create(table2);
+ c.tableOperations().setProperty(table2, Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey(), "DEFLABEL");
+
+ insertData(c, table);
+ queryData(c, table);
+ deleteData(c, table);
+
+ insertDefaultData(c, table2);
+ queryDefaultData(c, table2);
+
+ }
+
+ private static SortedSet<String> nss(String... labels) {
+ TreeSet<String> ts = new TreeSet<String>();
+
+ for (String s : labels) {
+ ts.add(s);
+ }
+
+ return ts;
+ }
+
+ private void mput(Mutation m, String cf, String cq, String cv, String val) {
+ ColumnVisibility le = new ColumnVisibility(cv.getBytes(UTF_8));
+ m.put(new Text(cf), new Text(cq), le, new Value(val.getBytes(UTF_8)));
+ }
+
+ private void mputDelete(Mutation m, String cf, String cq, String cv) {
+ ColumnVisibility le = new ColumnVisibility(cv.getBytes(UTF_8));
+ m.putDelete(new Text(cf), new Text(cq), le);
+ }
+
+ private void insertData(Connector c, String tableName) throws Exception {
+
+ BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+ Mutation m1 = new Mutation(new Text("row1"));
+
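+ // one cell per distinct visibility expression; the queries below check exactly which auth combinations can see each value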
+ mput(m1, "cf1", "cq1", "", "v1");
+ mput(m1, "cf1", "cq1", "A", "v2");
+ mput(m1, "cf1", "cq1", "B", "v3");
+ mput(m1, "cf1", "cq1", "A&B", "v4");
+ mput(m1, "cf1", "cq1", "A&(L|M)", "v5");
+ mput(m1, "cf1", "cq1", "B&(L|M)", "v6");
+ mput(m1, "cf1", "cq1", "A&B&(L|M)", "v7");
+ mput(m1, "cf1", "cq1", "A&B&(L)", "v8");
+ mput(m1, "cf1", "cq1", "A&FOO", "v9");
+ mput(m1, "cf1", "cq1", "A&FOO&(L|M)", "v10");
+ mput(m1, "cf1", "cq1", "FOO", "v11");
+ mput(m1, "cf1", "cq1", "(A|B)&FOO&(L|M)", "v12");
+ mput(m1, "cf1", "cq1", "A&B&(L|M|FOO)", "v13");
+
+ bw.addMutation(m1);
+ bw.close();
+ }
+
+ private void deleteData(Connector c, String tableName) throws Exception {
+
+ BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+ Mutation m1 = new Mutation(new Text("row1"));
+
+ mputDelete(m1, "cf1", "cq1", "");
+ mputDelete(m1, "cf1", "cq1", "A");
+ mputDelete(m1, "cf1", "cq1", "A&B");
+ mputDelete(m1, "cf1", "cq1", "B&(L|M)");
+ mputDelete(m1, "cf1", "cq1", "A&B&(L)");
+ mputDelete(m1, "cf1", "cq1", "A&FOO&(L|M)");
+ mputDelete(m1, "cf1", "cq1", "(A|B)&FOO&(L|M)");
+ mputDelete(m1, "cf1", "cq1", "FOO&A"); // should not delete anything
+
+ bw.addMutation(m1);
+ bw.close();
+
+ Map<Set<String>,Set<String>> expected = new HashMap<Set<String>,Set<String>>();
+
+ expected.put(nss("A", "L"), nss("v5"));
+ expected.put(nss("A", "M"), nss("v5"));
+ expected.put(nss("B"), nss("v3"));
+ expected.put(nss("Z"), nss());
+ expected.put(nss("A", "B", "L"), nss("v7", "v13"));
+ expected.put(nss("A", "B", "M"), nss("v7", "v13"));
+ expected.put(nss("A", "B", "FOO"), nss("v13"));
+ expected.put(nss("FOO"), nss("v11"));
+ expected.put(nss("A", "FOO"), nss("v9"));
+
+ queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "FOO", "L", "M", "Z"), expected);
+ }
+
+ private void insertDefaultData(Connector c, String tableName) throws Exception {
+ BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+ Mutation m1 = new Mutation(new Text("row1"));
+
+ mput(m1, "cf1", "cq1", "BASE", "v1");
+ mput(m1, "cf1", "cq2", "DEFLABEL", "v2");
+ mput(m1, "cf1", "cq3", "", "v3");
+
+ bw.addMutation(m1);
+ bw.close();
+ }
+
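+ /**
+ * Recursively adds to 'all' every set formed by extending 'prefix' with a distinct combination of elements from 'suffix'.
+ */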
+ private static void uniqueCombos(List<Set<String>> all, Set<String> prefix, Set<String> suffix) {
+
+ all.add(prefix);
+
+ TreeSet<String> ss = new TreeSet<String>(suffix);
+
+ for (String s : suffix) {
+ TreeSet<String> ps = new TreeSet<String>(prefix);
+ ps.add(s);
+ ss.remove(s);
+
+ uniqueCombos(all, ps, ss);
+ }
+ }
+
+ private void queryData(Connector c, String tableName) throws Exception {
+ Map<Set<String>,Set<String>> expected = new HashMap<Set<String>,Set<String>>();
+ expected.put(nss(), nss("v1"));
+ expected.put(nss("A"), nss("v2"));
+ expected.put(nss("A", "L"), nss("v5"));
+ expected.put(nss("A", "M"), nss("v5"));
+ expected.put(nss("B"), nss("v3"));
+ expected.put(nss("B", "L"), nss("v6"));
+ expected.put(nss("B", "M"), nss("v6"));
+ expected.put(nss("Z"), nss());
+ expected.put(nss("A", "B"), nss("v4"));
+ expected.put(nss("A", "B", "L"), nss("v7", "v8", "v13"));
+ expected.put(nss("A", "B", "M"), nss("v7", "v13"));
+ expected.put(nss("A", "B", "FOO"), nss("v13"));
+ expected.put(nss("FOO"), nss("v11"));
+ expected.put(nss("A", "FOO"), nss("v9"));
+ expected.put(nss("A", "FOO", "L"), nss("v10", "v12"));
+ expected.put(nss("A", "FOO", "M"), nss("v10", "v12"));
+ expected.put(nss("B", "FOO", "L"), nss("v12"));
+ expected.put(nss("B", "FOO", "M"), nss("v12"));
+
+ queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "FOO", "L", "M", "Z"), expected);
+ queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "L", "M", "Z"), expected);
+ queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "Z"), expected);
+ queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss("Z"), expected);
+ queryData(c, tableName, nss("A", "B", "FOO", "L", "M", "Z"), nss(), expected);
+ }
+
+ private void queryData(Connector c, String tableName, Set<String> allAuths, Set<String> userAuths, Map<Set<String>,Set<String>> expected) throws Exception {
+
+ c.securityOperations().changeUserAuthorizations(getAdminPrincipal(), new Authorizations(nbas(userAuths)));
+
+ ArrayList<Set<String>> combos = new ArrayList<Set<String>>();
+ uniqueCombos(combos, nss(), allAuths);
+
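+ // For each combination of auths, the visible values are the union of the
+ // expected entries whose (user-filtered) label sets it contains.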
+ for (Set<String> set1 : combos) {
+ Set<String> e = new TreeSet<String>();
+ for (Set<String> set2 : combos) {
+
+ set2 = new HashSet<String>(set2);
+ set2.retainAll(userAuths);
+
+ if (set1.containsAll(set2) && expected.containsKey(set2)) {
+ e.addAll(expected.get(set2));
+ }
+ }
+
+ set1.retainAll(userAuths);
+ verify(c, tableName, set1, e);
+ }
+
+ }
+
+ private void queryDefaultData(Connector c, String tableName) throws Exception {
+ Scanner scanner;
+
+ // should return no records
+ c.securityOperations().changeUserAuthorizations(getAdminPrincipal(), new Authorizations("BASE", "DEFLABEL"));
+ scanner = c.createScanner(tableName, new Authorizations());
+ verifyDefault(scanner, 0);
+
+ // should return one record
+ scanner = c.createScanner(tableName, new Authorizations("BASE"));
+ verifyDefault(scanner, 1);
+
+ // should return all three records
+ scanner = c.createScanner(tableName, new Authorizations("BASE", "DEFLABEL"));
+ verifyDefault(scanner, 3);
+ }
+
+ private void verifyDefault(Scanner scanner, int expectedCount) throws Exception {
+ int actual = Iterators.size(scanner.iterator());
+ if (actual != expectedCount)
+ throw new Exception("actual count " + actual + " != expected count " + expectedCount);
+ }
+
+ private void verify(Connector c, String tableName, Set<String> auths, Set<String> expectedValues) throws Exception {
+ ByteArraySet bas = nbas(auths);
+
+ try {
+ verify(c, tableName, bas, expectedValues.toArray(new String[0]));
+ } catch (Exception e) {
+ throw new Exception("Verification failed auths=" + auths + " exp=" + expectedValues, e);
+ }
+ }
+
+ private ByteArraySet nbas(Set<String> auths) {
+ ByteArraySet bas = new ByteArraySet();
+ for (String auth : auths) {
+ bas.add(auth.getBytes(UTF_8));
+ }
+ return bas;
+ }
+
+ private void verify(Connector c, String tableName, ByteArraySet nss, String... expected) throws Exception {
+ Scanner scanner = c.createScanner(tableName, new Authorizations(nss));
+ verify(scanner.iterator(), expected);
+
+ BatchScanner bs = c.createBatchScanner(tableName, new Authorizations(nss), 3);
+ bs.setRanges(Collections.singleton(new Range()));
+ verify(bs.iterator(), expected);
+ bs.close();
+ }
+
+ private void verify(Iterator<Entry<Key,Value>> iter, String... expected) throws Exception {
+ HashSet<String> valuesSeen = new HashSet<String>();
+
+ while (iter.hasNext()) {
+ Entry<Key,Value> entry = iter.next();
+ if (valuesSeen.contains(entry.getValue().toString())) {
+ throw new Exception("Value seen twice");
+ }
+ valuesSeen.add(entry.getValue().toString());
+ }
+
+ for (String ev : expected) {
+ if (!valuesSeen.remove(ev)) {
+ throw new Exception("Did not see expected value " + ev);
+ }
+ }
+
+ if (valuesSeen.size() != 0) {
+ throw new Exception("Saw more values than expected " + valuesSeen);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java b/test/src/main/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java
new file mode 100644
index 0000000..34d1c6d
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/WALSunnyDayIT.java
@@ -0,0 +1,234 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.apache.accumulo.core.conf.Property.GC_CYCLE_DELAY;
+import static org.apache.accumulo.core.conf.Property.GC_CYCLE_START;
+import static org.apache.accumulo.core.conf.Property.INSTANCE_ZK_TIMEOUT;
+import static org.apache.accumulo.core.conf.Property.TSERV_WALOG_MAX_SIZE;
+import static org.apache.accumulo.core.conf.Property.TSERV_WAL_REPLICATION;
+import static org.apache.accumulo.core.security.Authorizations.EMPTY;
+import static org.apache.accumulo.minicluster.ServerType.GARBAGE_COLLECTOR;
+import static org.apache.accumulo.minicluster.ServerType.TABLET_SERVER;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.TabletColumnFamily;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.master.state.SetGoalState;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterControl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.log.WalStateManager;
+import org.apache.accumulo.server.log.WalStateManager.WalState;
+import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class WALSunnyDayIT extends ConfigurableMacBase {
+
+ private static final Text CF = new Text(new byte[0]);
+
+ @Override
+ protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setProperty(GC_CYCLE_DELAY, "1s");
+ cfg.setProperty(GC_CYCLE_START, "0s");
+ cfg.setProperty(TSERV_WALOG_MAX_SIZE, "1M");
+ cfg.setProperty(TSERV_WAL_REPLICATION, "1");
+ cfg.setProperty(INSTANCE_ZK_TIMEOUT, "3s");
+ cfg.setNumTservers(1);
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ int countTrue(Collection<Boolean> bools) {
+ int result = 0;
+ for (Boolean b : bools) {
+ if (b.booleanValue())
+ result++;
+ }
+ return result;
+ }
+
+ @Test
+ public void test() throws Exception {
+ MiniAccumuloClusterImpl mac = getCluster();
+ MiniAccumuloClusterControl control = mac.getClusterControl();
+ control.stop(GARBAGE_COLLECTOR);
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ writeSomeData(c, tableName, 1, 1);
+
+ // WAL markers are added lazily
+ Map<String,Boolean> wals = getWals(c);
+ assertEquals(wals.toString(), 2, wals.size());
+ for (Boolean b : wals.values()) {
+ assertTrue("logs should be in use", b.booleanValue());
+ }
+
+ // roll log, get a new next
+ writeSomeData(c, tableName, 1000, 50);
+ Map<String,Boolean> walsAfterRoll = getWals(c);
+ assertEquals("should have 3 WALs after roll", 3, walsAfterRoll.size());
+ assertTrue("new WALs should be a superset of the old WALs", walsAfterRoll.keySet().containsAll(wals.keySet()));
+ assertEquals("all WALs should be in use", 3, countTrue(walsAfterRoll.values()));
+
+ // flush the tables
+ for (String table : new String[] {tableName, MetadataTable.NAME, RootTable.NAME}) {
+ c.tableOperations().flush(table, null, null, true);
+ }
+ UtilWaitThread.sleep(1000);
+ // rolled WAL is no longer in use, but needs to be GC'd
+ Map<String,Boolean> walsAfterFlush = getWals(c);
+ assertEquals(walsAfterFlush.toString(), 3, walsAfterFlush.size());
+ assertEquals("inUse should be 2", 2, countTrue(walsAfterFlush.values()));
+
+ // let the GC run for a little bit
+ control.start(GARBAGE_COLLECTOR);
+ UtilWaitThread.sleep(5 * 1000);
+ // make sure the unused WAL goes away
+ Map<String,Boolean> walsAfterGC = getWals(c);
+ assertEquals(walsAfterGC.toString(), 2, walsAfterGC.size());
+ control.stop(GARBAGE_COLLECTOR);
+ // restart the tserver, but don't run recovery on all tablets
+ control.stop(TABLET_SERVER);
+ // this delays recovery on the normal tables
+ assertEquals(0, cluster.exec(SetGoalState.class, "SAFE_MODE").waitFor());
+ control.start(TABLET_SERVER);
+
+ // wait for the metadata table to go back online
+ getRecoveryMarkers(c);
+ // allow a little time for the master to notice ASSIGNED_TO_DEAD_SERVER tablets
+ UtilWaitThread.sleep(5 * 1000);
+ Map<KeyExtent,List<String>> markers = getRecoveryMarkers(c);
+ // log.debug("markers " + markers);
+ assertEquals("one tablet should have markers", 1, markers.keySet().size());
+ assertEquals("tableId of the keyExtent should be 1", new Text("1"), markers.keySet().iterator().next().getTableId());
+
+ // put some data in the WAL
+ assertEquals(0, cluster.exec(SetGoalState.class, "NORMAL").waitFor());
+ verifySomeData(c, tableName, 1000 * 50 + 1);
+ writeSomeData(c, tableName, 100, 100);
+
+ Map<String,Boolean> walsAfterRestart = getWals(c);
+ // log.debug("wals after " + walsAfterRestart);
+ assertEquals("used WALs after restart should be 4", 4, countTrue(walsAfterRestart.values()));
+ control.start(GARBAGE_COLLECTOR);
+ UtilWaitThread.sleep(5 * 1000);
+ Map<String,Boolean> walsAfterRestartAndGC = getWals(c);
+ assertEquals("wals left should be 2", 2, walsAfterRestartAndGC.size());
+ assertEquals("logs in use should be 2", 2, countTrue(walsAfterRestartAndGC.values()));
+ }
+
+ private void verifySomeData(Connector c, String tableName, int expected) throws Exception {
+ Scanner scan = c.createScanner(tableName, EMPTY);
+ int result = Iterators.size(scan.iterator());
+ scan.close();
+ Assert.assertEquals(expected, result);
+ }
+
+ private void writeSomeData(Connector conn, String tableName, int rows, int cols) throws Exception {
+ Random rand = new Random();
+ BatchWriter bw = conn.createBatchWriter(tableName, null);
+ byte[] rowData = new byte[10];
+ byte[] cq = new byte[10];
+ byte[] value = new byte[10];
+
+ for (int r = 0; r < rows; r++) {
+ rand.nextBytes(rowData);
+ Mutation m = new Mutation(rowData);
+ for (int c = 0; c < cols; c++) {
+ rand.nextBytes(cq);
+ rand.nextBytes(value);
+ m.put(CF, new Text(cq), new Value(value));
+ }
+ bw.addMutation(m);
+ if (r % 100 == 0) {
+ bw.flush();
+ }
+ }
+ bw.close();
+ }
+
+ private Map<String,Boolean> getWals(Connector c) throws Exception {
+ Map<String,Boolean> result = new HashMap<>();
+ Instance i = c.getInstance();
+ ZooReaderWriter zk = new ZooReaderWriter(i.getZooKeepers(), i.getZooKeepersSessionTimeOut(), "");
+ WalStateManager wals = new WalStateManager(c.getInstance(), zk);
+ for (Entry<Path,WalState> entry : wals.getAllState().entrySet()) {
+ // WALs are in use if they are not unreferenced
+ result.put(entry.getKey().toString(), entry.getValue() != WalState.UNREFERENCED);
+ }
+ return result;
+ }
+
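+ // Scan the root and metadata tables for tablets that still reference WALs;
+ // tablets that hold log entries after a restart require recovery.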
+ private Map<KeyExtent,List<String>> getRecoveryMarkers(Connector c) throws Exception {
+ Map<KeyExtent,List<String>> result = new HashMap<>();
+ Scanner root = c.createScanner(RootTable.NAME, EMPTY);
+ root.setRange(TabletsSection.getRange());
+ root.fetchColumnFamily(TabletsSection.LogColumnFamily.NAME);
+ TabletColumnFamily.PREV_ROW_COLUMN.fetch(root);
+
+ Scanner meta = c.createScanner(MetadataTable.NAME, EMPTY);
+ meta.setRange(TabletsSection.getRange());
+ meta.fetchColumnFamily(TabletsSection.LogColumnFamily.NAME);
+ TabletColumnFamily.PREV_ROW_COLUMN.fetch(meta);
+
+ List<String> logs = new ArrayList<>();
+ Iterator<Entry<Key,Value>> both = Iterators.concat(root.iterator(), meta.iterator());
+ while (both.hasNext()) {
+ Entry<Key,Value> entry = both.next();
+ Key key = entry.getKey();
+ if (key.getColumnFamily().equals(TabletsSection.LogColumnFamily.NAME)) {
+ logs.add(key.getColumnQualifier().toString());
+ }
+ if (TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key) && !logs.isEmpty()) {
+ KeyExtent extent = new KeyExtent(key.getRow(), entry.getValue());
+ result.put(extent, logs);
+ logs = new ArrayList<String>();
+ }
+ }
+ return result;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/WatchTheWatchCountIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/WatchTheWatchCountIT.java b/test/src/main/java/org/apache/accumulo/test/functional/WatchTheWatchCountIT.java
new file mode 100644
index 0000000..07d197d
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/WatchTheWatchCountIT.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertTrue;
+
+import java.net.Socket;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Range;
+import com.google.common.net.HostAndPort;
+
+// ACCUMULO-2757 - make sure we don't make too many more watchers
+public class WatchTheWatchCountIT extends ConfigurableMacBase {
+ private static final Logger log = LoggerFactory.getLogger(WatchTheWatchCountIT.class);
+
+ public int defaultOverrideSeconds() {
+ return 60;
+ }
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setNumTservers(3);
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector c = getConnector();
+ String[] tableNames = getUniqueNames(3);
+ for (String tableName : tableNames) {
+ c.tableOperations().create(tableName);
+ }
+ c.tableOperations().list();
+ String zooKeepers = c.getInstance().getZooKeepers();
+ final Range<Long> expectedWatcherRange = Range.open(475L, 700L);
+ long total = 0;
+ final HostAndPort hostAndPort = HostAndPort.fromString(zooKeepers);
+ for (int i = 0; i < 5; i++) {
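+ // Query ZooKeeper's 'wchs' four-letter command for a watch summary; the
+ // response ends with a "Total watches:<n>" line that is parsed below.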
+ Socket socket = new Socket(hostAndPort.getHostText(), hostAndPort.getPort());
+ try {
+ socket.getOutputStream().write("wchs\n".getBytes(), 0, 5);
+ byte[] buffer = new byte[1024];
+ int n = socket.getInputStream().read(buffer);
+ String response = new String(buffer, 0, n);
+ total = Long.parseLong(response.split(":")[1].trim());
+ log.info("Total: {}", total);
+ if (expectedWatcherRange.contains(total)) {
+ break;
+ }
+ log.debug("Expected number of watchers to be contained in {}, but actually was {}. Sleeping and retrying", expectedWatcherRange, total);
+ Thread.sleep(5000);
+ } finally {
+ socket.close();
+ }
+ }
+
+ assertTrue("Expected number of watchers to be contained in " + expectedWatcherRange + ", but actually was " + total, expectedWatcherRange.contains(total));
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/WriteAheadLogIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/WriteAheadLogIT.java b/test/src/main/java/org/apache/accumulo/test/functional/WriteAheadLogIT.java
new file mode 100644
index 0000000..d877969
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/WriteAheadLogIT.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.junit.Test;
+
+public class WriteAheadLogIT extends AccumuloClusterHarness {
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "2M");
+ cfg.setProperty(Property.GC_CYCLE_DELAY, "1");
+ cfg.setProperty(Property.GC_CYCLE_START, "1");
+ cfg.setProperty(Property.MASTER_RECOVERY_DELAY, "1s");
+ cfg.setProperty(Property.TSERV_MAJC_DELAY, "1");
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "4s");
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 10 * 60;
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "750K");
+ TestIngest.Opts opts = new TestIngest.Opts();
+ VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+ opts.setTableName(tableName);
+
+ ClientConfiguration clientConfig = cluster.getClientConfig();
+ if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ opts.updateKerberosCredentials(clientConfig);
+ vopts.updateKerberosCredentials(clientConfig);
+ } else {
+ opts.setPrincipal(getAdminPrincipal());
+ vopts.setPrincipal(getAdminPrincipal());
+ }
+
+ TestIngest.ingest(c, opts, new BatchWriterOpts());
+ vopts.setTableName(tableName);
+ VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
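+ // Bounce the tablet servers; the second verification passes only if the
+ // unflushed data is recovered from the write-ahead logs.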
+ getCluster().getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
+ getCluster().getClusterControl().startAllServers(ServerType.TABLET_SERVER);
+ VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/WriteLotsIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/WriteLotsIT.java b/test/src/main/java/org/apache/accumulo/test/functional/WriteLotsIT.java
new file mode 100644
index 0000000..45b671c
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/WriteLotsIT.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.junit.Test;
+
+public class WriteLotsIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 90;
+ }
+
+ @Test
+ public void writeLots() throws Exception {
+ final Connector c = getConnector();
+ final String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ final AtomicReference<Exception> ref = new AtomicReference<Exception>();
+ List<Thread> threads = new ArrayList<Thread>();
+ final ClientConfiguration clientConfig = getCluster().getClientConfig();
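+ // Start ten concurrent ingest threads, each writing a disjoint block of 10,000 rows.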
+ for (int i = 0; i < 10; i++) {
+ final int index = i;
+ Thread t = new Thread() {
+ @Override
+ public void run() {
+ try {
+ TestIngest.Opts opts = new TestIngest.Opts();
+ opts.startRow = index * 10000;
+ opts.rows = 10000;
+ opts.setTableName(tableName);
+ if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ opts.updateKerberosCredentials(clientConfig);
+ } else {
+ opts.setPrincipal(getAdminPrincipal());
+ }
+ TestIngest.ingest(c, opts, new BatchWriterOpts());
+ } catch (Exception ex) {
+ ref.set(ex);
+ }
+ }
+ };
+ t.start();
+ threads.add(t);
+ }
+ for (Thread thread : threads) {
+ thread.join();
+ }
+ if (ref.get() != null) {
+ throw ref.get();
+ }
+ VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+ vopts.rows = 10000 * 10;
+ vopts.setTableName(tableName);
+ if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ vopts.updateKerberosCredentials(clientConfig);
+ } else {
+ vopts.setPrincipal(getAdminPrincipal());
+ }
+ VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/ZooCacheIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ZooCacheIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ZooCacheIT.java
new file mode 100644
index 0000000..a531ee0
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ZooCacheIT.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.commons.io.FileUtils;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class ZooCacheIT extends ConfigurableMacBase {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ private static String pathName = "/zcTest-42";
+ private static File testDir;
+
+ @BeforeClass
+ public static void createTestDirectory() {
+ testDir = new File(createTestDir(ZooCacheIT.class.getName()), pathName);
+ FileUtils.deleteQuietly(testDir);
+ assertTrue(testDir.mkdir());
+ }
+
+ @Test
+ public void test() throws Exception {
+ assertEquals(0, exec(CacheTestClean.class, pathName, testDir.getAbsolutePath()).waitFor());
+ final AtomicReference<Exception> ref = new AtomicReference<Exception>();
+ List<Thread> threads = new ArrayList<Thread>();
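+ // Run three concurrent readers against the same ZooKeeper cache path while a
+ // single writer process updates it.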
+ for (int i = 0; i < 3; i++) {
+ Thread reader = new Thread() {
+ @Override
+ public void run() {
+ try {
+ CacheTestReader.main(new String[] {pathName, testDir.getAbsolutePath(), getConnector().getInstance().getZooKeepers()});
+ } catch (Exception ex) {
+ ref.set(ex);
+ }
+ }
+ };
+ reader.start();
+ threads.add(reader);
+ }
+ assertEquals(0, exec(CacheTestWriter.class, pathName, testDir.getAbsolutePath(), "3", "50").waitFor());
+ for (Thread t : threads) {
+ t.join();
+ if (ref.get() != null)
+ throw ref.get();
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/ZookeeperRestartIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ZookeeperRestartIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ZookeeperRestartIT.java
new file mode 100644
index 0000000..19f90fe
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ZookeeperRestartIT.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.minicluster.impl.ProcessReference;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+public class ZookeeperRestartIT extends ConfigurableMacBase {
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ Map<String,String> siteConfig = new HashMap<String,String>();
+ siteConfig.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "3s");
+ cfg.setSiteConfig(siteConfig);
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector c = getConnector();
+ c.tableOperations().create("test_ingest");
+ BatchWriter bw = c.createBatchWriter("test_ingest", null);
+ Mutation m = new Mutation("row");
+ m.put("cf", "cq", "value");
+ bw.addMutation(m);
+ bw.close();
+
+ // kill zookeeper
+ for (ProcessReference proc : cluster.getProcesses().get(ServerType.ZOOKEEPER))
+ cluster.killProcess(ServerType.ZOOKEEPER, proc);
+
+ // give the servers time to react
+ UtilWaitThread.sleep(1000);
+
+ // start zookeeper back up
+ cluster.start();
+
+ // use the tservers
+ Scanner s = c.createScanner("test_ingest", Authorizations.EMPTY);
+ Iterator<Entry<Key,Value>> i = s.iterator();
+ assertTrue(i.hasNext());
+ assertEquals("row", i.next().getKey().getRow().toString());
+ assertFalse(i.hasNext());
+ // use the master
+ c.tableOperations().delete("test_ingest");
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java b/test/src/main/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java
new file mode 100644
index 0000000..a0d355e
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/performance/RollWALPerformanceIT.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.performance;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.minicluster.impl.ProcessReference;
+import org.apache.accumulo.test.continuous.ContinuousIngest;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class RollWALPerformanceIT extends ConfigurableMacBase {
+
+ @Override
+ protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setProperty(Property.TSERV_WAL_REPLICATION, "1");
+ cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "10M");
+ cfg.setProperty(Property.TABLE_MINC_LOGS_MAX, "100");
+ cfg.setProperty(Property.GC_FILE_ARCHIVE, "false");
+ cfg.setProperty(Property.GC_CYCLE_START, "1s");
+ cfg.setProperty(Property.GC_CYCLE_DELAY, "1s");
+ cfg.useMiniDFS(true);
+ }
+
+ private long ingest() throws Exception {
+ final Connector c = getConnector();
+ final String tableName = getUniqueNames(1)[0];
+
+ log.info("Creating the table");
+ c.tableOperations().create(tableName);
+
+ log.info("Splitting the table");
+ final long SPLIT_COUNT = 100;
+ final long distance = Long.MAX_VALUE / SPLIT_COUNT;
+ final SortedSet<Text> splits = new TreeSet<Text>();
+ for (int i = 1; i < SPLIT_COUNT; i++) {
+ splits.add(new Text(String.format("%016x", i * distance)));
+ }
+ c.tableOperations().addSplits(tableName, splits);
+
+ log.info("Waiting for balance");
+ c.instanceOperations().waitForBalance();
+
+ final Instance inst = c.getInstance();
+
+ log.info("Starting ingest");
+ final long start = System.currentTimeMillis();
+ final String[] args = {"-i", inst.getInstanceName(), "-z", inst.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "--batchThreads", "2", "--table",
+ tableName, "--num", Long.toString(1000 * 1000), // 1M 100 byte entries
+ };
+
+ ContinuousIngest.main(args);
+ final long result = System.currentTimeMillis() - start;
+ log.debug(String.format("Finished in %,d ms", result));
+ log.debug("Dropping table");
+ c.tableOperations().delete(tableName);
+ return result;
+ }
+
+ private long getAverage() throws Exception {
+ final int REPEAT = 3;
+ long totalTime = 0;
+ for (int i = 0; i < REPEAT; i++) {
+ totalTime += ingest();
+ }
+ return totalTime / REPEAT;
+ }
+
+ private void testWalPerformanceOnce() throws Exception {
+ // get time with a small WAL, which will cause many WAL roll-overs
+ long avg1 = getAverage();
+ // use a bigger WAL max size to eliminate WAL roll-overs
+ Connector c = getConnector();
+ c.instanceOperations().setProperty(Property.TSERV_WALOG_MAX_SIZE.getKey(), "1G");
+ c.tableOperations().flush(MetadataTable.NAME, null, null, true);
+ c.tableOperations().flush(RootTable.NAME, null, null, true);
+ for (ProcessReference tserver : getCluster().getProcesses().get(ServerType.TABLET_SERVER)) {
+ getCluster().killProcess(ServerType.TABLET_SERVER, tserver);
+ }
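+ // restart the tablet servers so they pick up the larger WAL size and start fresh logs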
+ getCluster().start();
+ long avg2 = getAverage();
+ log.info(String.format("Average run time with small WAL %,d with large WAL %,d", avg1, avg2));
+ assertTrue(avg1 > avg2);
+ double percent = (100. * avg1) / avg2;
+ log.info(String.format("Percent of large log: %.2f%%", percent));
+ assertTrue(percent < 125.);
+ }
+
+ @Test(timeout = 20 * 60 * 1000)
+ public void testWalPerformance() throws Exception {
+ testWalPerformanceOnce();
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/performance/metadata/FastBulkImportIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/metadata/FastBulkImportIT.java b/test/src/main/java/org/apache/accumulo/test/performance/metadata/FastBulkImportIT.java
new file mode 100644
index 0000000..236522a
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/performance/metadata/FastBulkImportIT.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.performance.metadata;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.file.FileOperations;
+import org.apache.accumulo.core.file.FileSKVWriter;
+import org.apache.accumulo.core.file.rfile.RFile;
+import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+// ACCUMULO-3327
+public class FastBulkImportIT extends ConfigurableMacBase {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Override
+ protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setNumTservers(3);
+ cfg.setProperty(Property.TSERV_BULK_ASSIGNMENT_THREADS, "5");
+ cfg.setProperty(Property.TSERV_BULK_PROCESS_THREADS, "5");
+ cfg.setProperty(Property.TABLE_MAJC_RATIO, "9999");
+ cfg.setProperty(Property.TABLE_FILE_MAX, "9999");
+ }
+
+ @Test
+ public void test() throws Exception {
+ log.info("Creating table");
+ final String tableName = getUniqueNames(1)[0];
+ final Connector c = getConnector();
+ c.tableOperations().create(tableName);
+ log.info("Adding splits");
+ SortedSet<Text> splits = new TreeSet<>();
+ for (int i = 1; i < 0xfff; i += 7) {
+ splits.add(new Text(Integer.toHexString(i)));
+ }
+ c.tableOperations().addSplits(tableName, splits);
+
+ log.info("Creating lots of bulk import files");
+ FileSystem fs = getCluster().getFileSystem();
+ Path basePath = getCluster().getTemporaryPath();
+ CachedConfiguration.setInstance(fs.getConf());
+
+ Path base = new Path(basePath, "testBulkFail_" + tableName);
+ fs.delete(base, true);
+ fs.mkdirs(base);
+ Path bulkFailures = new Path(base, "failures");
+ Path files = new Path(base, "files");
+ fs.mkdirs(bulkFailures);
+ fs.mkdirs(files);
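+ // Each RFile covers the full key range, so each of the 100 imported files
+ // must be assigned to hundreds of tablets, stressing the bulk-import metadata path.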
+ for (int i = 0; i < 100; i++) {
+ FileSKVWriter writer = FileOperations.getInstance().openWriter(files.toString() + "/bulk_" + i + "." + RFile.EXTENSION, fs, fs.getConf(),
+ AccumuloConfiguration.getDefaultConfiguration());
+ writer.startDefaultLocalityGroup();
+ for (int j = 0x100; j < 0xfff; j += 3) {
+ writer.append(new Key(Integer.toHexString(j)), new Value(new byte[0]));
+ }
+ writer.close();
+ }
+ log.info("Waiting for balance");
+ c.instanceOperations().waitForBalance();
+
+ log.info("Bulk importing files");
+ long now = System.currentTimeMillis();
+ c.tableOperations().importDirectory(tableName, files.toString(), bulkFailures.toString(), true);
+ double diffSeconds = (System.currentTimeMillis() - now) / 1000.;
+ log.info(String.format("Import took %.2f seconds", diffSeconds));
+ assertTrue(diffSeconds < 30);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/proxy/ProxyDurabilityIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/proxy/ProxyDurabilityIT.java b/test/src/main/java/org/apache/accumulo/test/proxy/ProxyDurabilityIT.java
new file mode 100644
index 0000000..745326e
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/proxy/ProxyDurabilityIT.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.proxy;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.TreeMap;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.minicluster.impl.ProcessReference;
+import org.apache.accumulo.proxy.Proxy;
+import org.apache.accumulo.proxy.thrift.AccumuloProxy.Client;
+import org.apache.accumulo.proxy.thrift.Column;
+import org.apache.accumulo.proxy.thrift.ColumnUpdate;
+import org.apache.accumulo.proxy.thrift.Condition;
+import org.apache.accumulo.proxy.thrift.ConditionalStatus;
+import org.apache.accumulo.proxy.thrift.ConditionalUpdates;
+import org.apache.accumulo.proxy.thrift.ConditionalWriterOptions;
+import org.apache.accumulo.proxy.thrift.Durability;
+import org.apache.accumulo.proxy.thrift.TimeType;
+import org.apache.accumulo.proxy.thrift.WriterOptions;
+import org.apache.accumulo.server.util.PortUtils;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.thrift.protocol.TJSONProtocol;
+import org.apache.thrift.server.TServer;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+import com.google.common.net.HostAndPort;
+
+public class ProxyDurabilityIT extends ConfigurableMacBase {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "10s");
+ cfg.setNumTservers(1);
+ }
+
+ private static ByteBuffer bytes(String value) {
+ return ByteBuffer.wrap(value.getBytes());
+ }
+
+ @Test
+ public void testDurability() throws Exception {
+ Connector c = getConnector();
+ Properties props = new Properties();
+ // Point the proxy at an empty client configuration file so custom properties from a locally installed one don't interfere
+ File emptyFile = Files.createTempFile(null, null).toFile();
+ emptyFile.deleteOnExit();
+ props.put("instance", c.getInstance().getInstanceName());
+ props.put("zookeepers", c.getInstance().getZooKeepers());
+ props.put("tokenClass", PasswordToken.class.getName());
+ props.put("clientConfigurationFile", emptyFile.toString());
+
+ TJSONProtocol.Factory protocol = new TJSONProtocol.Factory();
+
+ int proxyPort = PortUtils.getRandomFreePort();
+ final TServer proxyServer = Proxy.createProxyServer(HostAndPort.fromParts("localhost", proxyPort), protocol, props).server;
+ while (!proxyServer.isServing())
+ UtilWaitThread.sleep(100);
+ Client client = new TestProxyClient("localhost", proxyPort, protocol).proxy();
+ Map<String,String> properties = new TreeMap<String,String>();
+ properties.put("password", ROOT_PASSWORD);
+ ByteBuffer login = client.login("root", properties);
+
+ String tableName = getUniqueNames(1)[0];
+ client.createTable(login, tableName, true, TimeType.MILLIS);
+ assertTrue(c.tableOperations().exists(tableName));
+
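+ // Durability.NONE mutations skip the write-ahead log, so this write is
+ // expected to vanish when the tablet server is killed below.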
+ WriterOptions options = new WriterOptions();
+ options.setDurability(Durability.NONE);
+ String writer = client.createWriter(login, tableName, options);
+ Map<ByteBuffer,List<ColumnUpdate>> cells = new TreeMap<ByteBuffer,List<ColumnUpdate>>();
+ ColumnUpdate column = new ColumnUpdate(bytes("cf"), bytes("cq"));
+ column.setValue("value".getBytes());
+ cells.put(bytes("row"), Collections.singletonList(column));
+ client.update(writer, cells);
+ client.closeWriter(writer);
+ assertEquals(1, count(tableName));
+ restartTServer();
+ assertEquals(0, count(tableName));
+
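+ // Durability.SYNC forces the update into the write-ahead log before the call
+ // returns, so it must survive the tablet server restart.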
+ ConditionalWriterOptions cfg = new ConditionalWriterOptions();
+ cfg.setDurability(Durability.SYNC);
+ String cwriter = client.createConditionalWriter(login, tableName, cfg);
+ ConditionalUpdates updates = new ConditionalUpdates();
+ updates.addToConditions(new Condition(new Column(bytes("cf"), bytes("cq"), bytes(""))));
+ updates.addToUpdates(column);
+ Map<ByteBuffer,ConditionalStatus> status = client.updateRowsConditionally(cwriter, Collections.singletonMap(bytes("row"), updates));
+ assertEquals(ConditionalStatus.ACCEPTED, status.get(bytes("row")));
+ assertEquals(1, count(tableName));
+ restartTServer();
+ assertEquals(1, count(tableName));
+
+ proxyServer.stop();
+ }
+
+ private void restartTServer() throws Exception {
+ for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
+ cluster.killProcess(ServerType.TABLET_SERVER, proc);
+ }
+ cluster.start();
+ }
+
+ private int count(String tableName) throws Exception {
+ return Iterators.size(getConnector().createScanner(tableName, Authorizations.EMPTY).iterator());
+ }
+
+}
[43/43] accumulo git commit: ACCUMULO-3871 move ITs into distro jar, stop building test jar
Posted by ec...@apache.org.
ACCUMULO-3871 move ITs into distro jar, stop building test jar
Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/01ae5b85
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/01ae5b85
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/01ae5b85
Branch: refs/heads/master
Commit: 01ae5b85897c318da639f875097edb5c7c3212aa
Parents: ab5a867
Author: Eric Newton <er...@gmail.com>
Authored: Thu Jun 4 14:52:27 2015 -0400
Committer: Eric Newton <er...@gmail.com>
Committed: Thu Jun 4 14:52:27 2015 -0400
----------------------------------------------------------------------
assemble/pom.xml | 11 -
test/pom.xml | 89 +-
.../harness/AccumuloClusterHarness.java | 338 +++
.../apache/accumulo/harness/AccumuloITBase.java | 104 +
.../MiniClusterConfigurationCallback.java | 41 +
.../accumulo/harness/MiniClusterHarness.java | 242 ++
.../accumulo/harness/SharedMiniClusterBase.java | 185 ++
.../org/apache/accumulo/harness/TestingKdc.java | 210 ++
.../conf/AccumuloClusterConfiguration.java | 35 +
.../AccumuloClusterPropertyConfiguration.java | 195 ++
.../conf/AccumuloMiniClusterConfiguration.java | 117 +
.../StandaloneAccumuloClusterConfiguration.java | 252 ++
.../accumulo/test/AccumuloOutputFormatIT.java | 125 +
.../test/ArbitraryTablePropertiesIT.java | 198 ++
.../accumulo/test/AssignmentThreadsIT.java | 94 +
.../apache/accumulo/test/AuditMessageIT.java | 506 ++++
.../test/BadDeleteMarkersCreatedIT.java | 176 ++
.../apache/accumulo/test/BalanceFasterIT.java | 94 +
.../org/apache/accumulo/test/BalanceIT.java | 52 +
.../test/BalanceWithOfflineTableIT.java | 90 +
.../org/apache/accumulo/test/BatchWriterIT.java | 49 +
.../accumulo/test/BulkImportVolumeIT.java | 95 +
.../org/apache/accumulo/test/CleanWalIT.java | 146 ++
.../accumulo/test/ConditionalWriterIT.java | 1349 +++++++++++
.../test/ConfigurableMajorCompactionIT.java | 119 +
.../test/CreateTableWithNewTableConfigIT.java | 193 ++
.../org/apache/accumulo/test/DumpConfigIT.java | 69 +
.../org/apache/accumulo/test/ExistingMacIT.java | 169 ++
.../org/apache/accumulo/test/FileArchiveIT.java | 271 +++
.../accumulo/test/GarbageCollectWALIT.java | 81 +
.../apache/accumulo/test/ImportExportIT.java | 198 ++
.../accumulo/test/IntegrationTestMapReduce.java | 146 ++
.../accumulo/test/InterruptibleScannersIT.java | 102 +
.../accumulo/test/KeyValueEqualityIT.java | 77 +
.../apache/accumulo/test/LargeSplitRowIT.java | 286 +++
.../test/MasterRepairsDualAssignmentIT.java | 161 ++
.../accumulo/test/MetaConstraintRetryIT.java | 63 +
.../apache/accumulo/test/MetaGetsReadersIT.java | 116 +
.../org/apache/accumulo/test/MetaSplitIT.java | 137 ++
.../MissingWalHeaderCompletesRecoveryIT.java | 211 ++
.../accumulo/test/MultiTableBatchWriterIT.java | 518 ++++
.../accumulo/test/MultiTableRecoveryIT.java | 134 ++
.../org/apache/accumulo/test/NamespacesIT.java | 1362 +++++++++++
.../test/RecoveryCompactionsAreFlushesIT.java | 101 +
.../test/RewriteTabletDirectoriesIT.java | 168 ++
.../apache/accumulo/test/ScanIteratorIT.java | 170 ++
.../org/apache/accumulo/test/ShellConfigIT.java | 103 +
.../org/apache/accumulo/test/ShellServerIT.java | 1609 +++++++++++++
.../accumulo/test/SplitCancelsMajCIT.java | 89 +
.../apache/accumulo/test/SplitRecoveryIT.java | 137 ++
.../test/TableConfigurationUpdateIT.java | 139 ++
.../apache/accumulo/test/TableOperationsIT.java | 375 +++
.../accumulo/test/TabletServerGivesUpIT.java | 73 +
.../org/apache/accumulo/test/TotalQueuedIT.java | 131 +
.../test/TracerRecoversAfterOfflineTableIT.java | 127 +
.../accumulo/test/TransportCachingIT.java | 120 +
.../org/apache/accumulo/test/UnusedWALIT.java | 153 ++
.../accumulo/test/UserCompactionStrategyIT.java | 296 +++
.../java/org/apache/accumulo/test/UsersIT.java | 60 +
.../accumulo/test/VerifySerialRecoveryIT.java | 107 +
.../apache/accumulo/test/VolumeChooserIT.java | 392 +++
.../java/org/apache/accumulo/test/VolumeIT.java | 568 +++++
.../apache/accumulo/test/WaitForBalanceIT.java | 118 +
.../test/functional/AccumuloInputFormatIT.java | 210 ++
.../accumulo/test/functional/AddSplitIT.java | 142 ++
.../test/functional/BackupMasterIT.java | 68 +
.../test/functional/BadIteratorMincIT.java | 107 +
.../functional/BalanceAfterCommsFailureIT.java | 138 ++
.../BalanceInPresenceOfOfflineTableIT.java | 201 ++
.../test/functional/BatchScanSplitIT.java | 129 +
.../test/functional/BatchWriterFlushIT.java | 178 ++
.../test/functional/BigRootTabletIT.java | 66 +
.../accumulo/test/functional/BinaryIT.java | 86 +
.../test/functional/BinaryStressIT.java | 107 +
.../accumulo/test/functional/BloomFilterIT.java | 256 ++
.../accumulo/test/functional/BulkFileIT.java | 130 +
.../apache/accumulo/test/functional/BulkIT.java | 120 +
.../functional/BulkSplitOptimizationIT.java | 142 ++
.../test/functional/ChaoticBalancerIT.java | 85 +
.../accumulo/test/functional/ClassLoaderIT.java | 104 +
.../accumulo/test/functional/CleanTmpIT.java | 112 +
.../accumulo/test/functional/CleanUpIT.java | 151 ++
.../accumulo/test/functional/CloneTestIT.java | 295 +++
.../accumulo/test/functional/CombinerIT.java | 76 +
.../accumulo/test/functional/CompactionIT.java | 184 ++
.../accumulo/test/functional/ConcurrencyIT.java | 158 ++
.../functional/ConfigurableCompactionIT.java | 164 ++
.../test/functional/ConfigurableMacBase.java | 182 ++
.../accumulo/test/functional/ConstraintIT.java | 335 +++
.../test/functional/CreateAndUseIT.java | 130 +
.../test/functional/CreateManyScannersIT.java | 41 +
.../accumulo/test/functional/CredentialsIT.java | 124 +
.../test/functional/DeleteEverythingIT.java | 117 +
.../accumulo/test/functional/DeleteIT.java | 106 +
.../accumulo/test/functional/DeleteRowsIT.java | 154 ++
.../test/functional/DeleteRowsSplitIT.java | 147 ++
.../functional/DeleteTableDuringSplitIT.java | 109 +
.../functional/DeletedTablesDontFlushIT.java | 62 +
.../accumulo/test/functional/DurabilityIT.java | 222 ++
.../test/functional/DynamicThreadPoolsIT.java | 126 +
.../accumulo/test/functional/ExamplesIT.java | 660 +++++
.../test/functional/FateStarvationIT.java | 80 +
.../test/functional/FunctionalTestUtils.java | 192 ++
.../test/functional/GarbageCollectorIT.java | 301 +++
.../test/functional/HalfDeadTServerIT.java | 218 ++
.../accumulo/test/functional/KerberosIT.java | 573 +++++
.../test/functional/KerberosProxyIT.java | 426 ++++
.../accumulo/test/functional/LargeRowIT.java | 219 ++
.../test/functional/LateLastContactIT.java | 49 +
.../accumulo/test/functional/LogicalTimeIT.java | 109 +
.../accumulo/test/functional/MapReduceIT.java | 92 +
.../test/functional/MasterAssignmentIT.java | 100 +
.../test/functional/MasterFailoverIT.java | 80 +
.../accumulo/test/functional/MaxOpenIT.java | 176 ++
.../accumulo/test/functional/MergeIT.java | 194 ++
.../accumulo/test/functional/MetadataIT.java | 148 ++
.../test/functional/MetadataMaxFilesIT.java | 116 +
.../test/functional/MetadataSplitIT.java | 56 +
.../test/functional/MonitorLoggingIT.java | 121 +
.../accumulo/test/functional/MonitorSslIT.java | 132 +
.../accumulo/test/functional/NativeMapIT.java | 613 +++++
.../accumulo/test/functional/PermissionsIT.java | 707 ++++++
.../accumulo/test/functional/ReadWriteIT.java | 456 ++++
.../functional/RecoveryWithEmptyRFileIT.java | 109 +
.../test/functional/RegexGroupBalanceIT.java | 192 ++
.../accumulo/test/functional/RenameIT.java | 74 +
.../accumulo/test/functional/RestartIT.java | 367 +++
.../test/functional/RestartStressIT.java | 153 ++
.../accumulo/test/functional/RowDeleteIT.java | 109 +
.../accumulo/test/functional/ScanIdIT.java | 385 +++
.../test/functional/ScanIteratorIT.java | 134 ++
.../accumulo/test/functional/ScanRangeIT.java | 244 ++
.../test/functional/ScanSessionTimeOutIT.java | 142 ++
.../accumulo/test/functional/ScannerIT.java | 121 +
.../test/functional/ServerSideErrorIT.java | 128 +
.../test/functional/SessionDurabilityIT.java | 153 ++
.../accumulo/test/functional/ShutdownIT.java | 121 +
.../functional/SimpleBalancerFairnessIT.java | 117 +
.../test/functional/SparseColumnFamilyIT.java | 98 +
.../accumulo/test/functional/SplitIT.java | 223 ++
.../test/functional/SplitRecoveryIT.java | 279 +++
.../apache/accumulo/test/functional/SslIT.java | 72 +
.../test/functional/SslWithClientAuthIT.java | 77 +
.../accumulo/test/functional/StartIT.java | 43 +
.../accumulo/test/functional/TableIT.java | 108 +
.../accumulo/test/functional/TabletIT.java | 101 +
.../functional/TabletStateChangeIteratorIT.java | 192 ++
.../accumulo/test/functional/TimeoutIT.java | 120 +
.../accumulo/test/functional/VisibilityIT.java | 323 +++
.../accumulo/test/functional/WALSunnyDayIT.java | 234 ++
.../test/functional/WatchTheWatchCountIT.java | 80 +
.../test/functional/WriteAheadLogIT.java | 79 +
.../accumulo/test/functional/WriteLotsIT.java | 89 +
.../accumulo/test/functional/ZooCacheIT.java | 75 +
.../test/functional/ZookeeperRestartIT.java | 87 +
.../test/performance/RollWALPerformanceIT.java | 120 +
.../performance/metadata/FastBulkImportIT.java | 103 +
.../accumulo/test/proxy/ProxyDurabilityIT.java | 145 ++
.../accumulo/test/proxy/SimpleProxyBase.java | 2273 ++++++++++++++++++
.../accumulo/test/proxy/TBinaryProxyIT.java | 33 +
.../accumulo/test/proxy/TCompactProxyIT.java | 32 +
.../test/proxy/TJsonProtocolProxyIT.java | 33 +
.../accumulo/test/proxy/TTupleProxyIT.java | 33 +
.../accumulo/test/proxy/TestProxyClient.java | 204 ++
.../test/proxy/TestProxyInstanceOperations.java | 84 +
.../accumulo/test/proxy/TestProxyReadWrite.java | 468 ++++
.../test/proxy/TestProxySecurityOperations.java | 147 ++
.../test/proxy/TestProxyTableOperations.java | 202 ++
.../test/replication/CyclicReplicationIT.java | 332 +++
...bageCollectorCommunicatesWithTServersIT.java | 417 ++++
.../test/replication/KerberosReplicationIT.java | 233 ++
.../replication/MultiInstanceReplicationIT.java | 731 ++++++
.../replication/MultiTserverReplicationIT.java | 115 +
.../test/replication/ReplicationIT.java | 1436 +++++++++++
.../replication/ReplicationRandomWalkIT.java | 67 +
.../test/replication/StatusCombinerMacIT.java | 118 +
.../UnorderedWorkAssignerReplicationIT.java | 731 ++++++
...UnusedWalDoesntCloseReplicationStatusIT.java | 219 ++
.../server/security/SystemCredentialsIT.java | 233 ++
.../accumulo/test/start/KeywordStartIT.java | 197 ++
.../apache/accumulo/test/util/CertUtils.java | 348 +++
test/src/main/resources/FooConstraint.jar | Bin 0 -> 2130 bytes
test/src/main/resources/FooFilter.jar | Bin 0 -> 1645 bytes
test/src/main/resources/TestCombinerX.jar | Bin 0 -> 4335 bytes
test/src/main/resources/TestCombinerY.jar | Bin 0 -> 4129 bytes
test/src/main/resources/TestCompactionStrat.jar | Bin 0 -> 2530 bytes
test/src/main/resources/conf/accumulo-site.xml | 123 +
test/src/main/resources/conf/generic_logger.xml | 83 +
test/src/main/resources/conf/monitor_logger.xml | 64 +
test/src/main/resources/log4j.properties | 55 +
test/src/main/resources/randomwalk/Basic.xml | 37 +
test/src/main/resources/randomwalk/Simple.xml | 43 +
test/src/main/resources/unit/Basic.xml | 37 +
test/src/main/resources/unit/Simple.xml | 43 +
.../harness/AccumuloClusterHarness.java | 338 ---
.../apache/accumulo/harness/AccumuloITBase.java | 104 -
.../MiniClusterConfigurationCallback.java | 41 -
.../accumulo/harness/MiniClusterHarness.java | 242 --
.../accumulo/harness/SharedMiniClusterBase.java | 185 --
.../org/apache/accumulo/harness/TestingKdc.java | 210 --
.../conf/AccumuloClusterConfiguration.java | 35 -
.../AccumuloClusterPropertyConfiguration.java | 195 --
.../conf/AccumuloMiniClusterConfiguration.java | 117 -
.../StandaloneAccumuloClusterConfiguration.java | 252 --
.../accumulo/test/AccumuloOutputFormatIT.java | 125 -
.../test/ArbitraryTablePropertiesIT.java | 198 --
.../accumulo/test/AssignmentThreadsIT.java | 94 -
.../apache/accumulo/test/AuditMessageIT.java | 506 ----
.../test/BadDeleteMarkersCreatedIT.java | 176 --
.../apache/accumulo/test/BalanceFasterIT.java | 94 -
.../org/apache/accumulo/test/BalanceIT.java | 52 -
.../test/BalanceWithOfflineTableIT.java | 90 -
.../org/apache/accumulo/test/BatchWriterIT.java | 49 -
.../accumulo/test/BulkImportVolumeIT.java | 95 -
.../org/apache/accumulo/test/CleanWalIT.java | 146 --
.../accumulo/test/ConditionalWriterIT.java | 1349 -----------
.../test/ConfigurableMajorCompactionIT.java | 119 -
.../test/CreateTableWithNewTableConfigIT.java | 193 --
.../org/apache/accumulo/test/DumpConfigIT.java | 69 -
.../org/apache/accumulo/test/ExistingMacIT.java | 169 --
.../org/apache/accumulo/test/FileArchiveIT.java | 271 ---
.../accumulo/test/GarbageCollectWALIT.java | 81 -
.../apache/accumulo/test/ImportExportIT.java | 198 --
.../accumulo/test/IntegrationTestMapReduce.java | 146 --
.../accumulo/test/InterruptibleScannersIT.java | 102 -
.../accumulo/test/KeyValueEqualityIT.java | 77 -
.../apache/accumulo/test/LargeSplitRowIT.java | 286 ---
.../test/MasterRepairsDualAssignmentIT.java | 161 --
.../accumulo/test/MetaConstraintRetryIT.java | 63 -
.../apache/accumulo/test/MetaGetsReadersIT.java | 116 -
.../org/apache/accumulo/test/MetaSplitIT.java | 137 --
.../MissingWalHeaderCompletesRecoveryIT.java | 211 --
.../accumulo/test/MultiTableBatchWriterIT.java | 518 ----
.../accumulo/test/MultiTableRecoveryIT.java | 134 --
.../org/apache/accumulo/test/NamespacesIT.java | 1362 -----------
.../test/RecoveryCompactionsAreFlushesIT.java | 101 -
.../test/RewriteTabletDirectoriesIT.java | 168 --
.../apache/accumulo/test/ScanIteratorIT.java | 170 --
.../org/apache/accumulo/test/ShellConfigIT.java | 103 -
.../org/apache/accumulo/test/ShellServerIT.java | 1609 -------------
.../accumulo/test/SplitCancelsMajCIT.java | 89 -
.../apache/accumulo/test/SplitRecoveryIT.java | 137 --
.../test/TableConfigurationUpdateIT.java | 139 --
.../apache/accumulo/test/TableOperationsIT.java | 375 ---
.../accumulo/test/TabletServerGivesUpIT.java | 73 -
.../org/apache/accumulo/test/TotalQueuedIT.java | 131 -
.../test/TracerRecoversAfterOfflineTableIT.java | 127 -
.../accumulo/test/TransportCachingIT.java | 120 -
.../org/apache/accumulo/test/UnusedWALIT.java | 153 --
.../accumulo/test/UserCompactionStrategyIT.java | 296 ---
.../java/org/apache/accumulo/test/UsersIT.java | 60 -
.../accumulo/test/VerifySerialRecoveryIT.java | 107 -
.../apache/accumulo/test/VolumeChooserIT.java | 392 ---
.../java/org/apache/accumulo/test/VolumeIT.java | 568 -----
.../apache/accumulo/test/WaitForBalanceIT.java | 118 -
.../test/functional/AccumuloInputFormatIT.java | 210 --
.../accumulo/test/functional/AddSplitIT.java | 142 --
.../test/functional/BackupMasterIT.java | 68 -
.../test/functional/BadIteratorMincIT.java | 107 -
.../functional/BalanceAfterCommsFailureIT.java | 138 --
.../BalanceInPresenceOfOfflineTableIT.java | 201 --
.../test/functional/BatchScanSplitIT.java | 129 -
.../test/functional/BatchWriterFlushIT.java | 178 --
.../test/functional/BigRootTabletIT.java | 66 -
.../accumulo/test/functional/BinaryIT.java | 86 -
.../test/functional/BinaryStressIT.java | 107 -
.../accumulo/test/functional/BloomFilterIT.java | 256 --
.../accumulo/test/functional/BulkFileIT.java | 130 -
.../apache/accumulo/test/functional/BulkIT.java | 120 -
.../functional/BulkSplitOptimizationIT.java | 142 --
.../test/functional/ChaoticBalancerIT.java | 85 -
.../accumulo/test/functional/ClassLoaderIT.java | 104 -
.../accumulo/test/functional/CleanTmpIT.java | 112 -
.../accumulo/test/functional/CleanUpIT.java | 151 --
.../accumulo/test/functional/CloneTestIT.java | 295 ---
.../accumulo/test/functional/CombinerIT.java | 76 -
.../accumulo/test/functional/CompactionIT.java | 184 --
.../accumulo/test/functional/ConcurrencyIT.java | 158 --
.../functional/ConfigurableCompactionIT.java | 164 --
.../test/functional/ConfigurableMacBase.java | 182 --
.../accumulo/test/functional/ConstraintIT.java | 335 ---
.../test/functional/CreateAndUseIT.java | 130 -
.../test/functional/CreateManyScannersIT.java | 41 -
.../accumulo/test/functional/CredentialsIT.java | 124 -
.../test/functional/DeleteEverythingIT.java | 117 -
.../accumulo/test/functional/DeleteIT.java | 106 -
.../accumulo/test/functional/DeleteRowsIT.java | 154 --
.../test/functional/DeleteRowsSplitIT.java | 147 --
.../functional/DeleteTableDuringSplitIT.java | 109 -
.../functional/DeletedTablesDontFlushIT.java | 62 -
.../accumulo/test/functional/DurabilityIT.java | 222 --
.../test/functional/DynamicThreadPoolsIT.java | 126 -
.../accumulo/test/functional/ExamplesIT.java | 660 -----
.../test/functional/FateStarvationIT.java | 80 -
.../test/functional/FunctionalTestUtils.java | 192 --
.../test/functional/GarbageCollectorIT.java | 301 ---
.../test/functional/HalfDeadTServerIT.java | 218 --
.../accumulo/test/functional/KerberosIT.java | 573 -----
.../test/functional/KerberosProxyIT.java | 426 ----
.../accumulo/test/functional/LargeRowIT.java | 219 --
.../test/functional/LateLastContactIT.java | 49 -
.../accumulo/test/functional/LogicalTimeIT.java | 109 -
.../accumulo/test/functional/MapReduceIT.java | 92 -
.../test/functional/MasterAssignmentIT.java | 100 -
.../test/functional/MasterFailoverIT.java | 80 -
.../accumulo/test/functional/MaxOpenIT.java | 176 --
.../accumulo/test/functional/MergeIT.java | 194 --
.../accumulo/test/functional/MetadataIT.java | 148 --
.../test/functional/MetadataMaxFilesIT.java | 116 -
.../test/functional/MetadataSplitIT.java | 56 -
.../test/functional/MonitorLoggingIT.java | 121 -
.../accumulo/test/functional/MonitorSslIT.java | 132 -
.../accumulo/test/functional/NativeMapIT.java | 613 -----
.../accumulo/test/functional/PermissionsIT.java | 707 ------
.../accumulo/test/functional/ReadWriteIT.java | 456 ----
.../functional/RecoveryWithEmptyRFileIT.java | 109 -
.../test/functional/RegexGroupBalanceIT.java | 192 --
.../accumulo/test/functional/RenameIT.java | 74 -
.../accumulo/test/functional/RestartIT.java | 367 ---
.../test/functional/RestartStressIT.java | 153 --
.../accumulo/test/functional/RowDeleteIT.java | 109 -
.../accumulo/test/functional/ScanIdIT.java | 385 ---
.../test/functional/ScanIteratorIT.java | 134 --
.../accumulo/test/functional/ScanRangeIT.java | 244 --
.../test/functional/ScanSessionTimeOutIT.java | 142 --
.../accumulo/test/functional/ScannerIT.java | 121 -
.../test/functional/ServerSideErrorIT.java | 128 -
.../test/functional/SessionDurabilityIT.java | 153 --
.../accumulo/test/functional/ShutdownIT.java | 121 -
.../functional/SimpleBalancerFairnessIT.java | 117 -
.../test/functional/SparseColumnFamilyIT.java | 98 -
.../accumulo/test/functional/SplitIT.java | 223 --
.../test/functional/SplitRecoveryIT.java | 279 ---
.../apache/accumulo/test/functional/SslIT.java | 72 -
.../test/functional/SslWithClientAuthIT.java | 77 -
.../accumulo/test/functional/StartIT.java | 43 -
.../accumulo/test/functional/TableIT.java | 108 -
.../accumulo/test/functional/TabletIT.java | 101 -
.../functional/TabletStateChangeIteratorIT.java | 192 --
.../accumulo/test/functional/TimeoutIT.java | 120 -
.../accumulo/test/functional/VisibilityIT.java | 323 ---
.../accumulo/test/functional/WALSunnyDayIT.java | 234 --
.../test/functional/WatchTheWatchCountIT.java | 80 -
.../test/functional/WriteAheadLogIT.java | 79 -
.../accumulo/test/functional/WriteLotsIT.java | 89 -
.../accumulo/test/functional/ZooCacheIT.java | 75 -
.../test/functional/ZookeeperRestartIT.java | 87 -
.../test/performance/RollWALPerformanceIT.java | 120 -
.../performance/metadata/FastBulkImportIT.java | 103 -
.../accumulo/test/proxy/ProxyDurabilityIT.java | 145 --
.../accumulo/test/proxy/SimpleProxyBase.java | 2273 ------------------
.../accumulo/test/proxy/TBinaryProxyIT.java | 33 -
.../accumulo/test/proxy/TCompactProxyIT.java | 32 -
.../test/proxy/TJsonProtocolProxyIT.java | 33 -
.../accumulo/test/proxy/TTupleProxyIT.java | 33 -
.../accumulo/test/proxy/TestProxyClient.java | 204 --
.../test/proxy/TestProxyInstanceOperations.java | 84 -
.../accumulo/test/proxy/TestProxyReadWrite.java | 468 ----
.../test/proxy/TestProxySecurityOperations.java | 147 --
.../test/proxy/TestProxyTableOperations.java | 202 --
.../test/replication/CyclicReplicationIT.java | 332 ---
...bageCollectorCommunicatesWithTServersIT.java | 417 ----
.../test/replication/KerberosReplicationIT.java | 233 --
.../replication/MultiInstanceReplicationIT.java | 731 ------
.../replication/MultiTserverReplicationIT.java | 115 -
.../test/replication/ReplicationIT.java | 1436 -----------
.../replication/ReplicationRandomWalkIT.java | 67 -
.../test/replication/StatusCombinerMacIT.java | 118 -
.../UnorderedWorkAssignerReplicationIT.java | 731 ------
...UnusedWalDoesntCloseReplicationStatusIT.java | 219 --
.../server/security/SystemCredentialsIT.java | 233 --
.../accumulo/test/start/KeywordStartIT.java | 197 --
.../apache/accumulo/test/util/CertUtils.java | 348 ---
test/src/test/resources/FooConstraint.jar | Bin 2130 -> 0 bytes
test/src/test/resources/FooFilter.jar | Bin 1645 -> 0 bytes
test/src/test/resources/TestCombinerX.jar | Bin 4335 -> 0 bytes
test/src/test/resources/TestCombinerY.jar | Bin 4129 -> 0 bytes
test/src/test/resources/TestCompactionStrat.jar | Bin 2530 -> 0 bytes
test/src/test/resources/conf/accumulo-site.xml | 123 -
test/src/test/resources/conf/generic_logger.xml | 83 -
test/src/test/resources/conf/monitor_logger.xml | 64 -
test/src/test/resources/log4j.properties | 55 -
test/src/test/resources/randomwalk/Basic.xml | 37 -
test/src/test/resources/randomwalk/Simple.xml | 43 -
test/src/test/resources/unit/Basic.xml | 37 -
test/src/test/resources/unit/Simple.xml | 43 -
386 files changed, 39618 insertions(+), 39666 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/assemble/pom.xml
----------------------------------------------------------------------
diff --git a/assemble/pom.xml b/assemble/pom.xml
index 525b443..b965fe6 100644
--- a/assemble/pom.xml
+++ b/assemble/pom.xml
@@ -223,17 +223,6 @@
</build>
<profiles>
<profile>
- <id>test-jar</id>
- <dependencies>
- <dependency>
- <groupId>org.apache.accumulo</groupId>
- <artifactId>accumulo-test</artifactId>
- <version>${project.version}</version>
- <classifier>tests</classifier>
- </dependency>
- </dependencies>
- </profile>
- <profile>
<id>apache-release</id>
<build>
<plugins>
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/pom.xml
----------------------------------------------------------------------
diff --git a/test/pom.xml b/test/pom.xml
index c68e158..57cfbbd 100644
--- a/test/pom.xml
+++ b/test/pom.xml
@@ -43,6 +43,10 @@
<artifactId>guava</artifactId>
</dependency>
<dependency>
+ <groupId>commons-cli</groupId>
+ <artifactId>commons-cli</artifactId>
+ </dependency>
+ <dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</dependency>
@@ -51,6 +55,10 @@
<artifactId>commons-configuration</artifactId>
</dependency>
<dependency>
+ <groupId>commons-httpclient</groupId>
+ <artifactId>commons-httpclient</artifactId>
+ </dependency>
+ <dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
</dependency>
@@ -63,6 +71,10 @@
<artifactId>jline</artifactId>
</dependency>
<dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ </dependency>
+ <dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</dependency>
@@ -72,6 +84,10 @@
</dependency>
<dependency>
<groupId>org.apache.accumulo</groupId>
+ <artifactId>accumulo-examples-simple</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.accumulo</groupId>
<artifactId>accumulo-fate</artifactId>
</dependency>
<dependency>
@@ -128,42 +144,12 @@
<artifactId>hadoop-client</artifactId>
</dependency>
<dependency>
- <groupId>org.apache.thrift</groupId>
- <artifactId>libthrift</artifactId>
- </dependency>
- <dependency>
- <groupId>org.apache.zookeeper</groupId>
- <artifactId>zookeeper</artifactId>
- </dependency>
- <dependency>
- <groupId>commons-cli</groupId>
- <artifactId>commons-cli</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>commons-httpclient</groupId>
- <artifactId>commons-httpclient</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.apache.accumulo</groupId>
- <artifactId>accumulo-examples-simple</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minicluster</artifactId>
- <scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minikdc</artifactId>
- <scope>test</scope>
<exclusions>
<!-- Pulls in an older bouncycastle version -->
<exclusion>
@@ -173,29 +159,32 @@
</exclusions>
</dependency>
<dependency>
+ <groupId>org.apache.thrift</groupId>
+ <artifactId>libthrift</artifactId>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.zookeeper</groupId>
+ <artifactId>zookeeper</artifactId>
+ </dependency>
+ <dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcpkix-jdk15on</artifactId>
- <scope>test</scope>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcprov-jdk15on</artifactId>
- <scope>test</scope>
</dependency>
<dependency>
<groupId>org.easymock</groupId>
<artifactId>easymock</artifactId>
- <scope>test</scope>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-server</artifactId>
- <scope>test</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
- <scope>test</scope>
</dependency>
</dependencies>
<build>
@@ -216,6 +205,8 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<configuration>
+ <testSourceDirectory>${project.basedir}/src/main/java/</testSourceDirectory>
+ <testClassesDirectory>${project.build.directory}/classes/</testClassesDirectory>
<systemPropertyVariables>
<timeout.factor>${timeout.factor}</timeout.factor>
<org.apache.accumulo.test.functional.useCredProviderForIT>${useCredProviderForIT}</org.apache.accumulo.test.functional.useCredProviderForIT>
@@ -240,32 +231,6 @@
</build>
<profiles>
<profile>
- <id>test-jar</id>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-jar-plugin</artifactId>
- <configuration>
- <archive>
- <manifestEntries>
- <Sealed>false</Sealed>
- </manifestEntries>
- </archive>
- </configuration>
- <executions>
- <execution>
- <id>make-test-jar</id>
- <goals>
- <goal>test-jar</goal>
- </goals>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
- </profile>
- <profile>
<id>shared-mini-for-it</id>
<!--
<activation>
@@ -336,7 +301,6 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-distcp</artifactId>
- <scope>test</scope>
</dependency>
</dependencies>
</profile>
@@ -355,7 +319,6 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-distcp</artifactId>
- <scope>test</scope>
</dependency>
</dependencies>
</profile>
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java b/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
new file mode 100644
index 0000000..30058db
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.harness;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import org.apache.accumulo.cluster.AccumuloCluster;
+import org.apache.accumulo.cluster.ClusterControl;
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.cluster.ClusterUsers;
+import org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.admin.SecurityOperations;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.KerberosToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.harness.conf.AccumuloClusterConfiguration;
+import org.apache.accumulo.harness.conf.AccumuloClusterPropertyConfiguration;
+import org.apache.accumulo.harness.conf.AccumuloMiniClusterConfiguration;
+import org.apache.accumulo.harness.conf.StandaloneAccumuloClusterConfiguration;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * General Integration-Test base class that provides access to an Accumulo instance for testing. This instance may be a MiniAccumuloCluster (MAC) or a standalone instance.
+ */
+public abstract class AccumuloClusterHarness extends AccumuloITBase implements MiniClusterConfigurationCallback, ClusterUsers {
+ private static final Logger log = LoggerFactory.getLogger(AccumuloClusterHarness.class);
+ private static final String TRUE = Boolean.toString(true);
+
+ public static enum ClusterType {
+ MINI, STANDALONE;
+
+ public boolean isDynamic() {
+ return this == MINI;
+ }
+ }
+
+ private static boolean initialized = false;
+
+ protected static AccumuloCluster cluster;
+ protected static ClusterType type;
+ protected static AccumuloClusterPropertyConfiguration clusterConf;
+ protected static TestingKdc krb;
+
+ @BeforeClass
+ public static void setUp() throws Exception {
+ clusterConf = AccumuloClusterPropertyConfiguration.get();
+ type = clusterConf.getClusterType();
+
+ if (ClusterType.MINI == type && TRUE.equals(System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION))) {
+ krb = new TestingKdc();
+ krb.start();
+ log.info("MiniKdc started");
+ }
+
+ initialized = true;
+ }
+
+ @AfterClass
+ public static void tearDownKdc() throws Exception {
+ if (null != krb) {
+ krb.stop();
+ }
+ }
+
+ /**
+ * The {@link TestingKdc} used for this {@link AccumuloCluster}. Might be null.
+ */
+ public static TestingKdc getKdc() {
+ return krb;
+ }
+
+ @Before
+ public void setupCluster() throws Exception {
+ // Before we try to instantiate the cluster, check to see if the test even wants to run against this type of cluster
+ Assume.assumeTrue(canRunTest(type));
+
+ switch (type) {
+ case MINI:
+ MiniClusterHarness miniClusterHarness = new MiniClusterHarness();
+ // Intrinsically performs the callback to let tests alter MiniAccumuloConfig and core-site.xml
+ MiniAccumuloClusterImpl impl = miniClusterHarness.create(this, getAdminToken(), krb);
+ cluster = impl;
+ // MAC makes a ClientConf for us, just set it
+ ((AccumuloMiniClusterConfiguration) clusterConf).setClientConf(impl.getClientConfig());
+ // Login as the "root" user
+ if (null != krb) {
+ ClusterUser rootUser = krb.getRootUser();
+ // Log in the 'client' user
+ UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+ }
+ break;
+ case STANDALONE:
+ StandaloneAccumuloClusterConfiguration conf = (StandaloneAccumuloClusterConfiguration) clusterConf;
+ ClientConfiguration clientConf = conf.getClientConf();
+ StandaloneAccumuloCluster standaloneCluster = new StandaloneAccumuloCluster(conf.getInstance(), clientConf, conf.getTmpDirectory(), conf.getUsers(),
+ conf.getAccumuloServerUser());
+ // If these are provided in the configuration, pass them into the cluster
+ standaloneCluster.setAccumuloHome(conf.getAccumuloHome());
+ standaloneCluster.setClientAccumuloConfDir(conf.getClientAccumuloConfDir());
+ standaloneCluster.setServerAccumuloConfDir(conf.getServerAccumuloConfDir());
+ standaloneCluster.setHadoopConfDir(conf.getHadoopConfDir());
+
+ // For SASL, we need to get the Hadoop configuration files as well; otherwise UGI will log in as SIMPLE instead of KERBEROS
+ Configuration hadoopConfiguration = standaloneCluster.getHadoopConfiguration();
+ if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ UserGroupInformation.setConfiguration(hadoopConfiguration);
+ // Login as the admin user to start the tests
+ UserGroupInformation.loginUserFromKeytab(conf.getAdminPrincipal(), conf.getAdminKeytab().getAbsolutePath());
+ }
+
+ // Set the implementation
+ cluster = standaloneCluster;
+ break;
+ default:
+ throw new RuntimeException("Unhandled type");
+ }
+
+ if (type.isDynamic()) {
+ cluster.start();
+ } else {
+ log.info("Removing tables which appear to be from a previous test run");
+ cleanupTables();
+ log.info("Removing users which appear to be from a previous test run");
+ cleanupUsers();
+ }
+
+ switch (type) {
+ case MINI:
+ if (null != krb) {
+ final String traceTable = Property.TRACE_TABLE.getDefaultValue();
+ final ClusterUser systemUser = krb.getAccumuloServerUser(), rootUser = krb.getRootUser();
+
+ // Login as the trace user
+ UserGroupInformation.loginUserFromKeytab(systemUser.getPrincipal(), systemUser.getKeytab().getAbsolutePath());
+
+ // Open a connector as the system user (ensures the user will exist for us to assign permissions to)
+ Connector conn = cluster.getConnector(systemUser.getPrincipal(), new KerberosToken(systemUser.getPrincipal(), systemUser.getKeytab(), true));
+
+ // Then, log back in as the "root" user and do the grant
+ UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+ conn = getConnector();
+
+ // Create the trace table
+ conn.tableOperations().create(traceTable);
+
+ // Trace user (which is the same kerberos principal as the system user, but using a normal KerberosToken) needs
+ // to have the ability to read, write and alter the trace table
+ conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.READ);
+ conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.WRITE);
+ conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.ALTER_TABLE);
+ }
+ break;
+ default:
+ // do nothing
+ }
+ }
+
+ public void cleanupTables() throws Exception {
+ final String tablePrefix = this.getClass().getSimpleName() + "_";
+ final TableOperations tops = getConnector().tableOperations();
+ for (String table : tops.list()) {
+ if (table.startsWith(tablePrefix)) {
+ log.debug("Removing table {}", table);
+ tops.delete(table);
+ }
+ }
+ }
+
+ public void cleanupUsers() throws Exception {
+ final String userPrefix = this.getClass().getSimpleName();
+ final SecurityOperations secOps = getConnector().securityOperations();
+ for (String user : secOps.listLocalUsers()) {
+ if (user.startsWith(userPrefix)) {
+ log.info("Dropping local user {}", user);
+ secOps.dropLocalUser(user);
+ }
+ }
+ }
+
+ @After
+ public void teardownCluster() throws Exception {
+ if (null != cluster) {
+ if (type.isDynamic()) {
+ cluster.stop();
+ } else {
+ log.info("Removing tables which appear to be from the current test");
+ cleanupTables();
+ log.info("Removing users which appear to be from the current test");
+ cleanupUsers();
+ }
+ }
+ }
+
+ public static AccumuloCluster getCluster() {
+ Preconditions.checkState(initialized);
+ return cluster;
+ }
+
+ public static ClusterControl getClusterControl() {
+ Preconditions.checkState(initialized);
+ return cluster.getClusterControl();
+ }
+
+ public static ClusterType getClusterType() {
+ Preconditions.checkState(initialized);
+ return type;
+ }
+
+ public static String getAdminPrincipal() {
+ Preconditions.checkState(initialized);
+ return clusterConf.getAdminPrincipal();
+ }
+
+ public static AuthenticationToken getAdminToken() {
+ Preconditions.checkState(initialized);
+ return clusterConf.getAdminToken();
+ }
+
+ @Override
+ public ClusterUser getAdminUser() {
+ switch (type) {
+ case MINI:
+ if (null == krb) {
+ PasswordToken passwordToken = (PasswordToken) getAdminToken();
+ return new ClusterUser(getAdminPrincipal(), new String(passwordToken.getPassword(), UTF_8));
+ }
+ return krb.getRootUser();
+ case STANDALONE:
+ return new ClusterUser(getAdminPrincipal(), ((StandaloneAccumuloClusterConfiguration) clusterConf).getAdminKeytab());
+ default:
+ throw new RuntimeException("Unknown cluster type");
+ }
+ }
+
+ @Override
+ public ClusterUser getUser(int offset) {
+ switch (type) {
+ case MINI:
+ if (null != krb) {
+ // Defer to the TestingKdc when kerberos is on so we can get the keytab instead of a password
+ return krb.getClientPrincipal(offset);
+ } else {
+ // Come up with a mostly unique name
+ String principal = getClass().getSimpleName() + "_" + testName.getMethodName() + "_" + offset;
+ // Username and password are the same
+ return new ClusterUser(principal, principal);
+ }
+ case STANDALONE:
+ return ((StandaloneAccumuloCluster) cluster).getUser(offset);
+ default:
+ throw new RuntimeException("Unknown cluster type");
+ }
+ }
+
+ public static FileSystem getFileSystem() throws IOException {
+ Preconditions.checkState(initialized);
+ return cluster.getFileSystem();
+ }
+
+ public static AccumuloClusterConfiguration getClusterConfiguration() {
+ Preconditions.checkState(initialized);
+ return clusterConf;
+ }
+
+ public Connector getConnector() {
+ try {
+ String princ = getAdminPrincipal();
+ AuthenticationToken token = getAdminToken();
+ log.debug("Creating connector as {} with {}", princ, token);
+ return cluster.getConnector(princ, token);
+ } catch (Exception e) {
+ log.error("Could not connect to Accumulo", e);
+ fail("Could not connect to Accumulo: " + e.getMessage());
+
+ throw new RuntimeException("Could not connect to Accumulo", e);
+ }
+ }
+
+ // TODO Really don't want this here. Will ultimately need to abstract configuration method away from MAConfig
+ // and change over to something more generic
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {}
+
+ /**
+ * A test may not be capable of running against a given AccumuloCluster. Implementations can override this method to advertise that they cannot (or do not
+ * want to) run the test.
+ */
+ public boolean canRunTest(ClusterType type) {
+ return true;
+ }
+
+ /**
+ * Tries to give a reasonable directory which can be used to create temporary files for the test. Makes a basic attempt to create the directory if it does not
+ * already exist.
+ *
+ * @return A directory which can be expected to exist on the Cluster's FileSystem
+ */
+ public Path getUsableDir() throws IllegalArgumentException, IOException {
+ return cluster.getTemporaryPath();
+ }
+}
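For orientation, a minimal sketch of a test built on this harness. The test class and table check below are hypothetical; getUniqueNames() and getConnector() are the methods added in this patch, and TableOperations.create()/exists() are the standard client API:

    import static org.junit.Assert.assertTrue;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.harness.AccumuloClusterHarness;
    import org.junit.Test;

    public class ExampleHarnessIT extends AccumuloClusterHarness {
      @Test
      public void createsUniquelyNamedTable() throws Exception {
        // getUniqueNames() prefixes names with the class and method name,
        // which is what cleanupTables() keys off of on standalone clusters
        String table = getUniqueNames(1)[0];
        Connector conn = getConnector(); // connects as the configured admin principal
        conn.tableOperations().create(table);
        assertTrue(conn.tableOperations().exists(table));
      }
    }

The same test runs unchanged against MINI or STANDALONE; setupCluster() decides which, and canRunTest() can be overridden to opt out of one cluster type.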
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/harness/AccumuloITBase.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/harness/AccumuloITBase.java b/test/src/main/java/org/apache/accumulo/harness/AccumuloITBase.java
new file mode 100644
index 0000000..8e2f6e0
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/harness/AccumuloITBase.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.harness;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+
+import org.apache.commons.io.FileUtils;
+import org.junit.Rule;
+import org.junit.rules.TestName;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Methods, setup and/or infrastructure which are common to any Accumulo integration test.
+ */
+public class AccumuloITBase {
+ private static final Logger log = LoggerFactory.getLogger(AccumuloITBase.class);
+
+ @Rule
+ public TestName testName = new TestName();
+
+ public String[] getUniqueNames(int num) {
+ String[] names = new String[num];
+ for (int i = 0; i < num; i++)
+ names[i] = this.getClass().getSimpleName() + "_" + testName.getMethodName() + i;
+ return names;
+ }
+
+ /**
+ * Determines an appropriate directory name for holding generated ssl files for a test. The directory returned will have the same name as the provided
+ * directory, but with the suffix "-ssl" appended. This new directory is not created here, but is expected to be created as needed.
+ *
+ * @param baseDir
+ * the original directory, which the new directory will be created next to; it should exist
+ * @return the new directory (is not created)
+ */
+ public static File getSslDir(File baseDir) {
+ assertTrue(baseDir.exists() && baseDir.isDirectory());
+ return new File(baseDir.getParentFile(), baseDir.getName() + "-ssl");
+ }
+
+ public static File createTestDir(String name) {
+ File baseDir = new File(System.getProperty("user.dir") + "/target/mini-tests");
+ assertTrue(baseDir.mkdirs() || baseDir.isDirectory());
+ if (name == null)
+ return baseDir;
+ File testDir = new File(baseDir, name);
+ FileUtils.deleteQuietly(testDir);
+ assertTrue(testDir.mkdir());
+ return testDir;
+ }
+
+ /**
+ * If a given IT test has a method that takes longer than a class-set default timeout, declare it failed.
+ *
+ * Note that this provides an upper bound on test times, even in the presence of Test annotations with a timeout. That is, the Test annotation can make the
+ * timing tighter but cannot extend the timeout beyond the class-provided bound.
+ *
+ * Defaults to no timeout and can be changed via two mechanisms:
+ *
+ * 1) A given IT class can override the defaultTimeoutSeconds method if test methods in that class should have a timeout. 2) The system property
+ * "timeout.factor" is used as a multiplier for the class-provided default.
+ *
+ * Note that if either of these values is '0', tests will run with no timeout. The default class-level timeout is set to 0.
+ *
+ */
+ @Rule
+ public Timeout testsShouldTimeout() {
+ int waitLonger = 0;
+ try {
+ String timeoutString = System.getProperty("timeout.factor");
+ if (timeoutString != null && !timeoutString.isEmpty()) {
+ waitLonger = Integer.parseInt(timeoutString);
+ }
+ } catch (NumberFormatException exception) {
+ log.warn("Could not parse timeout.factor, defaulting to no timeout.");
+ }
+ return new Timeout(waitLonger * defaultTimeoutSeconds() * 1000);
+ }
+
+ /**
+ * Time to wait per method before declaring a timeout, in seconds.
+ */
+ protected int defaultTimeoutSeconds() {
+ return 0;
+ }
+}
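As a sketch of the timeout contract described above: a subclass opts in by overriding defaultTimeoutSeconds(), and the effective limit is that value multiplied by the timeout.factor system property (zero on either side disables the rule). The class and method names here are hypothetical:

    import org.apache.accumulo.harness.AccumuloITBase;
    import org.junit.Test;

    public class SlowOperationIT extends AccumuloITBase {
      @Override
      protected int defaultTimeoutSeconds() {
        // with -Dtimeout.factor=2 on the command line, methods in this
        // class are failed by the Timeout rule after 2 * 60 = 120 seconds
        return 60;
      }

      @Test
      public void longRunningCheck() throws Exception {
        // work that must finish inside the limit computed by testsShouldTimeout()
      }
    }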
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/harness/MiniClusterConfigurationCallback.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/harness/MiniClusterConfigurationCallback.java b/test/src/main/java/org/apache/accumulo/harness/MiniClusterConfigurationCallback.java
new file mode 100644
index 0000000..5fa6eb5
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/harness/MiniClusterConfigurationCallback.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.harness;
+
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * Callback interface to inject configuration into the MiniAccumuloCluster or Hadoop core-site.xml file used by the MiniAccumuloCluster
+ */
+public interface MiniClusterConfigurationCallback {
+
+ public static class NoCallback implements MiniClusterConfigurationCallback {
+
+ private NoCallback() {}
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
+ return;
+ }
+ }
+
+ public static final MiniClusterConfigurationCallback NO_CALLBACK = new NoCallback();
+
+ void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite);
+
+}
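A sketch of the callback in use; the tserver count, memory value, and core-site entry are illustrative, not part of this commit:

    import org.apache.accumulo.core.conf.Property;
    import org.apache.accumulo.harness.MiniClusterConfigurationCallback;
    import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
    import org.apache.hadoop.conf.Configuration;

    public class TwoTserverCallback implements MiniClusterConfigurationCallback {
      @Override
      public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
        cfg.setNumTservers(2);                         // MAC-level setting
        cfg.setProperty(Property.TSERV_MAXMEM, "64M"); // lands in the generated accumulo-site.xml
        coreSite.set("io.file.buffer.size", "65536");  // illustrative core-site.xml entry
      }
    }

Tests extending AccumuloClusterHarness get the same hook by overriding configureMiniCluster() directly, since the harness itself implements this interface.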
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/harness/MiniClusterHarness.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/harness/MiniClusterHarness.java b/test/src/main/java/org/apache/accumulo/harness/MiniClusterHarness.java
new file mode 100644
index 0000000..d923593
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/harness/MiniClusterHarness.java
@@ -0,0 +1,242 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.harness;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.OutputStream;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.KerberosToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.security.handler.KerberosAuthenticator;
+import org.apache.accumulo.server.security.handler.KerberosAuthorizor;
+import org.apache.accumulo.server.security.handler.KerberosPermissionHandler;
+import org.apache.accumulo.test.functional.NativeMapIT;
+import org.apache.accumulo.test.util.CertUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
+
+/**
+ * Harness that sets up a MiniAccumuloCluster in a manner expected for Accumulo integration tests.
+ */
+public class MiniClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(MiniClusterHarness.class);
+
+ private static final AtomicLong COUNTER = new AtomicLong(0);
+
+ public static final String USE_SSL_FOR_IT_OPTION = "org.apache.accumulo.test.functional.useSslForIT",
+ USE_CRED_PROVIDER_FOR_IT_OPTION = "org.apache.accumulo.test.functional.useCredProviderForIT",
+ USE_KERBEROS_FOR_IT_OPTION = "org.apache.accumulo.test.functional.useKrbForIT", TRUE = Boolean.toString(true);
+
+ // TODO These are defined in MiniKdc >= 2.6.0. Can be removed when minimum Hadoop dependency is increased to that.
+ public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf", SUN_SECURITY_KRB5_DEBUG = "sun.security.krb5.debug";
+
+ /**
+ * Create a MiniAccumuloCluster using the given Token as the credentials for the root user.
+ */
+ public MiniAccumuloClusterImpl create(AuthenticationToken token) throws Exception {
+ return create(MiniClusterHarness.class.getName(), Long.toString(COUNTER.incrementAndGet()), token);
+ }
+
+ public MiniAccumuloClusterImpl create(AuthenticationToken token, TestingKdc kdc) throws Exception {
+ return create(MiniClusterHarness.class.getName(), Long.toString(COUNTER.incrementAndGet()), token, kdc);
+ }
+
+ public MiniAccumuloClusterImpl create(AccumuloITBase testBase, AuthenticationToken token) throws Exception {
+ return create(testBase.getClass().getName(), testBase.testName.getMethodName(), token);
+ }
+
+ public MiniAccumuloClusterImpl create(AccumuloITBase testBase, AuthenticationToken token, TestingKdc kdc) throws Exception {
+ return create(testBase, token, kdc, MiniClusterConfigurationCallback.NO_CALLBACK);
+ }
+
+ public MiniAccumuloClusterImpl create(AccumuloITBase testBase, AuthenticationToken token, TestingKdc kdc, MiniClusterConfigurationCallback configCallback)
+ throws Exception {
+ return create(testBase.getClass().getName(), testBase.testName.getMethodName(), token, configCallback, kdc);
+ }
+
+ public MiniAccumuloClusterImpl create(AccumuloClusterHarness testBase, AuthenticationToken token, TestingKdc kdc) throws Exception {
+ return create(testBase.getClass().getName(), testBase.testName.getMethodName(), token, testBase, kdc);
+ }
+
+ public MiniAccumuloClusterImpl create(AccumuloClusterHarness testBase, AuthenticationToken token, MiniClusterConfigurationCallback callback) throws Exception {
+ return create(testBase.getClass().getName(), testBase.testName.getMethodName(), token, callback);
+ }
+
+ public MiniAccumuloClusterImpl create(String testClassName, String testMethodName, AuthenticationToken token) throws Exception {
+ return create(testClassName, testMethodName, token, MiniClusterConfigurationCallback.NO_CALLBACK);
+ }
+
+ public MiniAccumuloClusterImpl create(String testClassName, String testMethodName, AuthenticationToken token, TestingKdc kdc) throws Exception {
+ return create(testClassName, testMethodName, token, MiniClusterConfigurationCallback.NO_CALLBACK, kdc);
+ }
+
+ public MiniAccumuloClusterImpl create(String testClassName, String testMethodName, AuthenticationToken token, MiniClusterConfigurationCallback configCallback)
+ throws Exception {
+ return create(testClassName, testMethodName, token, configCallback, null);
+ }
+
+ public MiniAccumuloClusterImpl create(String testClassName, String testMethodName, AuthenticationToken token,
+ MiniClusterConfigurationCallback configCallback, TestingKdc kdc) throws Exception {
+ Preconditions.checkNotNull(token);
+ Preconditions.checkArgument(token instanceof PasswordToken || token instanceof KerberosToken, "A PasswordToken or KerberosToken is required");
+
+ String rootPasswd;
+ if (token instanceof PasswordToken) {
+ rootPasswd = new String(((PasswordToken) token).getPassword(), Charsets.UTF_8);
+ } else {
+ rootPasswd = UUID.randomUUID().toString();
+ }
+
+ File baseDir = AccumuloClusterHarness.createTestDir(testClassName + "_" + testMethodName);
+ MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(baseDir, rootPasswd);
+
+ // Enable native maps by default
+ cfg.setNativeLibPaths(NativeMapIT.nativeMapLocation().getAbsolutePath());
+ cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, Boolean.TRUE.toString());
+
+ Configuration coreSite = new Configuration(false);
+
+ // Setup SSL and credential providers if the properties request such
+ configureForEnvironment(cfg, getClass(), AccumuloClusterHarness.getSslDir(baseDir), coreSite, kdc);
+
+ // Invoke the callback for tests to configure MAC before it starts
+ configCallback.configureMiniCluster(cfg, coreSite);
+
+ MiniAccumuloClusterImpl miniCluster = new MiniAccumuloClusterImpl(cfg);
+
+ // Write out any configuration items to a file so HDFS will pick them up automatically (from the classpath)
+ if (coreSite.size() > 0) {
+ File csFile = new File(miniCluster.getConfig().getConfDir(), "core-site.xml");
+ if (csFile.exists())
+ throw new RuntimeException(csFile + " already exists");
+
+ OutputStream out = new BufferedOutputStream(new FileOutputStream(new File(miniCluster.getConfig().getConfDir(), "core-site.xml")));
+ coreSite.writeXml(out);
+ out.close();
+ }
+
+ return miniCluster;
+ }
+
+ protected void configureForEnvironment(MiniAccumuloConfigImpl cfg, Class<?> testClass, File folder, Configuration coreSite, TestingKdc kdc) {
+ if (TRUE.equals(System.getProperty(USE_SSL_FOR_IT_OPTION))) {
+ configureForSsl(cfg, folder);
+ }
+ if (TRUE.equals(System.getProperty(USE_CRED_PROVIDER_FOR_IT_OPTION))) {
+ cfg.setUseCredentialProvider(true);
+ }
+
+ if (TRUE.equals(System.getProperty(USE_KERBEROS_FOR_IT_OPTION))) {
+ if (TRUE.equals(System.getProperty(USE_SSL_FOR_IT_OPTION))) {
+ throw new RuntimeException("Cannot use both SSL and Kerberos");
+ }
+
+ try {
+ configureForKerberos(cfg, folder, coreSite, kdc);
+ } catch (Exception e) {
+ throw new RuntimeException("Failed to initialize KDC", e);
+ }
+ }
+ }
+
+ protected void configureForSsl(MiniAccumuloConfigImpl cfg, File folder) {
+ Map<String,String> siteConfig = cfg.getSiteConfig();
+ if (TRUE.equals(siteConfig.get(Property.INSTANCE_RPC_SSL_ENABLED.getKey()))) {
+ // already enabled; don't mess with it
+ return;
+ }
+
+ File sslDir = new File(folder, "ssl");
+ assertTrue(sslDir.mkdirs() || sslDir.isDirectory());
+ File rootKeystoreFile = new File(sslDir, "root-" + cfg.getInstanceName() + ".jks");
+ File localKeystoreFile = new File(sslDir, "local-" + cfg.getInstanceName() + ".jks");
+ File publicTruststoreFile = new File(sslDir, "public-" + cfg.getInstanceName() + ".jks");
+ final String rootKeystorePassword = "root_keystore_password", truststorePassword = "truststore_password";
+ try {
+ new CertUtils(Property.RPC_SSL_KEYSTORE_TYPE.getDefaultValue(), "o=Apache Accumulo,cn=MiniAccumuloCluster", "RSA", 2048, "sha1WithRSAEncryption")
+ .createAll(rootKeystoreFile, localKeystoreFile, publicTruststoreFile, cfg.getInstanceName(), rootKeystorePassword, cfg.getRootPassword(),
+ truststorePassword);
+ } catch (Exception e) {
+ throw new RuntimeException("error creating MAC keystore", e);
+ }
+
+ siteConfig.put(Property.INSTANCE_RPC_SSL_ENABLED.getKey(), "true");
+ siteConfig.put(Property.RPC_SSL_KEYSTORE_PATH.getKey(), localKeystoreFile.getAbsolutePath());
+ siteConfig.put(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey(), cfg.getRootPassword());
+ siteConfig.put(Property.RPC_SSL_TRUSTSTORE_PATH.getKey(), publicTruststoreFile.getAbsolutePath());
+ siteConfig.put(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey(), truststorePassword);
+ cfg.setSiteConfig(siteConfig);
+ }
+
+ protected void configureForKerberos(MiniAccumuloConfigImpl cfg, File folder, Configuration coreSite, TestingKdc kdc) throws Exception {
+ Map<String,String> siteConfig = cfg.getSiteConfig();
+ if (TRUE.equals(siteConfig.get(Property.INSTANCE_RPC_SSL_ENABLED.getKey()))) {
+ throw new RuntimeException("Cannot use both SSL and SASL/Kerberos");
+ }
+
+ if (TRUE.equals(siteConfig.get(Property.INSTANCE_RPC_SASL_ENABLED.getKey()))) {
+ // already enabled
+ return;
+ }
+
+ if (null == kdc) {
+ throw new IllegalStateException("MiniClusterKdc was null");
+ }
+
+ log.info("Enabling Kerberos/SASL for minicluster");
+
+ // Turn on SASL and set the keytab/principal information
+ cfg.setProperty(Property.INSTANCE_RPC_SASL_ENABLED, "true");
+ ClusterUser serverUser = kdc.getAccumuloServerUser();
+ cfg.setProperty(Property.GENERAL_KERBEROS_KEYTAB, serverUser.getKeytab().getAbsolutePath());
+ cfg.setProperty(Property.GENERAL_KERBEROS_PRINCIPAL, serverUser.getPrincipal());
+ cfg.setProperty(Property.INSTANCE_SECURITY_AUTHENTICATOR, KerberosAuthenticator.class.getName());
+ cfg.setProperty(Property.INSTANCE_SECURITY_AUTHORIZOR, KerberosAuthorizor.class.getName());
+ cfg.setProperty(Property.INSTANCE_SECURITY_PERMISSION_HANDLER, KerberosPermissionHandler.class.getName());
+ // Piggy-back on the "system user" credential, but use it as a normal KerberosToken, not the SystemToken.
+ cfg.setProperty(Property.TRACE_USER, serverUser.getPrincipal());
+ cfg.setProperty(Property.TRACE_TOKEN_TYPE, KerberosToken.CLASS_NAME);
+
+ // Pass down some KRB5 debug properties
+ Map<String,String> systemProperties = cfg.getSystemProperties();
+ systemProperties.put(JAVA_SECURITY_KRB5_CONF, System.getProperty(JAVA_SECURITY_KRB5_CONF, ""));
+ systemProperties.put(SUN_SECURITY_KRB5_DEBUG, System.getProperty(SUN_SECURITY_KRB5_DEBUG, "false"));
+ cfg.setSystemProperties(systemProperties);
+
+ // Make sure UserGroupInformation will do the correct login
+ coreSite.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+
+ cfg.setRootUserName(kdc.getRootUser().getPrincipal());
+ }
+}
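The factory can also be driven outside the harness base classes. A hedged sketch, assuming a throwaway driver class (the class name, password, and usage are hypothetical; create(), start(), and stop() are as defined above and in MiniAccumuloClusterImpl):

    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
    import org.apache.accumulo.harness.MiniClusterHarness;
    import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;

    public class AdHocMiniCluster {
      public static void main(String[] args) throws Exception {
        MiniClusterHarness harness = new MiniClusterHarness();
        // the single-token create() variant names the cluster via the internal
        // counter; the useSslForIT/useCredProviderForIT/useKrbForIT system
        // properties documented above are honored
        MiniAccumuloClusterImpl mac = harness.create(new PasswordToken("secret"));
        mac.start();
        try {
          // exercise the cluster, e.g. via mac.getConnector(...)
        } finally {
          mac.stop();
        }
      }
    }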
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/harness/SharedMiniClusterBase.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/harness/SharedMiniClusterBase.java b/test/src/main/java/org/apache/accumulo/harness/SharedMiniClusterBase.java
new file mode 100644
index 0000000..79340f2
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/harness/SharedMiniClusterBase.java
@@ -0,0 +1,185 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.harness;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Random;
+
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.cluster.ClusterUsers;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.KerberosToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Convenience class which starts a single MAC instance for a test to leverage.
+ *
+ * There isn't a good way to build this off of {@link AccumuloClusterHarness} (as would be the logical place) because we need to start the
+ * MiniAccumuloCluster in a static BeforeClass-annotated method. Because that method is static and invoked before any other BeforeClass methods in the
+ * implementation, the actual test classes can't pass any information to the base class to request the one-MAC-per-class semantics.
+ */
+public abstract class SharedMiniClusterBase extends AccumuloITBase implements ClusterUsers {
+ private static final Logger log = LoggerFactory.getLogger(SharedMiniClusterBase.class);
+ public static final String TRUE = Boolean.toString(true);
+
+ private static String principal = "root";
+ private static String rootPassword;
+ private static AuthenticationToken token;
+ private static MiniAccumuloClusterImpl cluster;
+ private static TestingKdc krb;
+
+ @BeforeClass
+ public static void startMiniCluster() throws Exception {
+ File baseDir = new File(System.getProperty("user.dir") + "/target/mini-tests");
+ assertTrue(baseDir.mkdirs() || baseDir.isDirectory());
+
+ // Make a shared MAC instance instead of spinning up one per test method
+ MiniClusterHarness harness = new MiniClusterHarness();
+
+ if (TRUE.equals(System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION))) {
+ krb = new TestingKdc();
+ krb.start();
+ // Enable krb auth
+ Configuration conf = new Configuration(false);
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+ UserGroupInformation.setConfiguration(conf);
+ // Login as the client
+ ClusterUser rootUser = krb.getRootUser();
+ // Get the krb token
+ principal = rootUser.getPrincipal();
+ token = new KerberosToken(principal, rootUser.getKeytab(), true);
+ } else {
+ rootPassword = "rootPasswordShared1";
+ token = new PasswordToken(rootPassword);
+ }
+
+ cluster = harness.create(SharedMiniClusterBase.class.getName(), System.currentTimeMillis() + "_" + new Random().nextInt(Short.MAX_VALUE), token, krb);
+ cluster.start();
+
+ if (null != krb) {
+ final String traceTable = Property.TRACE_TABLE.getDefaultValue();
+ final ClusterUser systemUser = krb.getAccumuloServerUser(), rootUser = krb.getRootUser();
+ // Login as the trace user
+ // Open a connector as the system user (ensures the user will exist for us to assign permissions to)
+ Connector conn = cluster.getConnector(systemUser.getPrincipal(), new KerberosToken(systemUser.getPrincipal(), systemUser.getKeytab(), true));
+
+ // Then, log back in as the "root" user and do the grant
+ UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+ conn = cluster.getConnector(principal, token);
+
+ // Create the trace table
+ conn.tableOperations().create(traceTable);
+
+ // Trace user (which is the same kerberos principal as the system user, but using a normal KerberosToken) needs
+ // to have the ability to read, write and alter the trace table
+ conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.READ);
+ conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.WRITE);
+ conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.ALTER_TABLE);
+ }
+ }
+
+ @AfterClass
+ public static void stopMiniCluster() throws Exception {
+ if (null != cluster) {
+ try {
+ cluster.stop();
+ } catch (Exception e) {
+ log.error("Failed to stop minicluster", e);
+ }
+ }
+ if (null != krb) {
+ try {
+ krb.stop();
+ } catch (Exception e) {
+ log.error("Failed to stop KDC", e);
+ }
+ }
+ }
+
+ public static String getRootPassword() {
+ return rootPassword;
+ }
+
+ public static AuthenticationToken getToken() {
+ if (token instanceof KerberosToken) {
+ try {
+ UserGroupInformation.loginUserFromKeytab(getPrincipal(), krb.getRootUser().getKeytab().getAbsolutePath());
+ } catch (IOException e) {
+ throw new RuntimeException("Failed to login", e);
+ }
+ }
+ return token;
+ }
+
+ public static String getPrincipal() {
+ return principal;
+ }
+
+ public static MiniAccumuloClusterImpl getCluster() {
+ return cluster;
+ }
+
+ public static File getMiniClusterDir() {
+ return cluster.getConfig().getDir();
+ }
+
+ public static Connector getConnector() {
+ try {
+ return getCluster().getConnector(principal, getToken());
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public static TestingKdc getKdc() {
+ return krb;
+ }
+
+ @Override
+ public ClusterUser getAdminUser() {
+ if (null == krb) {
+ return new ClusterUser(getPrincipal(), getRootPassword());
+ } else {
+ return krb.getRootUser();
+ }
+ }
+
+ @Override
+ public ClusterUser getUser(int offset) {
+ if (null == krb) {
+ String user = SharedMiniClusterBase.class.getName() + "_" + testName.getMethodName() + "_" + offset;
+ // Password is the username
+ return new ClusterUser(user, user);
+ } else {
+ return krb.getClientPrincipal(offset);
+ }
+ }
+}
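
For illustration, a minimal sketch (hypothetical class and table names, not part of this commit) of how a subclass would consume the shared MAC instance that the @BeforeClass/@AfterClass methods above manage:

    package org.apache.accumulo.harness;

    import static org.junit.Assert.assertTrue;

    import org.apache.accumulo.core.client.Connector;
    import org.junit.Test;

    // Hypothetical IT: JUnit runs startMiniCluster()/stopMiniCluster() from the base class,
    // so every test method here shares the single MiniAccumuloCluster instance.
    public class ExampleSharedMacIT extends SharedMiniClusterBase {
      @Test
      public void sharedClusterIsUsable() throws Exception {
        // Authenticates as the root user via password or Kerberos, per the base class setup
        Connector conn = getConnector();
        String table = "exampleSharedMacTable"; // hypothetical table name
        conn.tableOperations().create(table);
        assertTrue(conn.tableOperations().exists(table));
      }
    }
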
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/harness/TestingKdc.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/harness/TestingKdc.java b/test/src/main/java/org/apache/accumulo/harness/TestingKdc.java
new file mode 100644
index 0000000..9471274
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/harness/TestingKdc.java
@@ -0,0 +1,210 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.harness;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.net.InetAddress;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.junit.Assert;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Creates a {@link MiniKdc} for tests to use to exercise secure Accumulo
+ */
+public class TestingKdc {
+ private static final Logger log = LoggerFactory.getLogger(TestingKdc.class);
+
+ public static final int NUM_USERS = 10;
+
+ protected MiniKdc kdc = null;
+ protected ClusterUser accumuloServerUser = null, accumuloAdmin = null;
+ protected List<ClusterUser> clientPrincipals = null;
+
+ public final String ORG_NAME = "EXAMPLE", ORG_DOMAIN = "COM";
+
+ private String hostname;
+ private File keytabDir;
+ private boolean started = false;
+
+ public TestingKdc() throws Exception {
+ this(computeKdcDir(), computeKeytabDir());
+ }
+
+ private static File computeKdcDir() {
+ File targetDir = new File(System.getProperty("user.dir"), "target");
+ Assert.assertTrue("Could not find Maven target directory: " + targetDir, targetDir.exists() && targetDir.isDirectory());
+
+ // Create the directories: target/kerberos/minikdc
+ File kdcDir = new File(new File(targetDir, "kerberos"), "minikdc");
+
+ assertTrue(kdcDir.mkdirs() || kdcDir.isDirectory());
+
+ return kdcDir;
+ }
+
+ private static File computeKeytabDir() {
+ File targetDir = new File(System.getProperty("user.dir"), "target");
+ Assert.assertTrue("Could not find Maven target directory: " + targetDir, targetDir.exists() && targetDir.isDirectory());
+
+ // Create the directories: target/kerberos/keytabs
+ File keytabDir = new File(new File(targetDir, "kerberos"), "keytabs");
+
+ assertTrue(keytabDir.mkdirs() || keytabDir.isDirectory());
+
+ return keytabDir;
+ }
+
+ public TestingKdc(File kdcDir, File keytabDir) throws Exception {
+ checkNotNull(kdcDir, "KDC directory was null");
+ checkNotNull(keytabDir, "Keytab directory was null");
+
+ this.keytabDir = keytabDir;
+ this.hostname = InetAddress.getLocalHost().getCanonicalHostName();
+
+ log.debug("Starting MiniKdc in {} with keytabs in {}", kdcDir, keytabDir);
+
+ Properties kdcConf = MiniKdc.createConf();
+ kdcConf.setProperty(MiniKdc.ORG_NAME, ORG_NAME);
+ kdcConf.setProperty(MiniKdc.ORG_DOMAIN, ORG_DOMAIN);
+ // kdcConf.setProperty(MiniKdc.DEBUG, "true");
+ kdc = new MiniKdc(kdcConf, kdcDir);
+ }
+
+ /**
+ * Starts the KDC and creates the principals and their keytabs
+ */
+ public synchronized void start() throws Exception {
+ checkArgument(!started, "KDC was already started");
+ kdc.start();
+ Thread.sleep(1000); // brief pause to let the embedded KDC finish starting up
+
+ // Create the identity for accumulo servers
+ File accumuloKeytab = new File(keytabDir, "accumulo.keytab");
+ String accumuloPrincipal = String.format("accumulo/%s", hostname);
+
+ log.info("Creating Kerberos principal {} with keytab {}", accumuloPrincipal, accumuloKeytab);
+ kdc.createPrincipal(accumuloKeytab, accumuloPrincipal);
+
+ accumuloServerUser = new ClusterUser(qualifyUser(accumuloPrincipal), accumuloKeytab);
+
+ // Create the identity for the "root" user
+ String rootPrincipal = "root";
+ File rootKeytab = new File(keytabDir, rootPrincipal + ".keytab");
+
+ log.info("Creating Kerberos principal {} with keytab {}", rootPrincipal, rootKeytab);
+ kdc.createPrincipal(rootKeytab, rootPrincipal);
+
+ accumuloAdmin = new ClusterUser(qualifyUser(rootPrincipal), rootKeytab);
+
+ clientPrincipals = new ArrayList<>(NUM_USERS);
+ // Create a number of unprivileged users for tests to use
+ for (int i = 1; i <= NUM_USERS; i++) {
+ String clientPrincipal = "client" + i;
+ File clientKeytab = new File(keytabDir, clientPrincipal + ".keytab");
+
+ log.info("Creating Kerberos principal {} with keytab {}", clientPrincipal, clientKeytab);
+ kdc.createPrincipal(clientKeytab, clientPrincipal);
+
+ clientPrincipals.add(new ClusterUser(qualifyUser(clientPrincipal), clientKeytab));
+ }
+
+ started = true;
+ }
+
+ public synchronized void stop() throws Exception {
+ checkArgument(started, "KDC is not started");
+ kdc.stop();
+ started = false;
+ }
+
+ /**
+ * A directory where the automatically-created keytab files are written
+ */
+ public File getKeytabDir() {
+ return keytabDir;
+ }
+
+ /**
+ * A {@link ClusterUser} for Accumulo server processes to use
+ */
+ public ClusterUser getAccumuloServerUser() {
+ checkArgument(started, "The KDC is not started");
+ return accumuloServerUser;
+ }
+
+ /**
+ * A {@link ClusterUser} which is the Accumulo "root" user
+ */
+ public ClusterUser getRootUser() {
+ checkArgument(started, "The KDC is not started");
+ return accumuloAdmin;
+ }
+
+ /**
+ * The {@link ClusterUser} corresponding to the given offset. Represents an unprivileged user.
+ *
+ * @param offset
+ * The offset to fetch credentials for; valid values are 0 through {@link #NUM_USERS} - 1
+ */
+ public ClusterUser getClientPrincipal(int offset) {
+ checkArgument(started, "Client principal is not initialized, is the KDC started?");
+ checkArgument(offset >= 0 && offset < NUM_USERS, "Offset is invalid, must be non-negative and less than " + NUM_USERS);
+ return clientPrincipals.get(offset);
+ }
+
+ /**
+ * @see MiniKdc#createPrincipal(File, String...)
+ */
+ public void createPrincipal(File keytabFile, String... principals) throws Exception {
+ checkArgument(started, "KDC is not started");
+ kdc.createPrincipal(keytabFile, principals);
+ }
+
+ /**
+ * @return the name for the realm
+ */
+ public String getOrgName() {
+ return ORG_NAME;
+ }
+
+ /**
+ * @return the domain for the realm
+ */
+ public String getOrgDomain() {
+ return ORG_DOMAIN;
+ }
+
+ /**
+ * Qualify a username (only the primary from the kerberos principal) with the proper realm
+ *
+ * @param primary
+ * The primary or primary and instance
+ */
+ public String qualifyUser(String primary) {
+ return String.format("%s@%s.%s", primary, getOrgName(), getOrgDomain());
+ }
+}
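
A minimal sketch (assumed standalone usage, not part of this commit) of driving TestingKdc directly, using only the methods defined above; the driver class name is hypothetical:

    package org.apache.accumulo.harness;

    import org.apache.accumulo.cluster.ClusterUser;

    public class TestingKdcDemo { // hypothetical driver class
      public static void main(String[] args) throws Exception {
        // KDC state lands in target/kerberos/minikdc, keytabs in target/kerberos/keytabs
        TestingKdc kdc = new TestingKdc();
        kdc.start(); // creates the accumulo/<host> service principal, "root", and client1..client10
        try {
          ClusterUser root = kdc.getRootUser();
          System.out.println("Root principal: " + root.getPrincipal());
          System.out.println("Root keytab:    " + root.getKeytab().getAbsolutePath());
          System.out.println("First client:   " + kdc.getClientPrincipal(0).getPrincipal());
        } finally {
          kdc.stop();
        }
      }
    }
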
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/harness/conf/AccumuloClusterConfiguration.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/harness/conf/AccumuloClusterConfiguration.java b/test/src/main/java/org/apache/accumulo/harness/conf/AccumuloClusterConfiguration.java
new file mode 100644
index 0000000..31ed94a
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/harness/conf/AccumuloClusterConfiguration.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.harness.conf;
+
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.harness.AccumuloClusterHarness.ClusterType;
+
+/**
+ * Base functionality that must be provided as configuration to the test
+ */
+public interface AccumuloClusterConfiguration {
+
+ ClusterType getClusterType();
+
+ String getAdminPrincipal();
+
+ AuthenticationToken getAdminToken();
+
+ ClientConfiguration getClientConf();
+}
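
A minimal sketch (hypothetical, not part of this commit) of satisfying this contract for a password-authenticated test run; the class name and password are illustrative only:

    package org.apache.accumulo.harness.conf;

    import org.apache.accumulo.core.client.ClientConfiguration;
    import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
    import org.apache.accumulo.harness.AccumuloClusterHarness.ClusterType;

    public class FixedPasswordConfiguration implements AccumuloClusterConfiguration {
      @Override
      public ClusterType getClusterType() {
        return ClusterType.MINI;
      }

      @Override
      public String getAdminPrincipal() {
        return "root";
      }

      @Override
      public AuthenticationToken getAdminToken() {
        return new PasswordToken("secret"); // hypothetical password
      }

      @Override
      public ClientConfiguration getClientConf() {
        return ClientConfiguration.loadDefault();
      }
    }
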
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/harness/conf/AccumuloClusterPropertyConfiguration.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/harness/conf/AccumuloClusterPropertyConfiguration.java b/test/src/main/java/org/apache/accumulo/harness/conf/AccumuloClusterPropertyConfiguration.java
new file mode 100644
index 0000000..2300da3
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/harness/conf/AccumuloClusterPropertyConfiguration.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.harness.conf;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Properties;
+
+import org.apache.accumulo.harness.AccumuloClusterHarness.ClusterType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Base class for extracting configuration values from Java Properties
+ */
+public abstract class AccumuloClusterPropertyConfiguration implements AccumuloClusterConfiguration {
+ private static final Logger log = LoggerFactory.getLogger(AccumuloClusterPropertyConfiguration.class);
+
+ public static final String ACCUMULO_IT_PROPERTIES_FILE = "accumulo.it.properties";
+
+ public static final String ACCUMULO_CLUSTER_TYPE_KEY = "accumulo.it.cluster.type";
+
+ public static final String ACCUMULO_MINI_PREFIX = "accumulo.it.cluster.mini.";
+ public static final String ACCUMULO_STANDALONE_PREFIX = "accumulo.it.cluster.standalone.";
+
+ public static final String ACCUMULO_CLUSTER_CLIENT_CONF_KEY = "accumulo.it.cluster.clientconf";
+
+ protected ClusterType clusterType;
+
+ public static AccumuloClusterPropertyConfiguration get() {
+ Properties systemProperties = System.getProperties();
+
+ String clusterTypeValue = null, clientConf = null;
+ String propertyFile = systemProperties.getProperty(ACCUMULO_IT_PROPERTIES_FILE);
+
+ if (null != propertyFile) {
+ // Check for properties provided in a file
+ File f = new File(propertyFile);
+ if (f.exists() && f.isFile() && f.canRead()) {
+ Properties fileProperties = new Properties();
+ FileReader reader = null;
+ try {
+ reader = new FileReader(f);
+ } catch (FileNotFoundException e) {
+ log.warn("Could not read properties from specified file: {}", propertyFile, e);
+ }
+
+ if (null != reader) {
+ try {
+ fileProperties.load(reader);
+ } catch (IOException e) {
+ log.warn("Could not load properties from specified file: {}", propertyFile, e);
+ } finally {
+ try {
+ reader.close();
+ } catch (IOException e) {
+ log.warn("Could not close reader", e);
+ }
+ }
+
+ clusterTypeValue = fileProperties.getProperty(ACCUMULO_CLUSTER_TYPE_KEY);
+ clientConf = fileProperties.getProperty(ACCUMULO_CLUSTER_CLIENT_CONF_KEY);
+ }
+ } else {
+ log.debug("Property file ({}) is not a readable file", propertyFile);
+ }
+ } else {
+ log.debug("No properties file found in {}", ACCUMULO_IT_PROPERTIES_FILE);
+ }
+
+ if (null == clusterTypeValue) {
+ clusterTypeValue = systemProperties.getProperty(ACCUMULO_CLUSTER_TYPE_KEY);
+ }
+
+ if (null == clientConf) {
+ clientConf = systemProperties.getProperty(ACCUMULO_CLUSTER_CLIENT_CONF_KEY);
+ }
+
+ ClusterType type;
+ if (null == clusterTypeValue) {
+ type = ClusterType.MINI;
+ } else {
+ type = ClusterType.valueOf(clusterTypeValue);
+ }
+
+ log.info("Using {} cluster type from system properties", type);
+
+ switch (type) {
+ case MINI:
+ // Let a null client conf pass through; the caller is expected to set it after the MAC is started
+ return new AccumuloMiniClusterConfiguration();
+ case STANDALONE:
+ if (null == clientConf) {
+ throw new RuntimeException("Expected client configuration to be provided: " + ACCUMULO_CLUSTER_CLIENT_CONF_KEY);
+ }
+ File clientConfFile = new File(clientConf);
+ if (!clientConfFile.exists() || !clientConfFile.isFile()) {
+ throw new RuntimeException("Client configuration should be a normal file: " + clientConfFile);
+ }
+ return new StandaloneAccumuloClusterConfiguration(clientConfFile);
+ default:
+ throw new RuntimeException("Clusters other than MiniAccumuloCluster are not yet implemented");
+ }
+ }
+
+ public Map<String,String> getConfiguration(ClusterType type) {
+ Preconditions.checkNotNull(type);
+
+ String prefix;
+ switch (type) {
+ case MINI:
+ prefix = ACCUMULO_MINI_PREFIX;
+ break;
+ case STANDALONE:
+ prefix = ACCUMULO_STANDALONE_PREFIX;
+ break;
+ default:
+ throw new IllegalArgumentException("Unknown ClusterType: " + type);
+ }
+
+ Map<String,String> configuration = new HashMap<String,String>();
+
+ Properties systemProperties = System.getProperties();
+
+ String propertyFile = systemProperties.getProperty(ACCUMULO_IT_PROPERTIES_FILE);
+
+ // Check for properties provided in a file
+ if (null != propertyFile) {
+ File f = new File(propertyFile);
+ if (f.exists() && f.isFile() && f.canRead()) {
+ Properties fileProperties = new Properties();
+ FileReader reader = null;
+ try {
+ reader = new FileReader(f);
+ } catch (FileNotFoundException e) {
+ log.warn("Could not read properties from specified file: {}", propertyFile, e);
+ }
+
+ if (null != reader) {
+ try {
+ fileProperties.load(reader);
+ loadFromProperties(prefix, fileProperties, configuration);
+ } catch (IOException e) {
+ log.warn("Could not load properties from specified file: {}", propertyFile, e);
+ } finally {
+ try {
+ reader.close();
+ } catch (IOException e) {
+ log.warn("Could not close reader", e);
+ }
+ }
+ }
+ }
+ }
+
+ // Load any properties specified directly in the system properties
+ loadFromProperties(prefix, systemProperties, configuration);
+
+ return configuration;
+ }
+
+ protected void loadFromProperties(String desiredPrefix, Properties properties, Map<String,String> configuration) {
+ for (Entry<Object,Object> entry : properties.entrySet()) {
+ if (!(entry.getKey() instanceof String)) {
+ continue;
+ }
+
+ String key = (String) entry.getKey();
+ if (key.startsWith(desiredPrefix)) {
+ configuration.put(key, (String) entry.getValue());
+ }
+ }
+ }
+}
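
A minimal sketch (assumed usage, file path and class name hypothetical, not part of this commit) of how the lookup in get() above is driven. Values read from the properties file take precedence; bare system properties are the fallback:

    package org.apache.accumulo.harness.conf;

    public class ClusterConfigDemo { // hypothetical driver class
      public static void main(String[] args) {
        // Option 1: point at a properties file containing the accumulo.it.cluster.* keys.
        // System.setProperty(AccumuloClusterPropertyConfiguration.ACCUMULO_IT_PROPERTIES_FILE,
        //     "/path/to/accumulo.it.properties"); // hypothetical path

        // Option 2: set the keys directly as system properties.
        System.setProperty(AccumuloClusterPropertyConfiguration.ACCUMULO_CLUSTER_TYPE_KEY, "MINI");

        // MINI is also the default when no cluster type is configured at all.
        AccumuloClusterPropertyConfiguration conf = AccumuloClusterPropertyConfiguration.get();
        System.out.println(conf.getClusterType());
      }
    }
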
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java
deleted file mode 100644
index 9d0ce86..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java
+++ /dev/null
@@ -1,660 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static com.google.common.base.Charsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster;
-import org.apache.accumulo.cluster.standalone.StandaloneClusterControl;
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.user.AgeOffFilter;
-import org.apache.accumulo.core.iterators.user.SummingCombiner;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.examples.simple.client.Flush;
-import org.apache.accumulo.examples.simple.client.RandomBatchScanner;
-import org.apache.accumulo.examples.simple.client.RandomBatchWriter;
-import org.apache.accumulo.examples.simple.client.ReadWriteExample;
-import org.apache.accumulo.examples.simple.client.RowOperations;
-import org.apache.accumulo.examples.simple.client.SequentialBatchWriter;
-import org.apache.accumulo.examples.simple.client.TraceDumpExample;
-import org.apache.accumulo.examples.simple.client.TracingExample;
-import org.apache.accumulo.examples.simple.combiner.StatsCombiner;
-import org.apache.accumulo.examples.simple.constraints.MaxMutationSize;
-import org.apache.accumulo.examples.simple.dirlist.Ingest;
-import org.apache.accumulo.examples.simple.dirlist.QueryUtil;
-import org.apache.accumulo.examples.simple.helloworld.InsertWithBatchWriter;
-import org.apache.accumulo.examples.simple.helloworld.ReadData;
-import org.apache.accumulo.examples.simple.isolation.InterferenceTest;
-import org.apache.accumulo.examples.simple.mapreduce.RegexExample;
-import org.apache.accumulo.examples.simple.mapreduce.RowHash;
-import org.apache.accumulo.examples.simple.mapreduce.TableToFile;
-import org.apache.accumulo.examples.simple.mapreduce.TeraSortIngest;
-import org.apache.accumulo.examples.simple.mapreduce.WordCount;
-import org.apache.accumulo.examples.simple.mapreduce.bulk.BulkIngestExample;
-import org.apache.accumulo.examples.simple.mapreduce.bulk.GenerateTestData;
-import org.apache.accumulo.examples.simple.mapreduce.bulk.SetupTable;
-import org.apache.accumulo.examples.simple.mapreduce.bulk.VerifyIngest;
-import org.apache.accumulo.examples.simple.shard.ContinuousQuery;
-import org.apache.accumulo.examples.simple.shard.Index;
-import org.apache.accumulo.examples.simple.shard.Query;
-import org.apache.accumulo.examples.simple.shard.Reverse;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.MemoryUnit;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl.LogWriter;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.start.Main;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.tracer.TraceServer;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.util.Tool;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterators;
-
-public class ExamplesIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(ExamplesIT.class);
- private static final BatchWriterOpts bwOpts = new BatchWriterOpts();
- private static final BatchWriterConfig bwc = new BatchWriterConfig();
- private static final String visibility = "A|B";
- private static final String auths = "A,B";
-
- Connector c;
- String instance;
- String keepers;
- String user;
- String passwd;
- String keytab;
- BatchWriter bw;
- IteratorSetting is;
- String dir;
- FileSystem fs;
- Authorizations origAuths;
- boolean saslEnabled;
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopConf) {
- // 128MB * 3
- cfg.setDefaultMemory(cfg.getDefaultMemory() * 3, MemoryUnit.BYTE);
- }
-
- @Before
- public void getClusterInfo() throws Exception {
- c = getConnector();
- user = getAdminPrincipal();
- AuthenticationToken token = getAdminToken();
- if (token instanceof KerberosToken) {
- keytab = getAdminUser().getKeytab().getAbsolutePath();
- saslEnabled = true;
- } else if (token instanceof PasswordToken) {
- passwd = new String(((PasswordToken) getAdminToken()).getPassword(), UTF_8);
- saslEnabled = false;
- } else {
- Assert.fail("Unknown token type: " + token);
- }
- fs = getCluster().getFileSystem();
- instance = c.getInstance().getInstanceName();
- keepers = c.getInstance().getZooKeepers();
- dir = new Path(cluster.getTemporaryPath(), getClass().getName()).toString();
-
- origAuths = c.securityOperations().getUserAuthorizations(user);
- c.securityOperations().changeUserAuthorizations(user, new Authorizations(auths.split(",")));
- }
-
- @After
- public void resetAuths() throws Exception {
- if (null != origAuths) {
- getConnector().securityOperations().changeUserAuthorizations(getAdminPrincipal(), origAuths);
- }
- }
-
- @Override
- public int defaultTimeoutSeconds() {
- return 6 * 60;
- }
-
- @Test
- public void testTrace() throws Exception {
- Process trace = null;
- if (ClusterType.MINI == getClusterType()) {
- MiniAccumuloClusterImpl impl = (MiniAccumuloClusterImpl) cluster;
- trace = impl.exec(TraceServer.class);
- while (!c.tableOperations().exists("trace"))
- UtilWaitThread.sleep(500);
- }
- String[] args;
- if (saslEnabled) {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-C", "-D", "-c"};
- } else {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-C", "-D", "-c"};
- }
- Entry<Integer,String> pair = cluster.getClusterControl().execWithStdout(TracingExample.class, args);
- Assert.assertEquals("Expected return code of zero. STDOUT=" + pair.getValue(), 0, pair.getKey().intValue());
- String result = pair.getValue();
- Pattern pattern = Pattern.compile("TraceID: ([0-9a-f]+)");
- Matcher matcher = pattern.matcher(result);
- int count = 0;
- while (matcher.find()) {
- if (saslEnabled) {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--traceid", matcher.group(1)};
- } else {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--traceid", matcher.group(1)};
- }
- pair = cluster.getClusterControl().execWithStdout(TraceDumpExample.class, args);
- count++;
- }
- assertTrue(count > 0);
- assertTrue("Output did not contain myApp@myHost", pair.getValue().contains("myApp@myHost"));
- if (ClusterType.MINI == getClusterType() && null != trace) {
- trace.destroy();
- }
- }
-
- @Test
- public void testClasspath() throws Exception {
- Entry<Integer,String> entry = getCluster().getClusterControl().execWithStdout(Main.class, new String[] {"classpath"});
- assertEquals(0, entry.getKey().intValue());
- String result = entry.getValue();
- int level1 = result.indexOf("Level 1");
- int level2 = result.indexOf("Level 2");
- int level3 = result.indexOf("Level 3");
- int level4 = result.indexOf("Level 4");
- assertTrue("Level 1 classloader not present.", level1 >= 0);
- assertTrue("Level 2 classloader not present.", level2 > 0);
- assertTrue("Level 3 classloader not present.", level3 > 0);
- assertTrue("Level 4 classloader not present.", level4 > 0);
- assertTrue(level1 < level2);
- assertTrue(level2 < level3);
- assertTrue(level3 < level4);
- }
-
- @Test
- public void testDirList() throws Exception {
- String[] names = getUniqueNames(3);
- String dirTable = names[0], indexTable = names[1], dataTable = names[2];
- String[] args;
- String dirListDirectory;
- switch (getClusterType()) {
- case MINI:
- dirListDirectory = ((MiniAccumuloClusterImpl) getCluster()).getConfig().getDir().getAbsolutePath();
- break;
- case STANDALONE:
- dirListDirectory = ((StandaloneAccumuloCluster) getCluster()).getAccumuloHome();
- break;
- default:
- throw new RuntimeException("Unknown cluster type");
- }
- // Index a directory listing. If this is running against a standalone cluster, we can't guarantee the Accumulo source tree will be there.
- if (saslEnabled) {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--dirTable", dirTable, "--indexTable", indexTable, "--dataTable",
- dataTable, "--vis", visibility, "--chunkSize", Integer.toString(10000), dirListDirectory};
- } else {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--dirTable", dirTable, "--indexTable", indexTable, "--dataTable",
- dataTable, "--vis", visibility, "--chunkSize", Integer.toString(10000), dirListDirectory};
- }
- Entry<Integer,String> entry = getClusterControl().execWithStdout(Ingest.class, args);
- assertEquals("Got non-zero return code. Stdout=" + entry.getValue(), 0, entry.getKey().intValue());
-
- String expectedFile;
- switch (getClusterType()) {
- case MINI:
- // Should be present in a minicluster dir
- expectedFile = "accumulo-site.xml";
- break;
- case STANDALONE:
- // Should be in place on standalone installs (not having to follow symlinks)
- expectedFile = "LICENSE";
- break;
- default:
- throw new RuntimeException("Unknown cluster type");
- }
- if (saslEnabled) {
- args = new String[] {"-i", instance, "-z", keepers, "--keytab", keytab, "-u", user, "-t", indexTable, "--auths", auths, "--search", "--path",
- expectedFile};
- } else {
- args = new String[] {"-i", instance, "-z", keepers, "-p", passwd, "-u", user, "-t", indexTable, "--auths", auths, "--search", "--path", expectedFile};
- }
- entry = getClusterControl().execWithStdout(QueryUtil.class, args);
- if (ClusterType.MINI == getClusterType()) {
- MiniAccumuloClusterImpl impl = (MiniAccumuloClusterImpl) cluster;
- for (LogWriter writer : impl.getLogWriters()) {
- writer.flush();
- }
- }
-
- log.info("result " + entry.getValue());
- assertEquals(0, entry.getKey().intValue());
- assertTrue(entry.getValue().contains(expectedFile));
- }
-
- @Test
- public void testAgeoffFilter() throws Exception {
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- is = new IteratorSetting(10, AgeOffFilter.class);
- AgeOffFilter.setTTL(is, 1000L);
- c.tableOperations().attachIterator(tableName, is);
- UtilWaitThread.sleep(500); // let zookeeper updates propagate.
- bw = c.createBatchWriter(tableName, bwc);
- Mutation m = new Mutation("foo");
- m.put("a", "b", "c");
- bw.addMutation(m);
- bw.close();
- UtilWaitThread.sleep(1000);
- assertEquals(0, Iterators.size(c.createScanner(tableName, Authorizations.EMPTY).iterator()));
- }
-
- @Test
- public void testStatsCombiner() throws Exception {
- String table = getUniqueNames(1)[0];
- c.tableOperations().create(table);
- is = new IteratorSetting(10, StatsCombiner.class);
- StatsCombiner.setCombineAllColumns(is, true);
-
- c.tableOperations().attachIterator(table, is);
- bw = c.createBatchWriter(table, bwc);
- // Write two mutations; otherwise the NativeMap would dedupe them into a single update
- Mutation m = new Mutation("foo");
- m.put("a", "b", "1");
- bw.addMutation(m);
- m = new Mutation("foo");
- m.put("a", "b", "3");
- bw.addMutation(m);
- bw.flush();
-
- Iterator<Entry<Key,Value>> iter = c.createScanner(table, Authorizations.EMPTY).iterator();
- assertTrue("Iterator had no results", iter.hasNext());
- Entry<Key,Value> e = iter.next();
- assertEquals("Results ", "1,3,4,2", e.getValue().toString());
- assertFalse("Iterator had additional results", iter.hasNext());
-
- m = new Mutation("foo");
- m.put("a", "b", "0,20,20,2");
- bw.addMutation(m);
- bw.close();
-
- iter = c.createScanner(table, Authorizations.EMPTY).iterator();
- assertTrue("Iterator had no results", iter.hasNext());
- e = iter.next();
- assertEquals("Results ", "0,20,24,4", e.getValue().toString());
- assertFalse("Iterator had additional results", iter.hasNext());
- }
-
- @Test
- public void testBloomFilters() throws Exception {
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- c.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ENABLED.getKey(), "true");
- String[] args;
- if (saslEnabled) {
- args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--num", "100000", "--min", "0", "--max",
- "1000000000", "--size", "50", "--batchMemory", "2M", "--batchLatency", "60s", "--batchThreads", "3", "-t", tableName};
- } else {
- args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--num", "100000", "--min", "0", "--max", "1000000000",
- "--size", "50", "--batchMemory", "2M", "--batchLatency", "60s", "--batchThreads", "3", "-t", tableName};
- }
- goodExec(RandomBatchWriter.class, args);
- c.tableOperations().flush(tableName, null, null, true);
- long diff = 0, diff2 = 0;
- // try the speed test a couple times in case the system is loaded with other tests
- for (int i = 0; i < 2; i++) {
- long now = System.currentTimeMillis();
- if (saslEnabled) {
- args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--num", "10000", "--min", "0", "--max",
- "1000000000", "--size", "50", "--scanThreads", "4", "-t", tableName};
- } else {
- args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--num", "10000", "--min", "0", "--max", "1000000000",
- "--size", "50", "--scanThreads", "4", "-t", tableName};
- }
- goodExec(RandomBatchScanner.class, args);
- diff = System.currentTimeMillis() - now;
- now = System.currentTimeMillis();
- if (saslEnabled) {
- args = new String[] {"--seed", "8", "-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--num", "10000", "--min", "0", "--max",
- "1000000000", "--size", "50", "--scanThreads", "4", "-t", tableName};
- } else {
- args = new String[] {"--seed", "8", "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--num", "10000", "--min", "0", "--max", "1000000000",
- "--size", "50", "--scanThreads", "4", "-t", tableName};
- }
- int retCode = getClusterControl().exec(RandomBatchScanner.class, args);
- assertEquals(1, retCode);
- diff2 = System.currentTimeMillis() - now;
- if (diff2 < diff)
- break;
- }
- assertTrue(diff2 < diff);
- }
-
- @Test
- public void testShardedIndex() throws Exception {
- String[] names = getUniqueNames(3);
- final String shard = names[0], index = names[1];
- c.tableOperations().create(shard);
- c.tableOperations().create(index);
- bw = c.createBatchWriter(shard, bwc);
- Index.index(30, new File(System.getProperty("user.dir") + "/src"), "\\W+", bw);
- bw.close();
- BatchScanner bs = c.createBatchScanner(shard, Authorizations.EMPTY, 4);
- List<String> found = Query.query(bs, Arrays.asList("foo", "bar"));
- bs.close();
- // should find ourselves
- boolean thisFile = false;
- for (String file : found) {
- if (file.endsWith("/ExamplesIT.java"))
- thisFile = true;
- }
- assertTrue(thisFile);
-
- String[] args;
- if (saslEnabled) {
- args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", user, "--keytab", keytab};
- } else {
- args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", getAdminPrincipal(), "-p", passwd};
- }
- // create a reverse index
- goodExec(Reverse.class, args);
-
- if (saslEnabled) {
- args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", user, "--keytab", keytab, "--terms", "5",
- "--count", "1000"};
- } else {
- args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", user, "-p", passwd, "--terms", "5", "--count",
- "1000"};
- }
- // run some queries
- goodExec(ContinuousQuery.class, args);
- }
-
- @Test
- public void testMaxMutationConstraint() throws Exception {
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- c.tableOperations().addConstraint(tableName, MaxMutationSize.class.getName());
- TestIngest.Opts opts = new TestIngest.Opts();
- opts.rows = 1;
- opts.cols = 1000;
- opts.setTableName(tableName);
- if (saslEnabled) {
- opts.updateKerberosCredentials(cluster.getClientConfig());
- } else {
- opts.setPrincipal(getAdminPrincipal());
- }
- try {
- TestIngest.ingest(c, opts, bwOpts);
- } catch (MutationsRejectedException ex) {
- assertEquals(1, ex.getConstraintViolationSummaries().size());
- }
- }
-
- @Test
- public void testBulkIngest() throws Exception {
- // TODO Figure out a way to run M/R with Kerberos
- Assume.assumeTrue(getAdminToken() instanceof PasswordToken);
- String tableName = getUniqueNames(1)[0];
- FileSystem fs = getFileSystem();
- Path p = new Path(dir, "tmp");
- if (fs.exists(p)) {
- fs.delete(p, true);
- }
- goodExec(GenerateTestData.class, "--start-row", "0", "--count", "10000", "--output", dir + "/tmp/input/data");
-
- List<String> commonArgs = new ArrayList<>(Arrays.asList(new String[] {"-i", instance, "-z", keepers, "-u", user, "--table", tableName}));
- if (saslEnabled) {
- commonArgs.add("--keytab");
- commonArgs.add(keytab);
- } else {
- commonArgs.add("-p");
- commonArgs.add(passwd);
- }
-
- List<String> args = new ArrayList<>(commonArgs);
- goodExec(SetupTable.class, args.toArray(new String[0]));
-
- args = new ArrayList<>(commonArgs);
- args.addAll(Arrays.asList(new String[] {"--inputDir", dir + "/tmp/input", "--workDir", dir + "/tmp"}));
- goodExec(BulkIngestExample.class, args.toArray(new String[0]));
-
- args = new ArrayList<>(commonArgs);
- args.addAll(Arrays.asList(new String[] {"--start-row", "0", "--count", "10000"}));
- goodExec(VerifyIngest.class, args.toArray(new String[0]));
- }
-
- @Test
- public void testTeraSortAndRead() throws Exception {
- // TODO Figure out a way to run M/R with Kerberos
- Assume.assumeTrue(getAdminToken() instanceof PasswordToken);
- String tableName = getUniqueNames(1)[0];
- String[] args;
- if (saslEnabled) {
- args = new String[] {"--count", (1000 * 1000) + "", "-nk", "10", "-xk", "10", "-nv", "10", "-xv", "10", "-t", tableName, "-i", instance, "-z", keepers,
- "-u", user, "--keytab", keytab, "--splits", "4"};
- } else {
- args = new String[] {"--count", (1000 * 1000) + "", "-nk", "10", "-xk", "10", "-nv", "10", "-xv", "10", "-t", tableName, "-i", instance, "-z", keepers,
- "-u", user, "-p", passwd, "--splits", "4"};
- }
- goodExec(TeraSortIngest.class, args);
- Path output = new Path(dir, "tmp/nines");
- if (fs.exists(output)) {
- fs.delete(output, true);
- }
- if (saslEnabled) {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--rowRegex", ".*999.*", "--output",
- output.toString()};
- } else {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--rowRegex", ".*999.*", "--output", output.toString()};
- }
- goodExec(RegexExample.class, args);
- if (saslEnabled) {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--column", "c:"};
- } else {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--column", "c:"};
- }
- goodExec(RowHash.class, args);
- output = new Path(dir, "tmp/tableFile");
- if (fs.exists(output)) {
- fs.delete(output, true);
- }
- if (saslEnabled) {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--output", output.toString()};
- } else {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--output", output.toString()};
- }
- goodExec(TableToFile.class, args);
- }
-
- @Test
- public void testWordCount() throws Exception {
- // TODO Figure out a way to run M/R with Kerberos
- Assume.assumeTrue(getAdminToken() instanceof PasswordToken);
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- is = new IteratorSetting(10, SummingCombiner.class);
- SummingCombiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column(new Text("count"))));
- SummingCombiner.setEncodingType(is, SummingCombiner.Type.STRING);
- c.tableOperations().attachIterator(tableName, is);
- fs.copyFromLocalFile(new Path(new Path(System.getProperty("user.dir")).getParent(), "README.md"), new Path(dir + "/tmp/wc/README.md"));
- String[] args;
- if (saslEnabled) {
- args = new String[] {"-i", instance, "-u", user, "--keytab", keytab, "-z", keepers, "--input", dir + "/tmp/wc", "-t", tableName};
- } else {
- args = new String[] {"-i", instance, "-u", user, "-p", passwd, "-z", keepers, "--input", dir + "/tmp/wc", "-t", tableName};
- }
- goodExec(WordCount.class, args);
- }
-
- @Test
- public void testInsertWithBatchWriterAndReadData() throws Exception {
- String tableName = getUniqueNames(1)[0];
- String[] args;
- if (saslEnabled) {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName};
- } else {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName};
- }
- goodExec(InsertWithBatchWriter.class, args);
- goodExec(ReadData.class, args);
- }
-
- @Test
- public void testIsolatedScansWithInterference() throws Exception {
- String[] args;
- if (saslEnabled) {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", getUniqueNames(1)[0], "--iterations", "100000", "--isolated"};
- } else {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", getUniqueNames(1)[0], "--iterations", "100000", "--isolated"};
- }
- goodExec(InterferenceTest.class, args);
- }
-
- @Test
- public void testScansWithInterference() throws Exception {
- String[] args;
- if (saslEnabled) {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", getUniqueNames(1)[0], "--iterations", "100000"};
- } else {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", getUniqueNames(1)[0], "--iterations", "100000"};
- }
- goodExec(InterferenceTest.class, args);
- }
-
- @Test
- public void testRowOperations() throws Exception {
- String[] args;
- if (saslEnabled) {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab};
- } else {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd};
- }
- goodExec(RowOperations.class, args);
- }
-
- @Test
- public void testBatchWriter() throws Exception {
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- String[] args;
- if (saslEnabled) {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--start", "0", "--num", "100000", "--size", "50",
- "--batchMemory", "10000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
- } else {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--start", "0", "--num", "100000", "--size", "50",
- "--batchMemory", "10000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
- }
- goodExec(SequentialBatchWriter.class, args);
-
- }
-
- @Test
- public void testReadWriteAndDelete() throws Exception {
- String tableName = getUniqueNames(1)[0];
- String[] args;
- if (saslEnabled) {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--auths", auths, "--table", tableName, "--createtable", "-c",
- "--debug"};
- } else {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--auths", auths, "--table", tableName, "--createtable", "-c", "--debug"};
- }
- goodExec(ReadWriteExample.class, args);
- if (saslEnabled) {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--auths", auths, "--table", tableName, "-d", "--debug"};
- } else {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--auths", auths, "--table", tableName, "-d", "--debug"};
- }
- goodExec(ReadWriteExample.class, args);
-
- }
-
- @Test
- public void testRandomBatchesAndFlush() throws Exception {
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- String[] args;
- if (saslEnabled) {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--table", tableName, "--num", "100000", "--min", "0", "--max",
- "100000", "--size", "100", "--batchMemory", "1000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
- } else {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", tableName, "--num", "100000", "--min", "0", "--max", "100000",
- "--size", "100", "--batchMemory", "1000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
- }
- goodExec(RandomBatchWriter.class, args);
-
- if (saslEnabled) {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--table", tableName, "--num", "10000", "--min", "0", "--max",
- "100000", "--size", "100", "--scanThreads", "4", "--auths", auths};
- } else {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", tableName, "--num", "10000", "--min", "0", "--max", "100000",
- "--size", "100", "--scanThreads", "4", "--auths", auths};
- }
- goodExec(RandomBatchScanner.class, args);
-
- if (saslEnabled) {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--table", tableName};
- } else {
- args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", tableName};
- }
- goodExec(Flush.class, args);
- }
-
- private void goodExec(Class<?> theClass, String... args) throws InterruptedException, IOException {
- Entry<Integer,String> pair;
- if (Tool.class.isAssignableFrom(theClass) && ClusterType.STANDALONE == getClusterType()) {
- StandaloneClusterControl control = (StandaloneClusterControl) getClusterControl();
- pair = control.execMapreduceWithStdout(theClass, args);
- } else {
- // We're already slurping stdout into memory (not redirecting to file). Might as well add it to the error message.
- pair = getClusterControl().execWithStdout(theClass, args);
- }
- Assert.assertEquals("stdout=" + pair.getValue(), 0, pair.getKey().intValue());
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java b/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java
deleted file mode 100644
index b75a74e..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-/**
- * See ACCUMULO-779
- */
-public class FateStarvationIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Test
- public void run() throws Exception {
- String tableName = getUniqueNames(1)[0];
- Connector c = getConnector();
- c.tableOperations().create(tableName);
-
- c.tableOperations().addSplits(tableName, TestIngest.getSplitPoints(0, 100000, 50));
-
- TestIngest.Opts opts = new TestIngest.Opts();
- opts.random = 89;
- opts.timestamp = 7;
- opts.dataSize = 50;
- opts.rows = 100000;
- opts.cols = 1;
- opts.setTableName(tableName);
- ClientConfiguration clientConf = cluster.getClientConfig();
- if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- opts.updateKerberosCredentials(clientConf);
- } else {
- opts.setPrincipal(getAdminPrincipal());
- }
- TestIngest.ingest(c, opts, new BatchWriterOpts());
-
- c.tableOperations().flush(tableName, null, null, true);
-
- List<Text> splits = new ArrayList<Text>(TestIngest.getSplitPoints(0, 100000, 67));
- Random rand = new Random();
-
- for (int i = 0; i < 100; i++) {
- int idx1 = rand.nextInt(splits.size() - 1);
- int idx2 = rand.nextInt(splits.size() - (idx1 + 1)) + idx1 + 1;
-
- c.tableOperations().compact(tableName, splits.get(idx1), splits.get(idx2), false, false);
- }
-
- c.tableOperations().offline(tableName);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java b/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
deleted file mode 100644
index 05d0562..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertFalse;
-
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl.LogWriter;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-
-import com.google.common.collect.Iterators;
-
-public class FunctionalTestUtils {
-
- public static int countRFiles(Connector c, String tableName) throws Exception {
- Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- String tableId = c.tableOperations().tableIdMap().get(tableName);
- scanner.setRange(MetadataSchema.TabletsSection.getRange(tableId));
- scanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
-
- return Iterators.size(scanner.iterator());
- }
-
- static void checkRFiles(Connector c, String tableName, int minTablets, int maxTablets, int minRFiles, int maxRFiles) throws Exception {
- Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- String tableId = c.tableOperations().tableIdMap().get(tableName);
- scanner.setRange(new Range(new Text(tableId + ";"), true, new Text(tableId + "<"), true));
- scanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
- MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
-
- HashMap<Text,Integer> tabletFileCounts = new HashMap<Text,Integer>();
-
- for (Entry<Key,Value> entry : scanner) {
-
- Text row = entry.getKey().getRow();
-
- Integer count = tabletFileCounts.get(row);
- if (count == null)
- count = 0;
- if (entry.getKey().getColumnFamily().equals(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME)) {
- count = count + 1;
- }
-
- tabletFileCounts.put(row, count);
- }
-
- if (tabletFileCounts.size() < minTablets || tabletFileCounts.size() > maxTablets) {
- throw new Exception("Did not find expected number of tablets " + tabletFileCounts.size());
- }
-
- Set<Entry<Text,Integer>> es = tabletFileCounts.entrySet();
- for (Entry<Text,Integer> entry : es) {
- if (entry.getValue() > maxRFiles || entry.getValue() < minRFiles) {
- throw new Exception("tablet " + entry.getKey() + " has " + entry.getValue() + " map files");
- }
- }
- }
-
- static public void bulkImport(Connector c, FileSystem fs, String table, String dir) throws Exception {
- String failDir = dir + "_failures";
- Path failPath = new Path(failDir);
- fs.delete(failPath, true);
- fs.mkdirs(failPath);
-
- // Ensure server can read/modify files
- FsShell fsShell = new FsShell(fs.getConf());
- Assert.assertEquals("Failed to chmod " + dir, 0, fsShell.run(new String[] {"-chmod", "-R", "777", dir}));
- Assert.assertEquals("Failed to chmod " + failDir, 0, fsShell.run(new String[] {"-chmod", "-R", "777", failDir}));
-
- c.tableOperations().importDirectory(table, dir, failDir, false);
-
- if (fs.listStatus(failPath).length > 0) {
- throw new Exception("Some files failed to bulk import");
- }
-
- }
-
- static public void checkSplits(Connector c, String table, int min, int max) throws Exception {
- Collection<Text> splits = c.tableOperations().listSplits(table);
- if (splits.size() < min || splits.size() > max) {
- throw new Exception("# of table splits points out of range, #splits=" + splits.size() + " table=" + table + " min=" + min + " max=" + max);
- }
- }
-
- static public void createRFiles(final Connector c, FileSystem fs, String path, int rows, int splits, int threads) throws Exception {
- fs.delete(new Path(path), true);
- ExecutorService threadPool = Executors.newFixedThreadPool(threads);
- final AtomicBoolean fail = new AtomicBoolean(false);
- for (int i = 0; i < rows; i += rows / splits) {
- final TestIngest.Opts opts = new TestIngest.Opts();
- opts.outputFile = String.format("%s/mf%s", path, i);
- opts.random = 56;
- opts.timestamp = 1;
- opts.dataSize = 50;
- opts.rows = rows / splits;
- opts.startRow = i;
- opts.cols = 1;
- threadPool.execute(new Runnable() {
- @Override
- public void run() {
- try {
- TestIngest.ingest(c, opts, new BatchWriterOpts());
- } catch (Exception e) {
- fail.set(true);
- }
- }
- });
- }
- threadPool.shutdown();
- threadPool.awaitTermination(1, TimeUnit.HOURS);
- assertFalse(fail.get());
- }
-
- static public String readAll(InputStream is) throws IOException {
- byte[] buffer = new byte[4096];
- StringBuffer result = new StringBuffer();
- while (true) {
- int n = is.read(buffer);
- if (n <= 0)
- break;
- result.append(new String(buffer, 0, n));
- }
- return result.toString();
- }
-
- public static String readAll(MiniAccumuloClusterImpl c, Class<?> klass, Process p) throws Exception {
- for (LogWriter writer : c.getLogWriters())
- writer.flush();
- return readAll(new FileInputStream(c.getConfig().getLogDir() + "/" + klass.getSimpleName() + "_" + p.hashCode() + ".out"));
- }
-
- static Mutation nm(String row, String cf, String cq, Value value) {
- Mutation m = new Mutation(new Text(row));
- m.put(new Text(cf), new Text(cq), value);
- return m;
- }
-
- static Mutation nm(String row, String cf, String cq, String value) {
- return nm(row, cf, cq, new Value(value.getBytes()));
- }
-
- public static SortedSet<Text> splits(String[] splits) {
- SortedSet<Text> result = new TreeSet<Text>();
- for (String split : splits)
- result.add(new Text(split));
- return result;
- }
-
-}
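A note on the readAll(InputStream) helper above: it decodes each 4 KB chunk with the platform default charset, so a multi-byte character split across two read() calls would be mangled. A minimal charset-safe sketch of the same helper (the name readAllUtf8 and the UTF-8 choice are assumptions, not part of the original class):

    static String readAllUtf8(InputStream is) throws IOException {
      // Buffer the raw bytes first so no multi-byte sequence is ever
      // decoded across a chunk boundary.
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      byte[] buffer = new byte[4096];
      int n;
      while ((n = is.read(buffer)) > 0) {
        baos.write(buffer, 0, n);
      }
      return new String(baos.toByteArray(), StandardCharsets.UTF_8);
    }

(Requires java.io.ByteArrayOutputStream and java.nio.charset.StandardCharsets in addition to the imports the class already has.)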
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java b/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
deleted file mode 100644
index a73f239..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.core.util.ServerServices;
-import org.apache.accumulo.core.util.ServerServices.Service;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.ZooLock;
-import org.apache.accumulo.gc.SimpleGarbageCollector;
-import org.apache.accumulo.minicluster.MemoryUnit;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.minicluster.impl.ProcessNotFoundException;
-import org.apache.accumulo.minicluster.impl.ProcessReference;
-import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.io.Text;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.KeeperException.NoNodeException;
-import org.junit.Assert;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class GarbageCollectorIT extends ConfigurableMacBase {
- private static final String OUR_SECRET = "itsreallysecret";
-
- @Override
- public int defaultTimeoutSeconds() {
- return 5 * 60;
- }
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
- cfg.setProperty(Property.INSTANCE_SECRET, OUR_SECRET);
- cfg.setProperty(Property.GC_CYCLE_START, "1");
- cfg.setProperty(Property.GC_CYCLE_DELAY, "1");
- cfg.setProperty(Property.GC_PORT, "0");
- cfg.setProperty(Property.TSERV_MAXMEM, "5K");
- cfg.setProperty(Property.TSERV_MAJC_DELAY, "1");
-
- // use raw local file system so walogs sync and flush will work
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- private void killMacGc() throws ProcessNotFoundException, InterruptedException, KeeperException {
- // kill gc started by MAC
- getCluster().killProcess(ServerType.GARBAGE_COLLECTOR, getCluster().getProcesses().get(ServerType.GARBAGE_COLLECTOR).iterator().next());
- // delete lock in zookeeper if there, this will allow next GC to start quickly
- String path = ZooUtil.getRoot(new ZooKeeperInstance(getCluster().getClientConfig())) + Constants.ZGC_LOCK;
- ZooReaderWriter zk = new ZooReaderWriter(cluster.getZooKeepers(), 30000, OUR_SECRET);
- try {
- ZooLock.deleteLock(zk, path);
- } catch (IllegalStateException e) {
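-      // ignore: the GC lock may already be gone, which is fine here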
-
- }
-
- assertNull(getCluster().getProcesses().get(ServerType.GARBAGE_COLLECTOR));
- }
-
- @Test
- public void gcTest() throws Exception {
- killMacGc();
- Connector c = getConnector();
- c.tableOperations().create("test_ingest");
- c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "5K");
- TestIngest.Opts opts = new TestIngest.Opts();
- VerifyIngest.Opts vopts = new VerifyIngest.Opts();
- vopts.rows = opts.rows = 10000;
- vopts.cols = opts.cols = 1;
- opts.setPrincipal("root");
- vopts.setPrincipal("root");
- TestIngest.ingest(c, opts, new BatchWriterOpts());
- c.tableOperations().compact("test_ingest", null, null, true, true);
- int before = countFiles();
- while (true) {
- UtilWaitThread.sleep(1000);
- int more = countFiles();
- if (more <= before)
- break;
- before = more;
- }
-
- // restart GC
- getCluster().start();
- UtilWaitThread.sleep(15 * 1000);
- int after = countFiles();
- VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
- assertTrue(after < before);
- }
-
- @Test
- public void gcLotsOfCandidatesIT() throws Exception {
- killMacGc();
-
- log.info("Filling metadata table with bogus delete flags");
- Connector c = getConnector();
- addEntries(c, new BatchWriterOpts());
- cluster.getConfig().setDefaultMemory(10, MemoryUnit.MEGABYTE);
- Process gc = cluster.exec(SimpleGarbageCollector.class);
- UtilWaitThread.sleep(20 * 1000);
- String output = FunctionalTestUtils.readAll(cluster, SimpleGarbageCollector.class, gc);
- gc.destroy();
- assertTrue(output.contains("delete candidates has exceeded"));
- }
-
- @Test
- public void dontGCRootLog() throws Exception {
- killMacGc();
- // dirty metadata
- Connector c = getConnector();
- String table = getUniqueNames(1)[0];
- c.tableOperations().create(table);
- // let gc run for a bit
- cluster.start();
- UtilWaitThread.sleep(20 * 1000);
- killMacGc();
- // kill tservers
- for (ProcessReference ref : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
- cluster.killProcess(ServerType.TABLET_SERVER, ref);
- }
- // run recovery
- cluster.start();
- // did it recover?
- Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- Iterators.size(scanner.iterator());
- }
-
- private Mutation createDelMutation(String path, String cf, String cq, String val) {
- Text row = new Text(MetadataSchema.DeletesSection.getRowPrefix() + path);
- Mutation delFlag = new Mutation(row);
- delFlag.put(cf, cq, val);
- return delFlag;
- }
-
- @Test
- public void testInvalidDelete() throws Exception {
- killMacGc();
-
- String table = getUniqueNames(1)[0];
- getConnector().tableOperations().create(table);
-
- BatchWriter bw2 = getConnector().createBatchWriter(table, new BatchWriterConfig());
- Mutation m1 = new Mutation("r1");
- m1.put("cf1", "cq1", "v1");
- bw2.addMutation(m1);
- bw2.close();
-
- getConnector().tableOperations().flush(table, null, null, true);
-
-    // ensure an invalid delete entry does not cause the GC to go berserk (ACCUMULO-2520)
- getConnector().securityOperations().grantTablePermission(getConnector().whoami(), MetadataTable.NAME, TablePermission.WRITE);
- BatchWriter bw3 = getConnector().createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-
- bw3.addMutation(createDelMutation("", "", "", ""));
- bw3.addMutation(createDelMutation("", "testDel", "test", "valueTest"));
- bw3.addMutation(createDelMutation("/", "", "", ""));
- bw3.close();
-
- Process gc = cluster.exec(SimpleGarbageCollector.class);
- try {
- String output = "";
- while (!output.contains("Ingoring invalid deletion candidate")) {
- UtilWaitThread.sleep(250);
- try {
- output = FunctionalTestUtils.readAll(cluster, SimpleGarbageCollector.class, gc);
- } catch (IOException ioe) {
- log.error("Could not read all from cluster.", ioe);
- }
- }
- } finally {
- gc.destroy();
- }
-
- Scanner scanner = getConnector().createScanner(table, Authorizations.EMPTY);
- Iterator<Entry<Key,Value>> iter = scanner.iterator();
- assertTrue(iter.hasNext());
- Entry<Key,Value> entry = iter.next();
- Assert.assertEquals("r1", entry.getKey().getRow().toString());
- Assert.assertEquals("cf1", entry.getKey().getColumnFamily().toString());
- Assert.assertEquals("cq1", entry.getKey().getColumnQualifier().toString());
- Assert.assertEquals("v1", entry.getValue().toString());
- Assert.assertFalse(iter.hasNext());
- }
-
- @Test
- public void testProperPortAdvertisement() throws Exception {
-
- Connector conn = getConnector();
- Instance instance = conn.getInstance();
-
- ZooReaderWriter zk = new ZooReaderWriter(cluster.getZooKeepers(), 30000, OUR_SECRET);
- String path = ZooUtil.getRoot(instance) + Constants.ZGC_LOCK;
- for (int i = 0; i < 5; i++) {
- List<String> locks;
- try {
- locks = zk.getChildren(path, null);
- } catch (NoNodeException e) {
- Thread.sleep(5000);
- continue;
- }
-
- if (locks != null && locks.size() > 0) {
- Collections.sort(locks);
-
- String lockPath = path + "/" + locks.get(0);
-
- String gcLoc = new String(zk.getData(lockPath, null));
-
- Assert.assertTrue("Found unexpected data in zookeeper for GC location: " + gcLoc, gcLoc.startsWith(Service.GC_CLIENT.name()));
- int loc = gcLoc.indexOf(ServerServices.SEPARATOR_CHAR);
- Assert.assertNotEquals("Could not find split point of GC location for: " + gcLoc, -1, loc);
- String addr = gcLoc.substring(loc + 1);
-
- int addrSplit = addr.indexOf(':');
- Assert.assertNotEquals("Could not find split of GC host:port for: " + addr, -1, addrSplit);
-
- String host = addr.substring(0, addrSplit), port = addr.substring(addrSplit + 1);
- // We shouldn't have the "bindall" address in zk
- Assert.assertNotEquals("0.0.0.0", host);
- // Nor should we have the "random port" in zk
- Assert.assertNotEquals(0, Integer.parseInt(port));
- return;
- }
-
- Thread.sleep(5000);
- }
-
- Assert.fail("Could not find advertised GC address");
- }
-
- private int countFiles() throws Exception {
- FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
- Path path = new Path(cluster.getConfig().getDir() + "/accumulo/tables/1/*/*.rf");
- return Iterators.size(Arrays.asList(fs.globStatus(path)).iterator());
- }
-
- public static void addEntries(Connector conn, BatchWriterOpts bwOpts) throws Exception {
- conn.securityOperations().grantTablePermission(conn.whoami(), MetadataTable.NAME, TablePermission.WRITE);
- BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, bwOpts.getBatchWriterConfig());
-
- for (int i = 0; i < 100000; ++i) {
- final Text emptyText = new Text("");
- Text row = new Text(String.format("%s/%020d/%s", MetadataSchema.DeletesSection.getRowPrefix(), i,
- "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjj"));
- Mutation delFlag = new Mutation(row);
- delFlag.put(emptyText, emptyText, new Value(new byte[] {}));
- bw.addMutation(delFlag);
- }
- bw.close();
- }
-}
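Two notes on testInvalidDelete above: the misspelled "Ingoring" is intentional here, since the string must match the message the garbage collector actually logs, and the wait loop has no upper bound, so if the message never appears the test hangs until the JUnit timeout fires. A bounded sketch of the same loop, using only identifiers already in the test (the 60-second deadline is an assumption):

    long deadline = System.currentTimeMillis() + 60 * 1000;
    String output = "";
    while (!output.contains("Ingoring invalid deletion candidate")) {
      if (System.currentTimeMillis() > deadline) {
        Assert.fail("GC never logged the invalid deletion candidate");
      }
      UtilWaitThread.sleep(250);
      try {
        output = FunctionalTestUtils.readAll(cluster, SimpleGarbageCollector.class, gc);
      } catch (IOException ioe) {
        log.error("Could not read all from cluster.", ioe);
      }
    }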
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java b/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
deleted file mode 100644
index 59d8259..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Map;
-
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.util.Daemon;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.start.Main;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.accumulo.tserver.TabletServer;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
-public class HalfDeadTServerIT extends ConfigurableMacBase {
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setNumTservers(1);
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "15s");
- cfg.setProperty(Property.GENERAL_RPC_TIMEOUT, "5s");
- cfg.useMiniDFS(true);
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 4 * 60;
- }
-
- class DumpOutput extends Daemon {
-
- private final BufferedReader rdr;
- private final StringBuilder output;
-
- DumpOutput(InputStream is) {
- rdr = new BufferedReader(new InputStreamReader(is));
- output = new StringBuilder();
- }
-
- @Override
- public void run() {
- try {
- while (true) {
- String line = rdr.readLine();
- if (line == null)
- break;
- System.out.println(line);
- output.append(line);
- output.append("\n");
- }
- } catch (IOException ex) {
- log.error("IOException", ex);
- }
- }
-
- @Override
- public String toString() {
- return output.toString();
- }
- }
-
- @Test
- public void testRecover() throws Exception {
- test(10);
- }
-
- @Test
- public void testTimeout() throws Exception {
- String results = test(20, true);
- if (results != null) {
- if (!results.contains("Session expired")) {
- log.info("Failed to find 'Session expired' in output, but TServer did die which is expected");
- }
- }
- }
-
- public String test(int seconds) throws Exception {
- return test(seconds, false);
- }
-
- public String test(int seconds, boolean expectTserverDied) throws Exception {
- if (!makeDiskFailureLibrary())
- return null;
- Connector c = getConnector();
- assertEquals(1, c.instanceOperations().getTabletServers().size());
-
- // create our own tablet server with the special test library
- String javaHome = System.getProperty("java.home");
- String javaBin = javaHome + File.separator + "bin" + File.separator + "java";
- String classpath = System.getProperty("java.class.path");
- classpath = new File(cluster.getConfig().getDir(), "conf") + File.pathSeparator + classpath;
- String className = TabletServer.class.getName();
- ArrayList<String> argList = new ArrayList<String>();
- argList.addAll(Arrays.asList(javaBin, "-cp", classpath));
- argList.addAll(Arrays.asList(Main.class.getName(), className));
- ProcessBuilder builder = new ProcessBuilder(argList);
- Map<String,String> env = builder.environment();
- env.put("ACCUMULO_HOME", cluster.getConfig().getDir().getAbsolutePath());
- env.put("ACCUMULO_LOG_DIR", cluster.getConfig().getLogDir().getAbsolutePath());
- String trickFilename = cluster.getConfig().getLogDir().getAbsolutePath() + "/TRICK_FILE";
- env.put("TRICK_FILE", trickFilename);
- String libPath = System.getProperty("user.dir") + "/target/fake_disk_failure.so";
- env.put("LD_PRELOAD", libPath);
- env.put("DYLD_INSERT_LIBRARIES", libPath);
- env.put("DYLD_FORCE_FLAT_NAMESPACE", "true");
- Process ingest = null;
- Process tserver = builder.start();
- DumpOutput t = new DumpOutput(tserver.getInputStream());
- try {
- t.start();
- UtilWaitThread.sleep(1000);
- // don't need the regular tablet server
- cluster.killProcess(ServerType.TABLET_SERVER, cluster.getProcesses().get(ServerType.TABLET_SERVER).iterator().next());
- UtilWaitThread.sleep(1000);
- c.tableOperations().create("test_ingest");
- assertEquals(1, c.instanceOperations().getTabletServers().size());
- int rows = 100 * 1000;
- ingest = cluster.exec(TestIngest.class, "-u", "root", "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-p", ROOT_PASSWORD, "--rows", rows
- + "");
- UtilWaitThread.sleep(500);
-
- // block I/O with some side-channel trickiness
- File trickFile = new File(trickFilename);
- try {
- assertTrue(trickFile.createNewFile());
- UtilWaitThread.sleep(seconds * 1000);
- } finally {
- if (!trickFile.delete()) {
- log.error("Couldn't delete " + trickFile);
- }
- }
-
- if (seconds <= 10) {
- assertEquals(0, ingest.waitFor());
- VerifyIngest.Opts vopts = new VerifyIngest.Opts();
- vopts.rows = rows;
- vopts.setPrincipal("root");
- VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
- } else {
- UtilWaitThread.sleep(5 * 1000);
- tserver.waitFor();
- t.join();
- tserver = null;
- }
- // verify the process was blocked
- String results = t.toString();
- assertTrue(results.contains("sleeping\nsleeping\nsleeping\n"));
- return results;
- } finally {
- if (ingest != null) {
- ingest.destroy();
- ingest.waitFor();
- }
- if (tserver != null) {
- try {
- if (expectTserverDied) {
- try {
- tserver.exitValue();
- } catch (IllegalThreadStateException e) {
- fail("Expected TServer to kill itself, but it is still running");
- }
- }
- } finally {
- tserver.destroy();
- tserver.waitFor();
- t.join();
- }
- }
- }
- }
-
- private boolean makeDiskFailureLibrary() throws Exception {
- String root = System.getProperty("user.dir");
- String source = root + "/src/test/c/fake_disk_failure.c";
- String lib = root + "/target/fake_disk_failure.so";
- String platform = System.getProperty("os.name");
- String cmd[];
- if (platform.equals("Darwin")) {
- cmd = new String[] {"gcc", "-arch", "x86_64", "-arch", "i386", "-dynamiclib", "-O3", "-fPIC", source, "-o", lib};
- } else {
- cmd = new String[] {"gcc", "-D_GNU_SOURCE", "-Wall", "-fPIC", source, "-shared", "-o", lib, "-ldl"};
- }
- Process gcc = Runtime.getRuntime().exec(cmd);
- return gcc.waitFor() == 0;
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/KerberosIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/KerberosIT.java b/test/src/test/java/org/apache/accumulo/test/functional/KerberosIT.java
deleted file mode 100644
index aa8313e..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/KerberosIT.java
+++ /dev/null
@@ -1,573 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.File;
-import java.lang.reflect.UndeclaredThrowableException;
-import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.admin.CompactionConfig;
-import org.apache.accumulo.core.client.admin.DelegationTokenConfig;
-import org.apache.accumulo.core.client.impl.AuthenticationTokenIdentifier;
-import org.apache.accumulo.core.client.impl.DelegationTokenImpl;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.security.SystemPermission;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.harness.AccumuloITBase;
-import org.apache.accumulo.harness.MiniClusterConfigurationCallback;
-import org.apache.accumulo.harness.MiniClusterHarness;
-import org.apache.accumulo.harness.TestingKdc;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.minikdc.MiniKdc;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Sets;
-
-/**
- * MAC test which uses {@link MiniKdc} to simulate a secure environment. Can be used as a sanity check for Kerberos/SASL testing.
- */
-public class KerberosIT extends AccumuloITBase {
- private static final Logger log = LoggerFactory.getLogger(KerberosIT.class);
-
- private static TestingKdc kdc;
- private static String krbEnabledForITs = null;
- private static ClusterUser rootUser;
-
- @BeforeClass
- public static void startKdc() throws Exception {
- kdc = new TestingKdc();
- kdc.start();
- krbEnabledForITs = System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION);
- if (null == krbEnabledForITs || !Boolean.parseBoolean(krbEnabledForITs)) {
- System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, "true");
- }
- rootUser = kdc.getRootUser();
- }
-
- @AfterClass
- public static void stopKdc() throws Exception {
- if (null != kdc) {
- kdc.stop();
- }
- if (null != krbEnabledForITs) {
- System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, krbEnabledForITs);
- }
- }
-
- @Override
- public int defaultTimeoutSeconds() {
- return 60 * 5;
- }
-
- private MiniAccumuloClusterImpl mac;
-
- @Before
- public void startMac() throws Exception {
- MiniClusterHarness harness = new MiniClusterHarness();
- mac = harness.create(this, new PasswordToken("unused"), kdc, new MiniClusterConfigurationCallback() {
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
- Map<String,String> site = cfg.getSiteConfig();
- site.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "10s");
- cfg.setSiteConfig(site);
- }
-
- });
-
- mac.getConfig().setNumTservers(1);
- mac.start();
- // Enabled kerberos auth
- Configuration conf = new Configuration(false);
- conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
- UserGroupInformation.setConfiguration(conf);
- }
-
- @After
- public void stopMac() throws Exception {
- if (null != mac) {
- mac.stop();
- }
- }
-
- @Test
- public void testAdminUser() throws Exception {
- // Login as the client (provided to `accumulo init` as the "root" user)
- UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
-
- final Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
-
- // The "root" user should have all system permissions
- for (SystemPermission perm : SystemPermission.values()) {
- assertTrue("Expected user to have permission: " + perm, conn.securityOperations().hasSystemPermission(conn.whoami(), perm));
- }
-
- // and the ability to modify the root and metadata tables
- for (String table : Arrays.asList(RootTable.NAME, MetadataTable.NAME)) {
- assertTrue(conn.securityOperations().hasTablePermission(conn.whoami(), table, TablePermission.ALTER_TABLE));
- }
- }
-
- @Test
- public void testNewUser() throws Exception {
- String newUser = testName.getMethodName();
- final File newUserKeytab = new File(kdc.getKeytabDir(), newUser + ".keytab");
- if (newUserKeytab.exists() && !newUserKeytab.delete()) {
- log.warn("Unable to delete {}", newUserKeytab);
- }
-
- // Create a new user
- kdc.createPrincipal(newUserKeytab, newUser);
-
- newUser = kdc.qualifyUser(newUser);
-
- // Login as the "root" user
- UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
- log.info("Logged in as {}", rootUser.getPrincipal());
-
- Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
- log.info("Created connector as {}", rootUser.getPrincipal());
- assertEquals(rootUser.getPrincipal(), conn.whoami());
-
- // Make sure the system user doesn't exist -- this will force some RPC to happen server-side
- createTableWithDataAndCompact(conn);
-
- HashSet<String> users = Sets.newHashSet(rootUser.getPrincipal());
- assertEquals(users, conn.securityOperations().listLocalUsers());
-
- // Switch to a new user
- UserGroupInformation.loginUserFromKeytab(newUser, newUserKeytab.getAbsolutePath());
- log.info("Logged in as {}", newUser);
-
- conn = mac.getConnector(newUser, new KerberosToken());
- log.info("Created connector as {}", newUser);
- assertEquals(newUser, conn.whoami());
-
- // The new user should have no system permissions
- for (SystemPermission perm : SystemPermission.values()) {
- assertFalse(conn.securityOperations().hasSystemPermission(newUser, perm));
- }
-
- users.add(newUser);
-
- // Same users as before, plus the new user we just created
- assertEquals(users, conn.securityOperations().listLocalUsers());
- }
-
- @Test
- public void testUserPrivilegesThroughGrant() throws Exception {
- String user1 = testName.getMethodName();
- final File user1Keytab = new File(kdc.getKeytabDir(), user1 + ".keytab");
- if (user1Keytab.exists() && !user1Keytab.delete()) {
- log.warn("Unable to delete {}", user1Keytab);
- }
-
- // Create some new users
- kdc.createPrincipal(user1Keytab, user1);
-
- user1 = kdc.qualifyUser(user1);
-
- // Log in as user1
- UserGroupInformation.loginUserFromKeytab(user1, user1Keytab.getAbsolutePath());
- log.info("Logged in as {}", user1);
-
- // Indirectly creates this user when we use it
- Connector conn = mac.getConnector(user1, new KerberosToken());
- log.info("Created connector as {}", user1);
-
- // The new user should have no system permissions
- for (SystemPermission perm : SystemPermission.values()) {
- assertFalse(conn.securityOperations().hasSystemPermission(user1, perm));
- }
-
- UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
- conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
-
- conn.securityOperations().grantSystemPermission(user1, SystemPermission.CREATE_TABLE);
-
- // Switch back to the original user
- UserGroupInformation.loginUserFromKeytab(user1, user1Keytab.getAbsolutePath());
- conn = mac.getConnector(user1, new KerberosToken());
-
- // Shouldn't throw an exception since we granted the create table permission
- final String table = testName.getMethodName() + "_user_table";
- conn.tableOperations().create(table);
-
- // Make sure we can actually use the table we made
- BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
- Mutation m = new Mutation("a");
- m.put("b", "c", "d");
- bw.addMutation(m);
- bw.close();
-
- conn.tableOperations().compact(table, new CompactionConfig().setWait(true).setFlush(true));
- }
-
- @Test
- public void testUserPrivilegesForTable() throws Exception {
- String user1 = testName.getMethodName();
- final File user1Keytab = new File(kdc.getKeytabDir(), user1 + ".keytab");
- if (user1Keytab.exists() && !user1Keytab.delete()) {
- log.warn("Unable to delete {}", user1Keytab);
- }
-
- // Create some new users -- cannot contain realm
- kdc.createPrincipal(user1Keytab, user1);
-
- user1 = kdc.qualifyUser(user1);
-
- // Log in as user1
- UserGroupInformation.loginUserFromKeytab(user1, user1Keytab.getAbsolutePath());
- log.info("Logged in as {}", user1);
-
- // Indirectly creates this user when we use it
- Connector conn = mac.getConnector(user1, new KerberosToken());
- log.info("Created connector as {}", user1);
-
- // The new user should have no system permissions
- for (SystemPermission perm : SystemPermission.values()) {
- assertFalse(conn.securityOperations().hasSystemPermission(user1, perm));
- }
-
- UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
- conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
-
- final String table = testName.getMethodName() + "_user_table";
- conn.tableOperations().create(table);
-
- final String viz = "viz";
-
- // Give our unprivileged user permission on the table we made for them
- conn.securityOperations().grantTablePermission(user1, table, TablePermission.READ);
- conn.securityOperations().grantTablePermission(user1, table, TablePermission.WRITE);
- conn.securityOperations().grantTablePermission(user1, table, TablePermission.ALTER_TABLE);
- conn.securityOperations().grantTablePermission(user1, table, TablePermission.DROP_TABLE);
- conn.securityOperations().changeUserAuthorizations(user1, new Authorizations(viz));
-
- // Switch back to the original user
- UserGroupInformation.loginUserFromKeytab(user1, user1Keytab.getAbsolutePath());
- conn = mac.getConnector(user1, new KerberosToken());
-
- // Make sure we can actually use the table we made
-
- // Write data
- final long ts = 1000l;
- BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
- Mutation m = new Mutation("a");
- m.put("b", "c", new ColumnVisibility(viz.getBytes()), ts, "d");
- bw.addMutation(m);
- bw.close();
-
- // Compact
- conn.tableOperations().compact(table, new CompactionConfig().setWait(true).setFlush(true));
-
- // Alter
- conn.tableOperations().setProperty(table, Property.TABLE_BLOOM_ENABLED.getKey(), "true");
-
- // Read (and proper authorizations)
- Scanner s = conn.createScanner(table, new Authorizations(viz));
- Iterator<Entry<Key,Value>> iter = s.iterator();
- assertTrue("No results from iterator", iter.hasNext());
- Entry<Key,Value> entry = iter.next();
- assertEquals(new Key("a", "b", "c", viz, ts), entry.getKey());
- assertEquals(new Value("d".getBytes()), entry.getValue());
- assertFalse("Had more results from iterator", iter.hasNext());
- }
-
- @Test
- public void testDelegationToken() throws Exception {
- final String tableName = getUniqueNames(1)[0];
-
- // Login as the "root" user
- UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
- log.info("Logged in as {}", rootUser.getPrincipal());
-
- final int numRows = 100, numColumns = 10;
-
- // As the "root" user, open up the connection and get a delegation token
- final AuthenticationToken delegationToken = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
- @Override
- public AuthenticationToken run() throws Exception {
- Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
- log.info("Created connector as {}", rootUser.getPrincipal());
- assertEquals(rootUser.getPrincipal(), conn.whoami());
-
- conn.tableOperations().create(tableName);
- BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
- for (int r = 0; r < numRows; r++) {
- Mutation m = new Mutation(Integer.toString(r));
- for (int c = 0; c < numColumns; c++) {
- String col = Integer.toString(c);
- m.put(col, col, col);
- }
- bw.addMutation(m);
- }
- bw.close();
-
- return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
- }
- });
-
- // The above login with keytab doesn't have a way to logout, so make a fake user that won't have krb credentials
- UserGroupInformation userWithoutPrivs = UserGroupInformation.createUserForTesting("fake_user", new String[0]);
- int recordsSeen = userWithoutPrivs.doAs(new PrivilegedExceptionAction<Integer>() {
- @Override
- public Integer run() throws Exception {
- Connector conn = mac.getConnector(rootUser.getPrincipal(), delegationToken);
-
- BatchScanner bs = conn.createBatchScanner(tableName, Authorizations.EMPTY, 2);
- bs.setRanges(Collections.singleton(new Range()));
- int recordsSeen = Iterables.size(bs);
- bs.close();
- return recordsSeen;
- }
- });
-
- assertEquals(numRows * numColumns, recordsSeen);
- }
-
- @Test
- public void testDelegationTokenAsDifferentUser() throws Exception {
- // Login as the "root" user
- UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
- log.info("Logged in as {}", rootUser.getPrincipal());
-
- // As the "root" user, open up the connection and get a delegation token
- Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
- log.info("Created connector as {}", rootUser.getPrincipal());
- assertEquals(rootUser.getPrincipal(), conn.whoami());
- final AuthenticationToken delegationToken = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
-
- // The above login with keytab doesn't have a way to logout, so make a fake user that won't have krb credentials
- UserGroupInformation userWithoutPrivs = UserGroupInformation.createUserForTesting("fake_user", new String[0]);
- try {
- // Use the delegation token to try to log in as a different user
- userWithoutPrivs.doAs(new PrivilegedExceptionAction<Void>() {
- @Override
- public Void run() throws Exception {
- mac.getConnector("some_other_user", delegationToken);
- return null;
- }
- });
- fail("Using a delegation token as a different user should throw an exception");
- } catch (UndeclaredThrowableException e) {
- Throwable cause = e.getCause();
- assertNotNull(cause);
- // We should get an AccumuloSecurityException from trying to use a delegation token for the wrong user
- assertTrue("Expected cause to be AccumuloSecurityException, but was " + cause.getClass(), cause instanceof AccumuloSecurityException);
- }
- }
-
- @Test(expected = AccumuloSecurityException.class)
- public void testGetDelegationTokenDenied() throws Exception {
- String newUser = testName.getMethodName();
- final File newUserKeytab = new File(kdc.getKeytabDir(), newUser + ".keytab");
- if (newUserKeytab.exists() && !newUserKeytab.delete()) {
- log.warn("Unable to delete {}", newUserKeytab);
- }
-
- // Create a new user
- kdc.createPrincipal(newUserKeytab, newUser);
-
- newUser = kdc.qualifyUser(newUser);
-
- // Login as a normal user
- UserGroupInformation.loginUserFromKeytab(newUser, newUserKeytab.getAbsolutePath());
-
- // As the "root" user, open up the connection and get a delegation token
- Connector conn = mac.getConnector(newUser, new KerberosToken());
- log.info("Created connector as {}", newUser);
- assertEquals(newUser, conn.whoami());
-
- conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
- }
-
- @Test
- public void testRestartedMasterReusesSecretKey() throws Exception {
- // Login as the "root" user
- UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
- log.info("Logged in as {}", rootUser.getPrincipal());
-
- // As the "root" user, open up the connection and get a delegation token
- final AuthenticationToken delegationToken1 = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
- @Override
- public AuthenticationToken run() throws Exception {
- Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
- log.info("Created connector as {}", rootUser.getPrincipal());
- assertEquals(rootUser.getPrincipal(), conn.whoami());
-
- AuthenticationToken token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
-
- assertTrue("Could not get tables with delegation token", mac.getConnector(rootUser.getPrincipal(), token).tableOperations().list().size() > 0);
-
- return token;
- }
- });
-
- log.info("Stopping master");
- mac.getClusterControl().stop(ServerType.MASTER);
- Thread.sleep(5000);
- log.info("Restarting master");
- mac.getClusterControl().start(ServerType.MASTER);
-
- // Make sure our original token is still good
- root.doAs(new PrivilegedExceptionAction<Void>() {
- @Override
- public Void run() throws Exception {
- Connector conn = mac.getConnector(rootUser.getPrincipal(), delegationToken1);
-
- assertTrue("Could not get tables with delegation token", conn.tableOperations().list().size() > 0);
-
- return null;
- }
- });
-
- // Get a new token, so we can compare the keyId on the second to the first
- final AuthenticationToken delegationToken2 = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
- @Override
- public AuthenticationToken run() throws Exception {
- Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
- log.info("Created connector as {}", rootUser.getPrincipal());
- assertEquals(rootUser.getPrincipal(), conn.whoami());
-
- AuthenticationToken token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
-
- assertTrue("Could not get tables with delegation token", mac.getConnector(rootUser.getPrincipal(), token).tableOperations().list().size() > 0);
-
- return token;
- }
- });
-
-    // A restarted master should reuse the same secret key if the key hasn't expired (1 day by default)
- DelegationTokenImpl dt1 = (DelegationTokenImpl) delegationToken1;
- DelegationTokenImpl dt2 = (DelegationTokenImpl) delegationToken2;
- assertEquals(dt1.getIdentifier().getKeyId(), dt2.getIdentifier().getKeyId());
- }
-
- @Test(expected = AccumuloException.class)
- public void testDelegationTokenWithInvalidLifetime() throws Throwable {
- // Login as the "root" user
- UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
- log.info("Logged in as {}", rootUser.getPrincipal());
-
- // As the "root" user, open up the connection and get a delegation token
- try {
- root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
- @Override
- public AuthenticationToken run() throws Exception {
- Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
- log.info("Created connector as {}", rootUser.getPrincipal());
- assertEquals(rootUser.getPrincipal(), conn.whoami());
-
- // Should fail
- return conn.securityOperations().getDelegationToken(new DelegationTokenConfig().setTokenLifetime(Long.MAX_VALUE, TimeUnit.MILLISECONDS));
- }
- });
- } catch (UndeclaredThrowableException e) {
- Throwable cause = e.getCause();
- if (null != cause) {
- throw cause;
- } else {
- throw e;
- }
- }
- }
-
- @Test
- public void testDelegationTokenWithReducedLifetime() throws Throwable {
- // Login as the "root" user
- UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
- log.info("Logged in as {}", rootUser.getPrincipal());
-
- // As the "root" user, open up the connection and get a delegation token
- final AuthenticationToken dt = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
- @Override
- public AuthenticationToken run() throws Exception {
- Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
- log.info("Created connector as {}", rootUser.getPrincipal());
- assertEquals(rootUser.getPrincipal(), conn.whoami());
-
- return conn.securityOperations().getDelegationToken(new DelegationTokenConfig().setTokenLifetime(5, TimeUnit.MINUTES));
- }
- });
-
- AuthenticationTokenIdentifier identifier = ((DelegationTokenImpl) dt).getIdentifier();
- assertTrue("Expected identifier to expire in no more than 5 minutes: " + identifier,
- identifier.getExpirationDate() - identifier.getIssueDate() <= (5 * 60 * 1000));
- }
-
- /**
- * Creates a table, adds a record to it, and then compacts the table. A simple way to make sure that the system user exists (since the master does an RPC to
- * the tserver which will create the system user if it doesn't already exist).
- */
- private void createTableWithDataAndCompact(Connector conn) throws TableNotFoundException, AccumuloSecurityException, AccumuloException, TableExistsException {
- final String table = testName.getMethodName() + "_table";
- conn.tableOperations().create(table);
- BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
- Mutation m = new Mutation("a");
- m.put("b", "c", "d");
- bw.addMutation(m);
- bw.close();
- conn.tableOperations().compact(table, new CompactionConfig().setFlush(true).setWait(true));
- }
-}
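KerberosIT repeats the login-from-keytab-then-get-connector sequence in nearly every test. A small helper could factor it out; a sketch using only types already imported by the class (the name connectAs and its placement are assumptions, not part of the patch):

    private Connector connectAs(String principal, File keytab) throws Exception {
      // Re-login as the given principal, then build a connector with a
      // fresh KerberosToken for that login.
      UserGroupInformation.loginUserFromKeytab(principal, keytab.getAbsolutePath());
      return mac.getConnector(principal, new KerberosToken());
    }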
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/FileArchiveIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/FileArchiveIT.java b/test/src/main/java/org/apache/accumulo/test/FileArchiveIT.java
new file mode 100644
index 0000000..8e51984
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/FileArchiveIT.java
@@ -0,0 +1,271 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.common.collect.Iterables;
+
+/**
+ * Tests that files are archived instead of deleted when configured.
+ */
+public class FileArchiveIT extends ConfigurableMacBase {
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
+ cfg.setProperty(Property.GC_FILE_ARCHIVE, "true");
+ cfg.setProperty(Property.GC_CYCLE_DELAY, "1s");
+ cfg.setProperty(Property.GC_CYCLE_START, "1s");
+ }
+
+ @Test
+  public void testUnusedFilesAreArchived() throws Exception {
+ final Connector conn = getConnector();
+ final String tableName = getUniqueNames(1)[0];
+
+ conn.tableOperations().create(tableName);
+
+ final String tableId = conn.tableOperations().tableIdMap().get(tableName);
+ Assert.assertNotNull("Could not get table ID", tableId);
+
+ BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+ Mutation m = new Mutation("row");
+ m.put("", "", "value");
+ bw.addMutation(m);
+ bw.close();
+
+ // Compact memory to disk
+ conn.tableOperations().compact(tableName, null, null, true, true);
+
+ Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
+ s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+
+ Entry<Key,Value> entry = Iterables.getOnlyElement(s);
+ final String file = entry.getKey().getColumnQualifier().toString();
+ final Path p = new Path(file);
+
+ // Then force another to make an unreferenced file
+ conn.tableOperations().compact(tableName, null, null, true, true);
+
+ log.info("File for table: " + file);
+
+ FileSystem fs = getCluster().getFileSystem();
+ int i = 0;
+ while (fs.exists(p)) {
+ i++;
+ Thread.sleep(1000);
+ if (0 == i % 10) {
+ log.info("Waited " + i + " iterations, file still exists");
+ }
+ }
+
+ log.info("File was removed");
+
+ String filePath = p.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
+
+ log.info("File relative to accumulo dir: " + filePath);
+
+ Path fileArchiveDir = new Path(getCluster().getConfig().getAccumuloDir().toString(), ServerConstants.FILE_ARCHIVE_DIR);
+
+ Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
+
+ // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
+ Path archivedFile = new Path(fileArchiveDir, filePath.substring(1));
+
+ Assert.assertTrue("File doesn't exists in archive directory: " + archivedFile, fs.exists(archivedFile));
+ }
+
+ @Test
+ public void testDeletedTableIsArchived() throws Exception {
+ final Connector conn = getConnector();
+ final String tableName = getUniqueNames(1)[0];
+
+ conn.tableOperations().create(tableName);
+
+ final String tableId = conn.tableOperations().tableIdMap().get(tableName);
+ Assert.assertNotNull("Could not get table ID", tableId);
+
+ BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+ Mutation m = new Mutation("row");
+ m.put("", "", "value");
+ bw.addMutation(m);
+ bw.close();
+
+ // Compact memory to disk
+ conn.tableOperations().compact(tableName, null, null, true, true);
+
+ Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
+ s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+
+ Entry<Key,Value> entry = Iterables.getOnlyElement(s);
+ final String file = entry.getKey().getColumnQualifier().toString();
+ final Path p = new Path(file);
+
+ conn.tableOperations().delete(tableName);
+
+ log.info("File for table: " + file);
+
+ FileSystem fs = getCluster().getFileSystem();
+ int i = 0;
+ while (fs.exists(p)) {
+ i++;
+ Thread.sleep(1000);
+ if (0 == i % 10) {
+ log.info("Waited " + i + " iterations, file still exists");
+ }
+ }
+
+ log.info("File was removed");
+
+ String filePath = p.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
+
+ log.info("File relative to accumulo dir: " + filePath);
+
+ Path fileArchiveDir = new Path(getCluster().getConfig().getAccumuloDir().toString(), ServerConstants.FILE_ARCHIVE_DIR);
+
+ Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
+
+ // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
+ Path archivedFile = new Path(fileArchiveDir, filePath.substring(1));
+
+ Assert.assertTrue("File doesn't exists in archive directory: " + archivedFile, fs.exists(archivedFile));
+ }
+
+ @Test
+  public void testUnusedFilesAndDeletedTable() throws Exception {
+ final Connector conn = getConnector();
+ final String tableName = getUniqueNames(1)[0];
+
+ conn.tableOperations().create(tableName);
+
+ final String tableId = conn.tableOperations().tableIdMap().get(tableName);
+ Assert.assertNotNull("Could not get table ID", tableId);
+
+ BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+ Mutation m = new Mutation("row");
+ m.put("", "", "value");
+ bw.addMutation(m);
+ bw.close();
+
+ // Compact memory to disk
+ conn.tableOperations().compact(tableName, null, null, true, true);
+
+ Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
+ s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+
+ Entry<Key,Value> entry = Iterables.getOnlyElement(s);
+ final String file = entry.getKey().getColumnQualifier().toString();
+ final Path p = new Path(file);
+
+ // Then force another to make an unreferenced file
+ conn.tableOperations().compact(tableName, null, null, true, true);
+
+ log.info("File for table: " + file);
+
+ FileSystem fs = getCluster().getFileSystem();
+ int i = 0;
+ while (fs.exists(p)) {
+ i++;
+ Thread.sleep(1000);
+ if (0 == i % 10) {
+ log.info("Waited " + i + " iterations, file still exists");
+ }
+ }
+
+ log.info("File was removed");
+
+ String filePath = p.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
+
+ log.info("File relative to accumulo dir: " + filePath);
+
+ Path fileArchiveDir = new Path(getCluster().getConfig().getAccumuloDir().toString(), ServerConstants.FILE_ARCHIVE_DIR);
+
+ Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
+
+ // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
+ Path archivedFile = new Path(fileArchiveDir, filePath.substring(1));
+
+ Assert.assertTrue("File doesn't exists in archive directory: " + archivedFile, fs.exists(archivedFile));
+
+ // Offline the table so we can be sure there is a single file
+ conn.tableOperations().offline(tableName, true);
+
+ // See that the file in metadata currently is
+ s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
+ s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+
+ entry = Iterables.getOnlyElement(s);
+ final String finalFile = entry.getKey().getColumnQualifier().toString();
+ final Path finalPath = new Path(finalFile);
+
+ conn.tableOperations().delete(tableName);
+
+ log.info("File for table: " + finalPath);
+
+ i = 0;
+ while (fs.exists(finalPath)) {
+ i++;
+ Thread.sleep(1000);
+ if (0 == i % 10) {
+ log.info("Waited " + i + " iterations, file still exists");
+ }
+ }
+
+ log.info("File was removed");
+
+ String finalFilePath = finalPath.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
+
+ log.info("File relative to accumulo dir: " + finalFilePath);
+
+ Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
+
+ // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
+ Path finalArchivedFile = new Path(fileArchiveDir, finalFilePath.substring(1));
+
+ Assert.assertTrue("File doesn't exists in archive directory: " + finalArchivedFile, fs.exists(finalArchivedFile));
+ }
+
+}
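The three tests above share the same wait-for-deletion-then-check-archive sequence. A sketch of a helper that could factor it out, built only from accessors the class already uses (the name and exact signature are assumptions):

    private void assertArchived(FileSystem fs, Path file) throws Exception {
      // Wait for the GC to remove the original file...
      while (fs.exists(file)) {
        Thread.sleep(1000);
      }
      // ...then compute its path relative to the accumulo dir and expect
      // the same relative path under the archive directory.
      String accumuloDir = getCluster().getConfig().getAccumuloDir().toString();
      String relative = file.toUri().getPath().substring(accumuloDir.length());
      Path archiveDir = new Path(accumuloDir, ServerConstants.FILE_ARCHIVE_DIR);
      // Drop the leading '/' so Path treats the 2nd arg as a child.
      Path archived = new Path(archiveDir, relative.substring(1));
      Assert.assertTrue("File doesn't exist in archive directory: " + archived, fs.exists(archived));
    }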
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/GarbageCollectWALIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/GarbageCollectWALIT.java b/test/src/main/java/org/apache/accumulo/test/GarbageCollectWALIT.java
new file mode 100644
index 0000000..141ee27
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/GarbageCollectWALIT.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertEquals;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.fate.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class GarbageCollectWALIT extends ConfigurableMacBase {
+
+ @Override
+ protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+    cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
+ cfg.setProperty(Property.GC_CYCLE_START, "1s");
+ cfg.setProperty(Property.GC_CYCLE_DELAY, "1s");
+ cfg.setNumTservers(1);
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ @Test(timeout = 2 * 60 * 1000)
+ public void test() throws Exception {
+    // keep the GC from collecting anything yet
+ String tableName = getUniqueNames(1)[0];
+ cluster.getClusterControl().stop(ServerType.GARBAGE_COLLECTOR);
+ Connector c = getConnector();
+ c.tableOperations().create(tableName);
+ // count the number of WALs in the filesystem
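+    // (presumably the lone tserver's current log plus one already in use for the metadata tablets)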
+ assertEquals(2, countWALsInFS(cluster));
+ cluster.getClusterControl().stop(ServerType.TABLET_SERVER);
+ cluster.getClusterControl().start(ServerType.GARBAGE_COLLECTOR);
+ cluster.getClusterControl().start(ServerType.TABLET_SERVER);
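+    // a metadata scan nudges the restarted tserver through log recovery before we count WALs again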
+ Iterators.size(c.createScanner(MetadataTable.NAME, Authorizations.EMPTY).iterator());
+ // let GC run
+ UtilWaitThread.sleep(3 * 5 * 1000);
+ assertEquals(2, countWALsInFS(cluster));
+ }
+
+ private int countWALsInFS(MiniAccumuloClusterImpl cluster) throws Exception {
+ FileSystem fs = cluster.getFileSystem();
+ RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(new Path(cluster.getConfig().getAccumuloDir() + "/wal"), true);
+ int result = 0;
+ while (iterator.hasNext()) {
+ LocatedFileStatus next = iterator.next();
+ if (!next.isDirectory()) {
+ result++;
+ }
+ }
+ return result;
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/ImportExportIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/ImportExportIT.java b/test/src/main/java/org/apache/accumulo/test/ImportExportIT.java
new file mode 100644
index 0000000..55d83f5
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/ImportExportIT.java
@@ -0,0 +1,198 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.BufferedReader;
+import java.io.InputStreamReader;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * ImportTable didn't correctly place absolute paths in metadata. This resulted in the imported table only being usable when the actual HDFS directory for
+ * Accumulo was the same as Property.INSTANCE_DFS_DIR. If any other HDFS directory was used, any interactions with the table would fail because the relative
+ * path in the metadata table (created by the ImportTable process) would be converted to a non-existent absolute path.
+ * <p>
+ * ACCUMULO-3215
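+ * <p>
+ * For example (illustrative paths only), a file entry stored as the relative path {@code /b-0000001/I0000002.rf} had to be qualified to an absolute URI such
+ * as {@code hdfs://namenode/accumulo/tables/2/b-0000001/I0000002.rf} before the imported table could be read.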
+ *
+ */
+public class ImportExportIT extends AccumuloClusterHarness {
+
+ private static final Logger log = LoggerFactory.getLogger(ImportExportIT.class);
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Test
+ public void testExportImportThenScan() throws Exception {
+ Connector conn = getConnector();
+
+ String[] tableNames = getUniqueNames(2);
+ String srcTable = tableNames[0], destTable = tableNames[1];
+ conn.tableOperations().create(srcTable);
+
+ BatchWriter bw = conn.createBatchWriter(srcTable, new BatchWriterConfig());
+ for (int row = 0; row < 1000; row++) {
+ Mutation m = new Mutation(Integer.toString(row));
+ for (int col = 0; col < 100; col++) {
+ m.put(Integer.toString(col), "", Integer.toString(col * 2));
+ }
+ bw.addMutation(m);
+ }
+
+ bw.close();
+
+ conn.tableOperations().compact(srcTable, null, null, true, true);
+
+ // Make a directory we can use to throw the export and import directories
+ // Must exist on the filesystem the cluster is running.
+ FileSystem fs = cluster.getFileSystem();
+ Path tmp = cluster.getTemporaryPath();
+ log.info("Using FileSystem: " + fs);
+ Path baseDir = new Path(tmp, getClass().getName());
+ if (fs.exists(baseDir)) {
+ log.info("{} exists on filesystem, deleting", baseDir);
+ assertTrue("Failed to deleted " + baseDir, fs.delete(baseDir, true));
+ }
+ log.info("Creating {}", baseDir);
+ assertTrue("Failed to create " + baseDir, fs.mkdirs(baseDir));
+ Path exportDir = new Path(baseDir, "export");
+ Path importDir = new Path(baseDir, "import");
+ for (Path p : new Path[] {exportDir, importDir}) {
+ assertTrue("Failed to create " + baseDir, fs.mkdirs(p));
+ }
+
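+    // Open the scratch space up so the Accumulo server processes, which may run as a different
+    // user (e.g. on a standalone cluster), can read and write these directories.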
+ FsShell fsShell = new FsShell(fs.getConf());
+ assertEquals("Failed to chmod " + baseDir, 0, fsShell.run(new String[] {"-chmod", "-R", "777", baseDir.toString()}));
+
+ log.info("Exporting table to {}", exportDir);
+ log.info("Importing table from {}", importDir);
+
+ // Offline the table
+ conn.tableOperations().offline(srcTable, true);
+ // Then export it
+ conn.tableOperations().exportTable(srcTable, exportDir.toString());
+
+ // Make sure the distcp.txt file that exporttable creates is available
+ Path distcp = new Path(exportDir, "distcp.txt");
+ Assert.assertTrue("Distcp file doesn't exist", fs.exists(distcp));
+ FSDataInputStream is = fs.open(distcp);
+ BufferedReader reader = new BufferedReader(new InputStreamReader(is));
+
+ // Copy each file that was exported to the import directory
+ String line;
+ while (null != (line = reader.readLine())) {
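+      // each line is a full URI such as "file:/path/to/file"; substring(5) drops the
+      // 5-character scheme prefix ("file:" or "hdfs:")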
+ Path p = new Path(line.substring(5));
+ Assert.assertTrue("File doesn't exist: " + p, fs.exists(p));
+
+ Path dest = new Path(importDir, p.getName());
+ Assert.assertFalse("Did not expect " + dest + " to exist", fs.exists(dest));
+ FileUtil.copy(fs, p, fs, dest, false, fs.getConf());
+ }
+
+ reader.close();
+
+ log.info("Import dir: {}", Arrays.toString(fs.listStatus(importDir)));
+
+ // Import the exported data into a new table
+ conn.tableOperations().importTable(destTable, importDir.toString());
+
+ // Get the table ID for the table that the importtable command created
+ final String tableId = conn.tableOperations().tableIdMap().get(destTable);
+ Assert.assertNotNull(tableId);
+
+ // Get all `file` colfams from the metadata table for the new table
+ log.info("Imported into table with ID: {}", tableId);
+ Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
+ s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+ MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(s);
+
+    // Expect one file entry and one directory entry for the lone tablet
+ for (Entry<Key,Value> fileEntry : s) {
+ Key k = fileEntry.getKey();
+ String value = fileEntry.getValue().toString();
+ if (k.getColumnFamily().equals(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME)) {
+ // The file should be an absolute URI (file:///...), not a relative path (/b-000.../I000001.rf)
+ String fileUri = k.getColumnQualifier().toString();
+ Assert.assertFalse("Imported files should have absolute URIs, not relative: " + fileUri, looksLikeRelativePath(fileUri));
+ } else if (k.getColumnFamily().equals(MetadataSchema.TabletsSection.ServerColumnFamily.NAME)) {
+ Assert.assertFalse("Server directory should have absolute URI, not relative: " + value, looksLikeRelativePath(value));
+ } else {
+ Assert.fail("Got expected pair: " + k + "=" + fileEntry.getValue());
+ }
+ }
+
+ // Online the original table before we verify equivalence
+ conn.tableOperations().online(srcTable, true);
+
+ verifyTableEquality(conn, srcTable, destTable);
+ }
+
+ private void verifyTableEquality(Connector conn, String srcTable, String destTable) throws Exception {
+ Iterator<Entry<Key,Value>> src = conn.createScanner(srcTable, Authorizations.EMPTY).iterator(), dest = conn.createScanner(destTable, Authorizations.EMPTY)
+ .iterator();
+ Assert.assertTrue("Could not read any data from source table", src.hasNext());
+ Assert.assertTrue("Could not read any data from destination table", dest.hasNext());
+ while (src.hasNext() && dest.hasNext()) {
+ Entry<Key,Value> orig = src.next(), copy = dest.next();
+ Assert.assertEquals(orig.getKey(), copy.getKey());
+ Assert.assertEquals(orig.getValue(), copy.getValue());
+ }
+ Assert.assertFalse("Source table had more data to read", src.hasNext());
+ Assert.assertFalse("Dest table had more data to read", dest.hasNext());
+ }
+
+ private boolean looksLikeRelativePath(String uri) {
+ if (uri.startsWith("/" + Constants.BULK_PREFIX)) {
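+      // a relative bulk path looks like "/b-0000000/I0000001.rf" (assuming the usual 7-character
+      // suffix after "b-"), so index 10 is the '/' between the bulk dir and the file name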
+ if ('/' == uri.charAt(10)) {
+ return true;
+ }
+ } else if (uri.startsWith("/" + Constants.CLONE_PREFIX)) {
+ return true;
+ }
+
+ return false;
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/IntegrationTestMapReduce.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/IntegrationTestMapReduce.java b/test/src/main/java/org/apache/accumulo/test/IntegrationTestMapReduce.java
new file mode 100644
index 0000000..e33f3a9
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/IntegrationTestMapReduce.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.input.NLineInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.runner.Description;
+import org.junit.runner.JUnitCore;
+import org.junit.runner.Result;
+import org.junit.runner.notification.Failure;
+import org.junit.runner.notification.RunListener;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class IntegrationTestMapReduce extends Configured implements Tool {
+
+ private static final Logger log = LoggerFactory.getLogger(IntegrationTestMapReduce.class);
+
+ public static class TestMapper extends Mapper<LongWritable,Text,IntWritable,Text> {
+
+ @Override
+ protected void map(LongWritable key, Text value, final Mapper<LongWritable,Text,IntWritable,Text>.Context context) throws IOException, InterruptedException {
+ String className = value.toString();
+ if (className.trim().isEmpty()) {
+ return;
+ }
+      Class<?> test = null;
+ try {
+ test = Class.forName(className);
+ } catch (ClassNotFoundException e) {
+ log.debug("Error finding class {}", className, e);
+ context.write(new IntWritable(-1), new Text(e.toString()));
+ return;
+ }
+ JUnitCore core = new JUnitCore();
+ core.addListener(new RunListener() {
+
+ @Override
+ public void testStarted(Description description) throws Exception {
+ log.info("Starting {}", description);
+ context.progress();
+ }
+
+ @Override
+ public void testFinished(Description description) throws Exception {
+ log.info("Finished {}", description);
+ context.progress();
+ }
+
+ @Override
+ public void testFailure(Failure failure) throws Exception {
+ log.info("Test failed: {}", failure.getDescription(), failure.getException());
+ context.progress();
+ }
+
+ });
+ log.info("Running test {}", className);
+ try {
+ Result result = core.run(test);
+ if (result.wasSuccessful()) {
+ log.info("{} was successful", className);
+ context.write(new IntWritable(0), value);
+ } else {
+ log.info("{} failed", className);
+ context.write(new IntWritable(1), value);
+ }
+ } catch (Exception e) {
+ // most likely JUnit issues, like no tests to run
+ log.info("Test failed: {}", className, e);
+ }
+ }
+ }
+
+ public static class TestReducer extends Reducer<IntWritable,Text,IntWritable,Text> {
+
+ @Override
+ protected void reduce(IntWritable code, Iterable<Text> tests, Reducer<IntWritable,Text,IntWritable,Text>.Context context) throws IOException,
+ InterruptedException {
+      StringBuilder result = new StringBuilder();
+ for (Text test : tests) {
+ result.append(test);
+ result.append("\n");
+ }
+ context.write(code, new Text(result.toString()));
+ }
+ }
+
+ @Override
+ public int run(String[] args) throws Exception {
+ // read a list of tests from the input, and print out the results
+    if (args.length != 2) {
+      System.err.println("Wrong number of args, expected: <input> <output>");
+      return 1;
+    }
+ Configuration conf = getConf();
+ Job job = Job.getInstance(conf, "accumulo integration test runner");
+    // read one line (one test class name) at a time; set this on the job's copy of the
+    // configuration, since Job.getInstance() clones the Configuration it is given
+    job.setInputFormatClass(NLineInputFormat.class);
+    job.getConfiguration().setInt(NLineInputFormat.LINES_PER_MAP, 1);
+
+ // run the test
+ job.setJarByClass(IntegrationTestMapReduce.class);
+ job.setMapperClass(TestMapper.class);
+
+ // group test by result code
+ job.setReducerClass(TestReducer.class);
+ job.setOutputKeyClass(IntWritable.class);
+ job.setOutputValueClass(Text.class);
+
+ FileInputFormat.addInputPath(job, new Path(args[0]));
+ FileOutputFormat.setOutputPath(job, new Path(args[1]));
+ return job.waitForCompletion(true) ? 0 : 1;
+ }
+
+ public static void main(String[] args) throws Exception {
+ System.exit(ToolRunner.run(new IntegrationTestMapReduce(), args));
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/InterruptibleScannersIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/InterruptibleScannersIT.java b/test/src/main/java/org/apache/accumulo/test/InterruptibleScannersIT.java
new file mode 100644
index 0000000..a272bc2
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/InterruptibleScannersIT.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.ActiveScan;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.functional.SlowIterator;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+// ACCUMULO-3030
+public class InterruptibleScannersIT extends AccumuloClusterHarness {
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setNumTservers(1);
+ }
+
+ @Test
+ public void test() throws Exception {
+ // make a table
+ final String tableName = getUniqueNames(1)[0];
+ final Connector conn = getConnector();
+ conn.tableOperations().create(tableName);
+ // make the world's slowest scanner
+ final Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
+ final IteratorSetting cfg = new IteratorSetting(100, SlowIterator.class);
+ // Wait long enough to be sure we can catch it, but not indefinitely.
+ SlowIterator.setSeekSleepTime(cfg, 60 * 1000);
+ scanner.addScanIterator(cfg);
+ // create a thread to interrupt the slow scan
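+    // (the scan itself runs on this, the test's main thread; the thread below interrupts it)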
+ final Thread scanThread = Thread.currentThread();
+ Thread thread = new Thread() {
+ @Override
+ public void run() {
+ try {
+          // wait until our scan shows up as an active scan (not perfect: the metadata tables could be scanned, too)
+ String tserver = conn.instanceOperations().getTabletServers().iterator().next();
+ do {
+ ArrayList<ActiveScan> scans = new ArrayList<ActiveScan>(conn.instanceOperations().getActiveScans(tserver));
+ Iterator<ActiveScan> iter = scans.iterator();
+ while (iter.hasNext()) {
+ ActiveScan scan = iter.next();
+              // Keep only scans that are against our table and owned by us
+ if (!getAdminPrincipal().equals(scan.getUser()) || !tableName.equals(scan.getTable())) {
+ iter.remove();
+ }
+ }
+
+ if (!scans.isEmpty()) {
+ // We found our scan
+ break;
+ }
+ } while (true);
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+ // BAM!
+ scanThread.interrupt();
+ }
+ };
+ thread.start();
+ try {
+ // Use the scanner, expect problems
+ Iterators.size(scanner.iterator());
+ Assert.fail("Scan should not succeed");
+ } catch (Exception ex) {} finally {
+ thread.join();
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/KeyValueEqualityIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/KeyValueEqualityIT.java b/test/src/main/java/org/apache/accumulo/test/KeyValueEqualityIT.java
new file mode 100644
index 0000000..b0734b4
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/KeyValueEqualityIT.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class KeyValueEqualityIT extends AccumuloClusterHarness {
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Test
+ public void testEquality() throws Exception {
+ Connector conn = this.getConnector();
+ final BatchWriterConfig config = new BatchWriterConfig();
+
+ final String[] tables = getUniqueNames(2);
+ final String table1 = tables[0], table2 = tables[1];
+ final TableOperations tops = conn.tableOperations();
+ tops.create(table1);
+ tops.create(table2);
+
+ final BatchWriter bw1 = conn.createBatchWriter(table1, config), bw2 = conn.createBatchWriter(table2, config);
+
+ for (int row = 0; row < 100; row++) {
+ Mutation m = new Mutation(Integer.toString(row));
+ for (int col = 0; col < 10; col++) {
+ m.put(Integer.toString(col), "", System.currentTimeMillis(), Integer.toString(col * 2));
+ }
+ bw1.addMutation(m);
+ bw2.addMutation(m);
+ }
+
+ bw1.close();
+ bw2.close();
+
+ Iterator<Entry<Key,Value>> t1 = conn.createScanner(table1, Authorizations.EMPTY).iterator(), t2 = conn.createScanner(table2, Authorizations.EMPTY)
+ .iterator();
+ while (t1.hasNext() && t2.hasNext()) {
+ // KeyValue, the implementation of Entry<Key,Value>, should support equality and hashCode properly
+ Entry<Key,Value> e1 = t1.next(), e2 = t2.next();
+ Assert.assertEquals(e1, e2);
+ Assert.assertEquals(e1.hashCode(), e2.hashCode());
+ }
+ Assert.assertFalse("table1 had more data to read", t1.hasNext());
+ Assert.assertFalse("table2 had more data to read", t2.hasNext());
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/LargeSplitRowIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/LargeSplitRowIT.java b/test/src/main/java/org/apache/accumulo/test/LargeSplitRowIT.java
new file mode 100644
index 0000000..479bb0e
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/LargeSplitRowIT.java
@@ -0,0 +1,286 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.impl.AccumuloServerException;
+import org.apache.accumulo.core.client.impl.Namespaces;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.conf.TableConfiguration;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class LargeSplitRowIT extends ConfigurableMacBase {
+  private static final Logger log = LoggerFactory.getLogger(LargeSplitRowIT.class);
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setNumTservers(1);
+
+ Map<String,String> siteConfig = new HashMap<String,String>();
+ siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "50ms");
+ cfg.setSiteConfig(siteConfig);
+ }
+
+ // User added split
+ @Test(timeout = 60 * 1000)
+ public void userAddedSplit() throws Exception {
+
+ log.info("User added split");
+
+    // make a table and lower the TABLE_MAX_END_ROW_SIZE property
+ final String tableName = getUniqueNames(1)[0];
+ final Connector conn = getConnector();
+ conn.tableOperations().create(tableName);
+ conn.tableOperations().setProperty(tableName, Property.TABLE_MAX_END_ROW_SIZE.getKey(), "1000");
+
+ // Create a BatchWriter and add a mutation to the table
+ BatchWriter batchWriter = conn.createBatchWriter(tableName, new BatchWriterConfig());
+ Mutation m = new Mutation("Row");
+ m.put("cf", "cq", "value");
+ batchWriter.addMutation(m);
+ batchWriter.close();
+
+ // Create a split point that is too large to be an end row and fill it with all 'm'
+ SortedSet<Text> partitionKeys = new TreeSet<Text>();
+    byte[] data = new byte[(int) (TableConfiguration.getMemoryInBytes(Property.TABLE_MAX_END_ROW_SIZE.getDefaultValue()) + 2)];
+ for (int i = 0; i < data.length; i++) {
+ data[i] = 'm';
+ }
+ partitionKeys.add(new Text(data));
+
+    // try to add the split point that is too large; if the split point is created, the test fails.
+ try {
+ conn.tableOperations().addSplits(tableName, partitionKeys);
+ Assert.fail();
+ } catch (AccumuloServerException e) {}
+
+ // Make sure that the information that was written to the table before we tried to add the split point is still correct
+ int counter = 0;
+ final Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
+ for (Entry<Key,Value> entry : scanner) {
+ counter++;
+ Key k = entry.getKey();
+ Assert.assertEquals("Row", k.getRow().toString());
+ Assert.assertEquals("cf", k.getColumnFamily().toString());
+ Assert.assertEquals("cq", k.getColumnQualifier().toString());
+ Assert.assertEquals("value", entry.getValue().toString());
+
+ }
+ // Make sure there is only one line in the table
+ Assert.assertEquals(1, counter);
+ }
+
+  // Test tablet server split with 250 entries that all share the same prefix
+ @Test(timeout = 60 * 1000)
+ public void automaticSplitWith250Same() throws Exception {
+ log.info("Automatic with 250 with same prefix");
+
+ // make a table and lower the configure properties
+ final String tableName = getUniqueNames(1)[0];
+ final Connector conn = getConnector();
+ conn.tableOperations().create(tableName);
+ conn.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
+ conn.tableOperations().setProperty(tableName, Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none");
+ conn.tableOperations().setProperty(tableName, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "64");
+ conn.tableOperations().setProperty(tableName, Property.TABLE_MAX_END_ROW_SIZE.getKey(), "1000");
+
+ // Create a BatchWriter and key for a table entry that is longer than the allowed size for an end row
+ // Fill this key with all m's except the last spot
+ BatchWriter batchWriter = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    byte[] data = new byte[(int) (TableConfiguration.getMemoryInBytes(Property.TABLE_MAX_END_ROW_SIZE.getDefaultValue()) + 2)];
+ for (int i = 0; i < data.length - 1; i++) {
+ data[i] = (byte) 'm';
+ }
+
+ // Make the last place in the key different for every entry added to the table
+ for (int i = 0; i < 250; i++) {
+ data[data.length - 1] = (byte) i;
+ Mutation m = new Mutation(data);
+ m.put("cf", "cq", "value");
+ batchWriter.addMutation(m);
+ }
+    // Close the BatchWriter, flush the table, and sleep briefly to give the table time to split if it needs to.
+ batchWriter.close();
+ conn.tableOperations().flush(tableName, new Text(), new Text("z"), true);
+ Thread.sleep(500);
+
+ // Make sure all the data that was put in the table is still correct
+ int count = 0;
+ final Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
+ for (Entry<Key,Value> entry : scanner) {
+ Key k = entry.getKey();
+ data[data.length - 1] = (byte) count;
+ String expected = new String(data, UTF_8);
+ Assert.assertEquals(expected, k.getRow().toString());
+ Assert.assertEquals("cf", k.getColumnFamily().toString());
+ Assert.assertEquals("cq", k.getColumnQualifier().toString());
+ Assert.assertEquals("value", entry.getValue().toString());
+ count++;
+ }
+ Assert.assertEquals(250, count);
+
+ // Make sure no splits occurred in the table
+ Assert.assertEquals(0, conn.tableOperations().listSplits(tableName).size());
+ }
+
+  // 10 0's; 10 2's; 10 4's ... 10 28's
+ @Test(timeout = 60 * 1000)
+ public void automaticSplitWithGaps() throws Exception {
+ log.info("Automatic Split With Gaps");
+
+ automaticSplit(30, 2);
+ }
+
+  // 10 0's; 10 1's; 10 2's ... 10 14's
+ @Test(timeout = 60 * 1000)
+ public void automaticSplitWithoutGaps() throws Exception {
+ log.info("Automatic Split Without Gaps");
+
+ automaticSplit(15, 1);
+ }
+
+ @Test(timeout = 60 * 1000)
+ public void automaticSplitLater() throws Exception {
+ log.info("Split later");
+ automaticSplit(15, 1);
+
+ final Connector conn = getConnector();
+
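+    // automaticSplit() created its table via getUniqueNames(); find that table again by
+    // skipping over the accumulo.* system tables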
+    String tableName = "";
+ java.util.Iterator<String> iterator = conn.tableOperations().list().iterator();
+
+ while (iterator.hasNext()) {
+ String curr = iterator.next();
+ if (!curr.startsWith(Namespaces.ACCUMULO_NAMESPACE + ".")) {
+ tableName = curr;
+ }
+ }
+
+    // Create a BatchWriter and a short 10-byte key, small enough to be a valid end row
+ BatchWriter batchWriter = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    byte[] data = new byte[10];
+
+    // Fill the key with all j's except for the last byte, which cycles through 0 to 24 for every j value
+ for (int j = 15; j < 150; j += 1) {
+ for (int i = 0; i < data.length - 1; i++) {
+ data[i] = (byte) j;
+ }
+
+ for (int i = 0; i < 25; i++) {
+ data[data.length - 1] = (byte) i;
+ Mutation m = new Mutation(data);
+ m.put("cf", "cq", "value");
+ batchWriter.addMutation(m);
+ }
+ }
+    // Close the BatchWriter and flush the table; the loop below waits for a split to occur.
+ batchWriter.close();
+ conn.tableOperations().flush(tableName, new Text(), new Text("z"), true);
+
+ // Make sure a split occurs
+ while (conn.tableOperations().listSplits(tableName).size() == 0) {
+ Thread.sleep(250);
+ }
+
+ Assert.assertTrue(0 < conn.tableOperations().listSplits(tableName).size());
+ }
+
+ private void automaticSplit(int max, int spacing) throws Exception {
+ // make a table and lower the configure properties
+ final String tableName = getUniqueNames(1)[0];
+ final Connector conn = getConnector();
+ conn.tableOperations().create(tableName);
+ conn.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
+ conn.tableOperations().setProperty(tableName, Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none");
+ conn.tableOperations().setProperty(tableName, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "64");
+ conn.tableOperations().setProperty(tableName, Property.TABLE_MAX_END_ROW_SIZE.getKey(), "1000");
+
+ // Create a BatchWriter and key for a table entry that is longer than the allowed size for an end row
+ BatchWriter batchWriter = conn.createBatchWriter(tableName, new BatchWriterConfig());
+    byte[] data = new byte[(int) (TableConfiguration.getMemoryInBytes(Property.TABLE_MAX_END_ROW_SIZE.getDefaultValue()) + 2)];
+
+    // Fill the key with all j's except for the last byte, which cycles through 0 to 9 for every j value
+ for (int j = 0; j < max; j += spacing) {
+ for (int i = 0; i < data.length - 1; i++) {
+ data[i] = (byte) j;
+ }
+
+ for (int i = 0; i < 10; i++) {
+ data[data.length - 1] = (byte) i;
+ Mutation m = new Mutation(data);
+ m.put("cf", "cq", "value");
+ batchWriter.addMutation(m);
+ }
+ }
+    // Close the BatchWriter, flush the table, and sleep briefly to give the table time to split if it needs to.
+ batchWriter.close();
+ conn.tableOperations().flush(tableName, new Text(), new Text("z"), true);
+ Thread.sleep(500);
+
+ // Make sure all the data that was put in the table is still correct
+ int count = 0;
+ int extra = 10;
+ final Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
+ for (Entry<Key,Value> entry : scanner) {
+ if (extra == 10) {
+ extra = 0;
+ for (int i = 0; i < data.length - 1; i++) {
+ data[i] = (byte) count;
+ }
+ count += spacing;
+
+ }
+ Key k = entry.getKey();
+ data[data.length - 1] = (byte) extra;
+ String expected = new String(data, UTF_8);
+ Assert.assertEquals(expected, k.getRow().toString());
+ Assert.assertEquals("cf", k.getColumnFamily().toString());
+ Assert.assertEquals("cq", k.getColumnQualifier().toString());
+ Assert.assertEquals("value", entry.getValue().toString());
+ extra++;
+ }
+ Assert.assertEquals(10, extra);
+ Assert.assertEquals(max, count);
+
+    // Make sure no splits occurred in the table
+ Assert.assertEquals(0, conn.tableOperations().listSplits(tableName).size());
+
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/MasterRepairsDualAssignmentIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/MasterRepairsDualAssignmentIT.java b/test/src/main/java/org/apache/accumulo/test/MasterRepairsDualAssignmentIT.java
new file mode 100644
index 0000000..9babeba
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/MasterRepairsDualAssignmentIT.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+
+import java.util.HashSet;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.impl.ClientContext;
+import org.apache.accumulo.core.client.impl.Credentials;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.replication.ReplicationTable;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.fate.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.master.state.MetaDataStateStore;
+import org.apache.accumulo.server.master.state.RootTabletStateStore;
+import org.apache.accumulo.server.master.state.TServerInstance;
+import org.apache.accumulo.server.master.state.TabletLocationState;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class MasterRepairsDualAssignmentIT extends ConfigurableMacBase {
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 5 * 60;
+ }
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
+ cfg.setProperty(Property.MASTER_RECOVERY_DELAY, "5s");
+ // use raw local file system so walogs sync and flush will work
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ @Test
+ public void test() throws Exception {
+ // make some tablets, spread 'em around
+ Connector c = getConnector();
+ ClientContext context = new ClientContext(c.getInstance(), new Credentials("root", new PasswordToken(ROOT_PASSWORD)), getClientConfig());
+ String table = this.getUniqueNames(1)[0];
+ c.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
+ c.securityOperations().grantTablePermission("root", RootTable.NAME, TablePermission.WRITE);
+ c.tableOperations().create(table);
+ SortedSet<Text> partitions = new TreeSet<Text>();
+ for (String part : "a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")) {
+ partitions.add(new Text(part));
+ }
+ c.tableOperations().addSplits(table, partitions);
+    // scan the metadata table until tablets show up on both tablet servers
+ Set<TServerInstance> states = new HashSet<TServerInstance>();
+ Set<TabletLocationState> oldLocations = new HashSet<TabletLocationState>();
+ MetaDataStateStore store = new MetaDataStateStore(context, null);
+ while (states.size() < 2) {
+ UtilWaitThread.sleep(250);
+ oldLocations.clear();
+ for (TabletLocationState tls : store) {
+ if (tls.current != null) {
+ states.add(tls.current);
+ oldLocations.add(tls);
+ }
+ }
+ }
+ assertEquals(2, states.size());
+ // Kill a tablet server... we don't care which one... wait for everything to be reassigned
+ cluster.killProcess(ServerType.TABLET_SERVER, cluster.getProcesses().get(ServerType.TABLET_SERVER).iterator().next());
+ Set<TServerInstance> replStates = new HashSet<>();
+ // Find out which tablet server remains
+ while (true) {
+ UtilWaitThread.sleep(1000);
+ states.clear();
+ replStates.clear();
+ boolean allAssigned = true;
+ for (TabletLocationState tls : store) {
+ if (tls != null && tls.current != null) {
+ states.add(tls.current);
+ } else if (tls != null && tls.extent.equals(new KeyExtent(new Text(ReplicationTable.ID), null, null))) {
+ replStates.add(tls.current);
+ } else {
+ allAssigned = false;
+ }
+ }
+ System.out.println(states + " size " + states.size() + " allAssigned " + allAssigned);
+      if (states.size() != 2 && allAssigned)
+ break;
+ }
+ assertEquals(1, replStates.size());
+ assertEquals(1, states.size());
+    // pick a tablet that was hosted on the tablet server we killed
+ TabletLocationState moved = null;
+ for (TabletLocationState old : oldLocations) {
+ if (!states.contains(old.current)) {
+ moved = old;
+ }
+ }
+ assertNotEquals(null, moved);
+    // throw a mutation in as if we were the dying tablet server
+ BatchWriter bw = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+ Mutation assignment = new Mutation(moved.extent.getMetadataEntry());
+ moved.current.putLocation(assignment);
+ bw.addMutation(assignment);
+ bw.close();
+ // wait for the master to fix the problem
+ waitForCleanStore(store);
+ // now jam up the metadata table
+ bw = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+ assignment = new Mutation(new KeyExtent(new Text(MetadataTable.ID), null, null).getMetadataEntry());
+ moved.current.putLocation(assignment);
+ bw.addMutation(assignment);
+ bw.close();
+ waitForCleanStore(new RootTabletStateStore(context, null));
+ }
+
+ private void waitForCleanStore(MetaDataStateStore store) {
+ while (true) {
+ try {
+ Iterators.size(store.iterator());
+ } catch (Exception ex) {
+ System.out.println(ex);
+ UtilWaitThread.sleep(250);
+ continue;
+ }
+ break;
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/MetaConstraintRetryIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/MetaConstraintRetryIT.java b/test/src/main/java/org/apache/accumulo/test/MetaConstraintRetryIT.java
new file mode 100644
index 0000000..727859f
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/MetaConstraintRetryIT.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.accumulo.test;
+
+import org.apache.accumulo.core.client.impl.ClientContext;
+import org.apache.accumulo.core.client.impl.Credentials;
+import org.apache.accumulo.core.client.impl.Writer;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.core.tabletserver.thrift.ConstraintViolationException;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class MetaConstraintRetryIT extends AccumuloClusterHarness {
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 30;
+ }
+
+ // a test for ACCUMULO-3096
+ @Test(expected = ConstraintViolationException.class)
+ public void test() throws Exception {
+
+ getConnector().securityOperations().grantTablePermission(getAdminPrincipal(), MetadataTable.NAME, TablePermission.WRITE);
+
+ Credentials credentials = new Credentials(getAdminPrincipal(), getAdminToken());
+ ClientContext context = new ClientContext(getConnector().getInstance(), credentials, cluster.getClientConfig());
+ Writer w = new Writer(context, MetadataTable.ID);
+ KeyExtent extent = new KeyExtent(new Text("5"), null, null);
+
+ Mutation m = new Mutation(extent.getMetadataEntry());
+    // unknown columns should cause a constraint violation
+ m.put("badcolfam", "badcolqual", "3");
+
+ try {
+ MetadataTableUtil.update(w, null, m);
+ } catch (RuntimeException e) {
+ if (e.getCause().getClass().equals(ConstraintViolationException.class)) {
+ throw (ConstraintViolationException) e.getCause();
+ }
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/MetaGetsReadersIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/MetaGetsReadersIT.java b/test/src/main/java/org/apache/accumulo/test/MetaGetsReadersIT.java
new file mode 100644
index 0000000..84a5996
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/MetaGetsReadersIT.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.accumulo.test.functional.SlowIterator;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class MetaGetsReadersIT extends ConfigurableMacBase {
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setNumTservers(1);
+ cfg.setProperty(Property.TSERV_SCAN_MAX_OPENFILES, "2");
+ cfg.setProperty(Property.TABLE_BLOCKCACHE_ENABLED, "false");
+ }
+
+ private static Thread slowScan(final Connector c, final String tableName, final AtomicBoolean stop) {
+ Thread thread = new Thread() {
+ @Override
+ public void run() {
+ try {
+          while (!stop.get()) {
+ Scanner s = c.createScanner(tableName, Authorizations.EMPTY);
+ IteratorSetting is = new IteratorSetting(50, SlowIterator.class);
+ SlowIterator.setSleepTime(is, 10);
+ s.addScanIterator(is);
+ Iterator<Entry<Key,Value>> iterator = s.iterator();
+            while (iterator.hasNext() && !stop.get()) {
+ iterator.next();
+ }
+ }
+ } catch (Exception ex) {
+ log.trace("{}", ex.getMessage(), ex);
+ stop.set(true);
+ }
+ }
+ };
+ return thread;
+ }
+
+ @Test(timeout = 2 * 60 * 1000)
+ public void test() throws Exception {
+ final String tableName = getUniqueNames(1)[0];
+ final Connector c = getConnector();
+ c.tableOperations().create(tableName);
+ Random random = new Random();
+ BatchWriter bw = c.createBatchWriter(tableName, null);
+ for (int i = 0; i < 50000; i++) {
+ byte[] row = new byte[100];
+ random.nextBytes(row);
+ Mutation m = new Mutation(row);
+ m.put("", "", "");
+ bw.addMutation(m);
+ }
+ bw.close();
+ c.tableOperations().flush(tableName, null, null, true);
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ Thread t1 = slowScan(c, tableName, stop);
+ t1.start();
+ Thread t2 = slowScan(c, tableName, stop);
+ t2.start();
+ UtilWaitThread.sleep(500);
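+    // While the slow scans tie up the tserver's scan resources, a metadata table scan should
+    // still return promptly; time it to show that metadata readers are not starved.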
+ long now = System.currentTimeMillis();
+ Scanner m = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ Iterators.size(m.iterator());
+ long delay = System.currentTimeMillis() - now;
+ System.out.println("Delay = " + delay);
+ assertTrue("metadata table scan was slow", delay < 1000);
+ assertFalse(stop.get());
+ stop.set(true);
+ t1.interrupt();
+ t2.interrupt();
+ t1.join();
+ t2.join();
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/MetaSplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/MetaSplitIT.java b/test/src/main/java/org/apache/accumulo/test/MetaSplitIT.java
new file mode 100644
index 0000000..0bc78fb
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/MetaSplitIT.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class MetaSplitIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(MetaSplitIT.class);
+
+ private Collection<Text> metadataSplits = null;
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 3 * 60;
+ }
+
+ @Before
+ public void saveMetadataSplits() throws Exception {
+ if (ClusterType.STANDALONE == getClusterType()) {
+ Connector conn = getConnector();
+ Collection<Text> splits = conn.tableOperations().listSplits(MetadataTable.NAME);
+ // We expect a single split
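+      // (a freshly initialized metadata table has a single split at '~', separating the tablet
+      // entries from the ~del/~repl sections)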
+ if (!splits.equals(Arrays.asList(new Text("~")))) {
+ log.info("Existing splits on metadata table. Saving them, and applying single original split of '~'");
+ metadataSplits = splits;
+ conn.tableOperations().merge(MetadataTable.NAME, null, null);
+ conn.tableOperations().addSplits(MetadataTable.NAME, new TreeSet<Text>(Collections.singleton(new Text("~"))));
+ }
+ }
+ }
+
+ @After
+ public void restoreMetadataSplits() throws Exception {
+ if (null != metadataSplits) {
+ log.info("Restoring split on metadata table");
+ Connector conn = getConnector();
+ conn.tableOperations().merge(MetadataTable.NAME, null, null);
+ conn.tableOperations().addSplits(MetadataTable.NAME, new TreeSet<Text>(metadataSplits));
+ }
+ }
+
+ @Test(expected = AccumuloException.class)
+ public void testRootTableSplit() throws Exception {
+ TableOperations opts = getConnector().tableOperations();
+ SortedSet<Text> splits = new TreeSet<Text>();
+ splits.add(new Text("5"));
+ opts.addSplits(RootTable.NAME, splits);
+ }
+
+ @Test
+ public void testRootTableMerge() throws Exception {
+ TableOperations opts = getConnector().tableOperations();
+ opts.merge(RootTable.NAME, null, null);
+ }
+
+ private void addSplits(TableOperations opts, String... points) throws Exception {
+ SortedSet<Text> splits = new TreeSet<Text>();
+ for (String point : points) {
+ splits.add(new Text(point));
+ }
+ opts.addSplits(MetadataTable.NAME, splits);
+ }
+
+ @Test
+ public void testMetadataTableSplit() throws Exception {
+ TableOperations opts = getConnector().tableOperations();
+ for (int i = 1; i <= 10; i++) {
+ opts.create(Integer.toString(i));
+ }
+ try {
+ opts.merge(MetadataTable.NAME, new Text("01"), new Text("02"));
+ checkMetadataSplits(1, opts);
+ addSplits(opts, "4 5 6 7 8".split(" "));
+ checkMetadataSplits(6, opts);
+ opts.merge(MetadataTable.NAME, new Text("6"), new Text("9"));
+ checkMetadataSplits(4, opts);
+ addSplits(opts, "44 55 66 77 88".split(" "));
+ checkMetadataSplits(9, opts);
+ opts.merge(MetadataTable.NAME, new Text("5"), new Text("7"));
+ checkMetadataSplits(6, opts);
+ opts.merge(MetadataTable.NAME, null, null);
+ checkMetadataSplits(0, opts);
+ } finally {
+ for (int i = 1; i <= 10; i++) {
+ opts.delete(Integer.toString(i));
+ }
+ }
+ }
+
+ private static void checkMetadataSplits(int numSplits, TableOperations opts) throws AccumuloSecurityException, TableNotFoundException, AccumuloException,
+ InterruptedException {
+ for (int i = 0; i < 10; i++) {
+ if (opts.listSplits(MetadataTable.NAME).size() == numSplits) {
+ break;
+ }
+ Thread.sleep(2000);
+ }
+ Collection<Text> splits = opts.listSplits(MetadataTable.NAME);
+ assertEquals("Actual metadata table splits: " + splits, numSplits, splits.size());
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/MissingWalHeaderCompletesRecoveryIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/MissingWalHeaderCompletesRecoveryIT.java b/test/src/main/java/org/apache/accumulo/test/MissingWalHeaderCompletesRecoveryIT.java
new file mode 100644
index 0000000..b3bf196
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/MissingWalHeaderCompletesRecoveryIT.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.io.File;
+import java.util.UUID;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.core.tabletserver.log.LogEntry;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.accumulo.tserver.log.DfsLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.io.Text;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Iterables;
+
+/**
+ * Verifies that tablet recovery completes when a referenced write-ahead log is empty or contains only a truncated header.
+ */
+public class MissingWalHeaderCompletesRecoveryIT extends ConfigurableMacBase {
+ private static final Logger log = LoggerFactory.getLogger(MissingWalHeaderCompletesRecoveryIT.class);
+
+ private boolean rootHasWritePermission;
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration conf) {
+ cfg.setNumTservers(1);
+ cfg.setProperty(Property.MASTER_RECOVERY_DELAY, "1s");
+ // Make sure the GC doesn't delete the file before the metadata reference is added
+ cfg.setProperty(Property.GC_CYCLE_START, "999999s");
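+ // Use the raw local file system so the WAL files this test hand-creates sync and read back exactly as written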
+ conf.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ @Before
+ public void setupMetadataPermission() throws Exception {
+ Connector conn = getConnector();
+ rootHasWritePermission = conn.securityOperations().hasTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
+ if (!rootHasWritePermission) {
+ conn.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
+ // Make sure it propagates through ZK
+ Thread.sleep(5000);
+ }
+ }
+
+ @After
+ public void resetMetadataPermission() throws Exception {
+ Connector conn = getConnector();
+ // Final state doesn't match the original
+ if (rootHasWritePermission != conn.securityOperations().hasTablePermission("root", MetadataTable.NAME, TablePermission.WRITE)) {
+ if (rootHasWritePermission) {
+ // root had write permission when starting, ensure root still does
+ conn.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
+ } else {
+ // root did not have write permission when starting, ensure that it does not
+ conn.securityOperations().revokeTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
+ }
+ }
+ }
+
+ @Test
+ public void testEmptyWalRecoveryCompletes() throws Exception {
+ Connector conn = getConnector();
+ MiniAccumuloClusterImpl cluster = getCluster();
+ FileSystem fs = cluster.getFileSystem();
+
+ // Fake out something that looks like host:port, it's irrelevant
+ String fakeServer = "127.0.0.1:12345";
+
+ File walogs = new File(cluster.getConfig().getAccumuloDir(), ServerConstants.WAL_DIR);
+ File walogServerDir = new File(walogs, fakeServer.replace(':', '+'));
+ File emptyWalog = new File(walogServerDir, UUID.randomUUID().toString());
+
+ log.info("Created empty WAL at " + emptyWalog.toURI());
+
+ fs.create(new Path(emptyWalog.toURI())).close();
+
+ Assert.assertTrue("root user did not have write permission to metadata table",
+ conn.securityOperations().hasTablePermission("root", MetadataTable.NAME, TablePermission.WRITE));
+
+ String tableName = getUniqueNames(1)[0];
+ conn.tableOperations().create(tableName);
+
+ String tableId = conn.tableOperations().tableIdMap().get(tableName);
+ Assert.assertNotNull("Table ID was null", tableId);
+
+ LogEntry logEntry = new LogEntry(new KeyExtent(new Text(tableId), null, null), 0, fakeServer, emptyWalog.toURI().toString());
+
+ log.info("Taking {} offline", tableName);
+ conn.tableOperations().offline(tableName, true);
+
+ log.info("{} is offline", tableName);
+
+ Text row = MetadataSchema.TabletsSection.getRow(new Text(tableId), null);
+ Mutation m = new Mutation(row);
+ m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
+
+ BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+ bw.addMutation(m);
+ bw.close();
+
+ log.info("Bringing {} online", tableName);
+ conn.tableOperations().online(tableName, true);
+
+ log.info("{} is online", tableName);
+
+ // Reading the table implies that recovery completed successfully (the empty file was ignored);
+ // otherwise the tablet will never come online and we won't be able to read it.
+ Scanner s = conn.createScanner(tableName, Authorizations.EMPTY);
+ Assert.assertEquals(0, Iterables.size(s));
+ }
+
+ @Test
+ public void testPartialHeaderWalRecoveryCompletes() throws Exception {
+ Connector conn = getConnector();
+ MiniAccumuloClusterImpl cluster = getCluster();
+ FileSystem fs = getCluster().getFileSystem();
+
+ // Fake out something that looks like host:port, it's irrelevant
+ String fakeServer = "127.0.0.1:12345";
+
+ File walogs = new File(cluster.getConfig().getAccumuloDir(), ServerConstants.WAL_DIR);
+ File walogServerDir = new File(walogs, fakeServer.replace(':', '+'));
+ File partialHeaderWalog = new File(walogServerDir, UUID.randomUUID().toString());
+
+ log.info("Created WAL with malformed header at " + partialHeaderWalog.toURI());
+
+ // Write half of the header
+ FSDataOutputStream wal = fs.create(new Path(partialHeaderWalog.toURI()));
+ wal.write(DfsLogger.LOG_FILE_HEADER_V3.getBytes(UTF_8), 0, DfsLogger.LOG_FILE_HEADER_V3.length() / 2);
+ wal.close();
+
+ Assert.assertTrue("root user did not have write permission to metadata table",
+ conn.securityOperations().hasTablePermission("root", MetadataTable.NAME, TablePermission.WRITE));
+
+ String tableName = getUniqueNames(1)[0];
+ conn.tableOperations().create(tableName);
+
+ String tableId = conn.tableOperations().tableIdMap().get(tableName);
+ Assert.assertNotNull("Table ID was null", tableId);
+
+ LogEntry logEntry = new LogEntry(null, 0, fakeServer, partialHeaderWalog.toURI().toString());
+
+ log.info("Taking {} offline", tableName);
+ conn.tableOperations().offline(tableName, true);
+
+ log.info("{} is offline", tableName);
+
+ Text row = MetadataSchema.TabletsSection.getRow(new Text(tableId), null);
+ Mutation m = new Mutation(row);
+ m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
+
+ BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+ bw.addMutation(m);
+ bw.close();
+
+ log.info("Bringing {} online", tableName);
+ conn.tableOperations().online(tableName, true);
+
+ log.info("{} is online", tableName);
+
+ // Reading the table implies that recovery completed successfully (the partial-header file was ignored);
+ // otherwise the tablet will never come online and we won't be able to read it.
+ Scanner s = conn.createScanner(tableName, Authorizations.EMPTY);
+ Assert.assertEquals(0, Iterables.size(s));
+ }
+
+}
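
For contrast with the two failure-mode WALs this class fabricates, a stub carrying the complete header would be written the same way; a minimal sketch reusing the test's fs and walogServerDir (both assumed in scope):

    // Hypothetical contrast case: a WAL stub with the complete V3 header
    File fullHeaderWalog = new File(walogServerDir, UUID.randomUUID().toString());
    byte[] header = DfsLogger.LOG_FILE_HEADER_V3.getBytes(UTF_8);
    try (FSDataOutputStream out = fs.create(new Path(fullHeaderWalog.toURI()))) {
      out.write(header, 0, header.length); // the whole header, unlike the half written above
    }
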
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/RewriteTabletDirectoriesIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/RewriteTabletDirectoriesIT.java b/test/src/main/java/org/apache/accumulo/test/RewriteTabletDirectoriesIT.java
new file mode 100644
index 0000000..b3f8959
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/RewriteTabletDirectoriesIT.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.util.Collections;
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.init.Initialize;
+import org.apache.accumulo.server.util.Admin;
+import org.apache.accumulo.server.util.RandomizeVolumes;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+// ACCUMULO-3263
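+// Verifies that randomizeVolumes rewrites tablet directory entries so they spread across a newly added volume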
+public class RewriteTabletDirectoriesIT extends ConfigurableMacBase {
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 4 * 60;
+ }
+
+ private Path v1, v2;
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ File baseDir = cfg.getDir();
+ File volDirBase = new File(baseDir, "volumes");
+ File v1f = new File(volDirBase, "v1");
+ File v2f = new File(volDirBase, "v2");
+ v1 = new Path("file://" + v1f.getAbsolutePath());
+ v2 = new Path("file://" + v2f.getAbsolutePath());
+
+ // Use a VolumeChooser that spreads files evenly across volumes
+ cfg.setProperty(Property.GENERAL_VOLUME_CHOOSER, FairVolumeChooser.class.getName());
+ // Start MAC with only the first volume; the second is added to the instance later in the test
+ cfg.setProperty(Property.INSTANCE_VOLUMES, v1.toString());
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ super.configure(cfg, hadoopCoreSite);
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector c = getConnector();
+ c.securityOperations().grantTablePermission(c.whoami(), MetadataTable.NAME, TablePermission.WRITE);
+ final String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+
+ // Write some data to a table and add some splits
+ BatchWriter bw = c.createBatchWriter(tableName, null);
+ final SortedSet<Text> splits = new TreeSet<Text>();
+ for (String split : "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",")) {
+ splits.add(new Text(split));
+ Mutation m = new Mutation(new Text(split));
+ m.put(new byte[] {}, new byte[] {}, new byte[] {});
+ bw.addMutation(m);
+ }
+ bw.close();
+ c.tableOperations().addSplits(tableName, splits);
+
+ BatchScanner scanner = c.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1);
+ DIRECTORY_COLUMN.fetch(scanner);
+ String tableId = c.tableOperations().tableIdMap().get(tableName);
+ assertNotNull("TableID for " + tableName + " was null", tableId);
+ scanner.setRanges(Collections.singletonList(TabletsSection.getRange(tableId)));
+ // verify the directory entries are all on v1, make a few entries relative
+ bw = c.createBatchWriter(MetadataTable.NAME, null);
+ int count = 0;
+ for (Entry<Key,Value> entry : scanner) {
+ assertTrue("Expected " + entry.getValue() + " to contain " + v1, entry.getValue().toString().contains(v1.toString()));
+ count++;
+ if (count % 2 == 0) {
+ String parts[] = entry.getValue().toString().split("/");
+ Key key = entry.getKey();
+ Mutation m = new Mutation(key.getRow());
+ m.put(key.getColumnFamily(), key.getColumnQualifier(), new Value((Path.SEPARATOR + parts[parts.length - 1]).getBytes()));
+ bw.addMutation(m);
+ }
+ }
+ bw.close();
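+ // one tablet per split plus the default tablet (null end row)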
+ assertEquals(splits.size() + 1, count);
+
+ // This should fail: only one volume
+ assertEquals(1, cluster.exec(RandomizeVolumes.class, "-z", cluster.getZooKeepers(), "-i", c.getInstance().getInstanceName(), "-t", tableName).waitFor());
+
+ cluster.stop();
+
+ // add the 2nd volume
+ Configuration conf = new Configuration(false);
+ conf.addResource(new Path(cluster.getConfig().getConfDir().toURI().toString(), "accumulo-site.xml"));
+ conf.set(Property.INSTANCE_VOLUMES.getKey(), v1.toString() + "," + v2.toString());
+ BufferedOutputStream fos = new BufferedOutputStream(new FileOutputStream(new File(cluster.getConfig().getConfDir(), "accumulo-site.xml")));
+ conf.writeXml(fos);
+ fos.close();
+
+ // initialize volume
+ assertEquals(0, cluster.exec(Initialize.class, "--add-volumes").waitFor());
+ cluster.start();
+ c = getConnector();
+
+ // change the directory entries
+ assertEquals(0, cluster.exec(Admin.class, "randomizeVolumes", "-t", tableName).waitFor());
+
+ // verify a more equal sharing
+ int v1Count = 0, v2Count = 0;
+ for (Entry<Key,Value> entry : scanner) {
+ if (entry.getValue().toString().contains(v1.toString())) {
+ v1Count++;
+ }
+ if (entry.getValue().toString().contains(v2.toString())) {
+ v2Count++;
+ }
+ }
+
+ log.info("Count for volume1: " + v1Count);
+ log.info("Count for volume2: " + v2Count);
+
+ assertEquals(splits.size() + 1, v1Count + v2Count);
+ // a fair chooser will differ by less than count(volumes)
+ assertTrue("Expected the number of files to differ between volumes by less than 10. " + v1Count + " " + v2Count, Math.abs(v1Count - v2Count) < 2);
+ // verify we can read the old data
+ count = 0;
+ for (Entry<Key,Value> entry : c.createScanner(tableName, Authorizations.EMPTY)) {
+ assertTrue("Found unexpected entry in table: " + entry, splits.contains(entry.getKey().getRow()));
+ count++;
+ }
+ assertEquals(splits.size(), count);
+ }
+}
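
FairVolumeChooser is referenced above but is not part of this hunk; the round-robin selection it needs could look like the sketch below. The class body is an assumption for illustration, not this commit's implementation, and the real class must implement this branch's VolumeChooser interface (also not shown here):

    public class RoundRobinChooserSketch {
      private final AtomicInteger next = new AtomicInteger();

      public String choose(String[] options) {
        // mask the sign bit so counter overflow cannot produce a negative index
        return options[(next.getAndIncrement() & Integer.MAX_VALUE) % options.length];
      }
    }
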
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/ScanIteratorIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/ScanIteratorIT.java b/test/src/main/java/org/apache/accumulo/test/ScanIteratorIT.java
new file mode 100644
index 0000000..00ac235
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/ScanIteratorIT.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.Collections;
+import java.util.Map;
+
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.ScannerBase;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.functional.AuthsIterator;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ScanIteratorIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(ScanIteratorIT.class);
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setNumTservers(1);
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ private Connector connector;
+ private String tableName;
+ private String user;
+ private boolean saslEnabled;
+
+ @Before
+ public void setup() throws Exception {
+ connector = getConnector();
+ tableName = getUniqueNames(1)[0];
+
+ connector.tableOperations().create(tableName);
+ ClientConfiguration clientConfig = cluster.getClientConfig();
+ ClusterUser clusterUser = getUser(0);
+ user = clusterUser.getPrincipal();
+ PasswordToken userToken;
+ if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ userToken = null;
+ saslEnabled = true;
+ } else {
+ userToken = new PasswordToken(clusterUser.getPassword());
+ saslEnabled = false;
+ }
+ if (connector.securityOperations().listLocalUsers().contains(user)) {
+ log.info("Dropping {}", user);
+ connector.securityOperations().dropLocalUser(user);
+ }
+ connector.securityOperations().createLocalUser(user, userToken);
+ connector.securityOperations().grantTablePermission(user, tableName, TablePermission.READ);
+ connector.securityOperations().grantTablePermission(user, tableName, TablePermission.WRITE);
+ connector.securityOperations().changeUserAuthorizations(user, AuthsIterator.AUTHS);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ if (null != user) {
+ if (saslEnabled) {
+ ClusterUser rootUser = getAdminUser();
+ UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+ }
+ connector.securityOperations().dropLocalUser(user);
+ }
+ }
+
+ @Test
+ public void testAuthsPresentInIteratorEnvironment() throws Exception {
+ runTest(AuthsIterator.AUTHS, false);
+ }
+
+ @Test
+ public void testAuthsNotPresentInIteratorEnvironment() throws Exception {
+ runTest(new Authorizations("B"), true);
+ }
+
+ @Test
+ public void testEmptyAuthsInIteratorEnvironment() throws Exception {
+ runTest(Authorizations.EMPTY, true);
+ }
+
+ private void runTest(ScannerBase scanner, Authorizations auths, boolean shouldFail) throws AccumuloSecurityException, AccumuloException,
+ TableNotFoundException {
+ int count = 0;
+ for (Map.Entry<Key,Value> entry : scanner) {
+ assertEquals(shouldFail ? AuthsIterator.FAIL : AuthsIterator.SUCCESS, entry.getKey().getRow().toString());
+ count++;
+ }
+
+ assertEquals(1, count);
+ }
+
+ private void runTest(Authorizations auths, boolean shouldFail) throws Exception {
+ ClusterUser clusterUser = getUser(0);
+ Connector userC = getCluster().getConnector(clusterUser.getPrincipal(), clusterUser.getToken());
+ writeTestMutation(userC);
+
+ IteratorSetting setting = new IteratorSetting(10, AuthsIterator.class);
+
+ Scanner scanner = userC.createScanner(tableName, auths);
+ scanner.addScanIterator(setting);
+
+ BatchScanner batchScanner = userC.createBatchScanner(tableName, auths, 1);
+ batchScanner.setRanges(Collections.singleton(new Range("1")));
+ batchScanner.addScanIterator(setting);
+
+ runTest(scanner, auths, shouldFail);
+ runTest(batchScanner, auths, shouldFail);
+
+ scanner.close();
+ batchScanner.close();
+ }
+
+ private void writeTestMutation(Connector userC) throws TableNotFoundException, MutationsRejectedException {
+ BatchWriter batchWriter = userC.createBatchWriter(tableName, new BatchWriterConfig());
+ Mutation m = new Mutation("1");
+ m.put(new Text("2"), new Text("3"), new Value("".getBytes()));
+ batchWriter.addMutation(m);
+ batchWriter.flush();
+ batchWriter.close();
+
+ }
+
+}
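
The scan-time iterator attachment above is the standard pattern for any iterator; a minimal sketch using a stock filter from core (connector, tableName, and auths assumed in scope):

    IteratorSetting regex = new IteratorSetting(20, "rowMatch", RegExFilter.class);
    RegExFilter.setRegexs(regex, "1.*", null, null, null, false); // row regex only
    Scanner scanner = connector.createScanner(tableName, auths);
    scanner.addScanIterator(regex); // applies to this scanner only, not the table config
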
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/ShellConfigIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/ShellConfigIT.java b/test/src/main/java/org/apache/accumulo/test/ShellConfigIT.java
new file mode 100644
index 0000000..4f83668
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/ShellConfigIT.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.nio.charset.StandardCharsets;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.KerberosToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.harness.conf.StandaloneAccumuloClusterConfiguration;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.test.ShellServerIT.TestShell;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class ShellConfigIT extends AccumuloClusterHarness {
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 30;
+ }
+
+ private String origPropValue;
+
+ @Before
+ public void checkProperty() throws Exception {
+ Connector conn = getConnector();
+ // TABLE_VOLUME_CHOOSER is a valid property that can be updated in ZK, whereas the crypto properties are not.
+ // This lets us run this test more generically rather than forcibly needing to update some property in accumulo-site.xml
+ origPropValue = conn.instanceOperations().getSystemConfiguration().get(Property.TABLE_VOLUME_CHOOSER.getKey());
+ conn.instanceOperations().setProperty(Property.TABLE_VOLUME_CHOOSER.getKey(), FairVolumeChooser.class.getName());
+ }
+
+ @After
+ public void resetProperty() throws Exception {
+ if (null != origPropValue) {
+ Connector conn = getConnector();
+ conn.instanceOperations().setProperty(Property.TABLE_VOLUME_CHOOSER.getKey(), origPropValue);
+ }
+ }
+
+ @Test
+ public void experimentalPropTest() throws Exception {
+ // ensure experimental props do not show up in config output unless set
+
+ AuthenticationToken token = getAdminToken();
+ File clientConfFile = null;
+ switch (getClusterType()) {
+ case MINI:
+ MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) getCluster();
+ clientConfFile = mac.getConfig().getClientConfFile();
+ break;
+ case STANDALONE:
+ StandaloneAccumuloClusterConfiguration standaloneConf = (StandaloneAccumuloClusterConfiguration) getClusterConfiguration();
+ clientConfFile = standaloneConf.getClientConfFile();
+ break;
+ default:
+ Assert.fail("Unknown cluster type");
+ }
+
+ Assert.assertNotNull(clientConfFile);
+
+ TestShell ts = null;
+ if (token instanceof PasswordToken) {
+ String passwd = new String(((PasswordToken) token).getPassword(), StandardCharsets.UTF_8);
+ ts = new TestShell(getAdminPrincipal(), passwd, getCluster().getInstanceName(), getCluster().getZooKeepers(), clientConfFile);
+ } else if (token instanceof KerberosToken) {
+ ts = new TestShell(getAdminPrincipal(), null, getCluster().getInstanceName(), getCluster().getZooKeepers(), clientConfFile);
+ } else {
+ Assert.fail("Unknown token type");
+ }
+
+ assertTrue(Property.TABLE_VOLUME_CHOOSER.isExperimental());
+ assertTrue(Property.CRYPTO_CIPHER_ALGORITHM_NAME.isExperimental());
+
+ String configOutput = ts.exec("config");
+
+ assertTrue(configOutput.contains(Property.TABLE_VOLUME_CHOOSER.getKey()));
+ assertFalse(configOutput.contains(Property.CRYPTO_CIPHER_ALGORITHM_NAME.getKey()));
+ }
+}
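
The two isExperimental() assertions generalize to any property; a minimal sketch listing every experimental property on this branch:

    for (Property p : Property.values()) {
      if (p.isExperimental()) {
        System.out.println(p.getKey() + " is experimental; 'config' hides it unless it has been set");
      }
    }
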
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java b/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java
new file mode 100644
index 0000000..7740492
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/ShellServerIT.java
@@ -0,0 +1,1609 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.lang.reflect.Constructor;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+
+import jline.console.ConsoleReader;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.client.impl.Namespaces;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.KerberosToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.file.FileOperations;
+import org.apache.accumulo.core.file.FileSKVWriter;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
+import org.apache.accumulo.shell.Shell;
+import org.apache.accumulo.test.functional.SlowIterator;
+import org.apache.accumulo.tracer.TraceServer;
+import org.apache.commons.configuration.ConfigurationException;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.tools.DistCp;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Iterators;
+
+public class ShellServerIT extends SharedMiniClusterBase {
+ public static class TestOutputStream extends OutputStream {
+ StringBuilder sb = new StringBuilder();
+
+ @Override
+ public void write(int b) throws IOException {
+ sb.append((char) (0xff & b));
+ }
+
+ public String get() {
+ return sb.toString();
+ }
+
+ public void clear() {
+ sb.setLength(0);
+ }
+ }
+
+ private static final Logger log = LoggerFactory.getLogger(ShellServerIT.class);
+
+ public static class StringInputStream extends InputStream {
+ private String source = "";
+ private int offset = 0;
+
+ @Override
+ public int read() throws IOException {
+ if (offset == source.length())
+ return '\n';
+ else
+ return source.charAt(offset++);
+ }
+
+ public void set(String other) {
+ source = other;
+ offset = 0;
+ }
+ }
+
+ private static abstract class ErrorMessageCallback {
+ public abstract String getErrorMessage();
+ }
+
+ private static class NoOpErrorMessageCallback extends ErrorMessageCallback {
+ private static final String empty = "";
+
+ @Override
+ public String getErrorMessage() {
+ return empty;
+ }
+ }
+
+ public static class TestShell {
+ public TestOutputStream output;
+ public StringInputStream input;
+ public Shell shell;
+
+ TestShell(String user, String rootPass, String instanceName, String zookeepers, File configFile) throws IOException {
+ ClientConfiguration clientConf;
+ try {
+ clientConf = new ClientConfiguration(configFile);
+ } catch (ConfigurationException e) {
+ throw new IOException(e);
+ }
+ // start the shell
+ output = new TestOutputStream();
+ input = new StringInputStream();
+ PrintWriter pw = new PrintWriter(new OutputStreamWriter(output));
+ shell = new Shell(new ConsoleReader(input, output), pw);
+ shell.setLogErrorsToConsole();
+ if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ // Pull the kerberos principal out when we're using SASL
+ shell.config("-u", user, "-z", instanceName, zookeepers, "--config-file", configFile.getAbsolutePath());
+ } else {
+ shell.config("-u", user, "-p", rootPass, "-z", instanceName, zookeepers, "--config-file", configFile.getAbsolutePath());
+ }
+ exec("quit", true);
+ shell.start();
+ shell.setExit(false);
+ }
+
+ String exec(String cmd) throws IOException {
+ output.clear();
+ shell.execCommand(cmd, true, true);
+ return output.get();
+ }
+
+ String exec(String cmd, boolean expectGoodExit) throws IOException {
+ return exec(cmd, expectGoodExit, noop);
+ }
+
+ String exec(String cmd, boolean expectGoodExit, ErrorMessageCallback callback) throws IOException {
+ String result = exec(cmd);
+ if (expectGoodExit)
+ assertGoodExit("", true, callback);
+ else
+ assertBadExit("", true, callback);
+ return result;
+ }
+
+ String exec(String cmd, boolean expectGoodExit, String expectString) throws IOException {
+ return exec(cmd, expectGoodExit, expectString, noop);
+ }
+
+ String exec(String cmd, boolean expectGoodExit, String expectString, ErrorMessageCallback callback) throws IOException {
+ return exec(cmd, expectGoodExit, expectString, true, callback);
+ }
+
+ String exec(String cmd, boolean expectGoodExit, String expectString, boolean stringPresent) throws IOException {
+ return exec(cmd, expectGoodExit, expectString, stringPresent, noop);
+ }
+
+ String exec(String cmd, boolean expectGoodExit, String expectString, boolean stringPresent, ErrorMessageCallback callback) throws IOException {
+ String result = exec(cmd);
+ if (expectGoodExit)
+ assertGoodExit(expectString, stringPresent, callback);
+ else
+ assertBadExit(expectString, stringPresent, callback);
+ return result;
+ }
+
+ void assertGoodExit(String s, boolean stringPresent) {
+ assertGoodExit(s, stringPresent, noop);
+ }
+
+ void assertGoodExit(String s, boolean stringPresent, ErrorMessageCallback callback) {
+ Shell.log.info(output.get());
+ if (0 != shell.getExitCode()) {
+ String errorMsg = callback.getErrorMessage();
+ assertEquals(errorMsg, 0, shell.getExitCode());
+ }
+
+ if (s.length() > 0)
+ assertEquals("Expected '" + s + "' present=" + stringPresent + " in output: " + output.get(), stringPresent, output.get().contains(s));
+ }
+
+ void assertBadExit(String s, boolean stringPresent, ErrorMessageCallback callback) {
+ Shell.log.debug(output.get());
+ if (0 == shell.getExitCode()) {
+ String errorMsg = callback.getErrorMessage();
+ assertTrue(errorMsg, shell.getExitCode() > 0);
+ }
+
+ if (s.length() > 0)
+ assertEquals("Expected '" + s + "' present=" + stringPresent + " in output: " + output.get(), stringPresent, output.get().contains(s));
+ shell.resetExitCode();
+ }
+ }
+
+ private static final NoOpErrorMessageCallback noop = new NoOpErrorMessageCallback();
+
+ private TestShell ts;
+
+ private static Process traceProcess;
+ private static String rootPath;
+
+ @Rule
+ public TestName name = new TestName();
+
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ rootPath = getMiniClusterDir().getAbsolutePath();
+
+ // history file is updated in $HOME
+ System.setProperty("HOME", rootPath);
+ System.setProperty("hadoop.tmp.dir", System.getProperty("user.dir") + "/target/hadoop-tmp");
+
+ traceProcess = getCluster().exec(TraceServer.class);
+
+ Connector conn = getCluster().getConnector(getPrincipal(), getToken());
+ TableOperations tops = conn.tableOperations();
+
+ // give the tracer some time to start
+ while (!tops.exists("trace")) {
+ UtilWaitThread.sleep(1000);
+ }
+ }
+
+ @Before
+ public void setupShell() throws Exception {
+ ts = new TestShell(getPrincipal(), getRootPassword(), getCluster().getConfig().getInstanceName(), getCluster().getConfig().getZooKeepers(), getCluster()
+ .getConfig().getClientConfFile());
+ }
+
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ if (null != traceProcess) {
+ traceProcess.destroy();
+ }
+ }
+
+ @After
+ public void deleteTables() throws Exception {
+ Connector c = getConnector();
+ for (String table : c.tableOperations().list()) {
+ if (!table.startsWith(Namespaces.ACCUMULO_NAMESPACE + ".") && !table.equals("trace"))
+ try {
+ c.tableOperations().delete(table);
+ } catch (TableNotFoundException e) {
+ // don't care
+ }
+ }
+ }
+
+ @After
+ public void tearDownShell() {
+ ts.shell.shutdown();
+ }
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Test
+ public void exporttableImporttable() throws Exception {
+ final String table = name.getMethodName(), table2 = table + "2";
+
+ // exporttable / importtable
+ ts.exec("createtable " + table + " -evc", true);
+ make10();
+ ts.exec("addsplits row5", true);
+ ts.exec("config -t " + table + " -s table.split.threshold=345M", true);
+ ts.exec("offline " + table, true);
+ File exportDir = new File(rootPath, "ShellServerIT.export");
+ String exportUri = "file://" + exportDir.toString();
+ String localTmp = "file://" + new File(rootPath, "ShellServerIT.tmp").toString();
+ ts.exec("exporttable -t " + table + " " + exportUri, true);
+ DistCp cp = newDistCp();
+ String import_ = "file://" + new File(rootPath, "ShellServerIT.import").toString();
+ if (getCluster().getClientConfig().getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ // DistCp bugs out trying to get a fs delegation token to perform the cp. Just copy it ourselves by hand.
+ FileSystem fs = getCluster().getFileSystem();
+ FileSystem localFs = FileSystem.getLocal(new Configuration(false));
+
+ // Path on local fs to cp into
+ Path localTmpPath = new Path(localTmp);
+ localFs.mkdirs(localTmpPath);
+
+ // Path in remote fs to importtable from
+ Path importDir = new Path(import_);
+ fs.mkdirs(importDir);
+
+ // Implement a poor-man's DistCp
+ try (BufferedReader reader = new BufferedReader(new FileReader(new File(exportDir, "distcp.txt")))) {
+ for (String line; (line = reader.readLine()) != null;) {
+ Path exportedFile = new Path(line);
+ // FileSystem has no direct remote-to-remote copy, so round-trip each file through the local fs
+ log.info("Copying " + line + " to " + localTmpPath);
+ fs.copyToLocalFile(exportedFile, localTmpPath);
+ Path tmpFile = new Path(localTmpPath, exportedFile.getName());
+ log.info("Moving " + tmpFile + " to the import directory " + importDir);
+ fs.moveFromLocalFile(tmpFile, importDir);
+ }
+ }
+ } else {
+ String[] distCpArgs = new String[] {"-f", exportUri + "/distcp.txt", import_};
+ assertEquals("Failed to run distcp: " + Arrays.toString(distCpArgs), 0, cp.run(distCpArgs));
+ }
+ ts.exec("importtable " + table2 + " " + import_, true);
+ ts.exec("config -t " + table2 + " -np", true, "345M", true);
+ ts.exec("getsplits -t " + table2, true, "row5", true);
+ ts.exec("constraint --list -t " + table2, true, "VisibilityConstraint=2", true);
+ ts.exec("online " + table, true);
+ ts.exec("deletetable -f " + table, true);
+ ts.exec("deletetable -f " + table2, true);
+ }
+
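+ // DistCp's public constructors differ across Hadoop versions, so reflectively pick one that takes a Configuration.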
+ private DistCp newDistCp() {
+ try {
+ @SuppressWarnings("unchecked")
+ Constructor<DistCp>[] constructors = (Constructor<DistCp>[]) DistCp.class.getConstructors();
+ for (Constructor<DistCp> constructor : constructors) {
+ Class<?>[] parameterTypes = constructor.getParameterTypes();
+ if (parameterTypes.length > 0 && parameterTypes[0].equals(Configuration.class)) {
+ if (parameterTypes.length == 1) {
+ return constructor.newInstance(new Configuration());
+ } else if (parameterTypes.length == 2) {
+ return constructor.newInstance(new Configuration(), null);
+ }
+ }
+ }
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ throw new RuntimeException("Unexpected constructors for DistCp");
+ }
+
+ @Test
+ public void setscaniterDeletescaniter() throws Exception {
+ final String table = name.getMethodName();
+
+ // setscaniter, deletescaniter
+ ts.exec("createtable " + table);
+ ts.exec("insert a cf cq 1");
+ ts.exec("insert a cf cq 1");
+ ts.exec("insert a cf cq 1");
+ ts.input.set("true\n\n\nSTRING");
+ ts.exec("setscaniter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -n name", true);
+ ts.exec("scan", true, "3", true);
+ ts.exec("deletescaniter -n name", true);
+ ts.exec("scan", true, "1", true);
+ ts.exec("deletetable -f " + table);
+
+ }
+
+ @Test
+ public void execfile() throws Exception {
+ // execfile
+ File file = File.createTempFile("ShellServerIT.execfile", ".conf", new File(rootPath));
+ PrintWriter writer = new PrintWriter(file.getAbsolutePath());
+ writer.println("about");
+ writer.close();
+ ts.exec("execfile " + file.getAbsolutePath(), true, Constants.VERSION, true);
+
+ }
+
+ @Test
+ public void egrep() throws Exception {
+ final String table = name.getMethodName();
+
+ // egrep
+ ts.exec("createtable " + table);
+ make10();
+ String lines = ts.exec("egrep row[123]", true);
+ assertEquals("Expected three matching rows in: " + lines, 3, lines.split("\n").length - 1);
+ ts.exec("deletetable -f " + table);
+ }
+
+ @Test
+ public void du() throws Exception {
+ final String table = name.getMethodName();
+
+ // make sure we are not in a table context in the shell
+ ts.exec("notable", true);
+
+ // Calling du not in a table context shouldn't throw an error
+ ts.output.clear();
+ ts.exec("du", true, "", true);
+
+ ts.output.clear();
+ ts.exec("createtable " + table);
+ make10();
+ ts.exec("flush -t " + table + " -w");
+ ts.exec("du " + table, true, " [" + table + "]", true);
+ ts.output.clear();
+ ts.shell.execCommand("du -h", false, false);
+ String o = ts.output.get();
+ // the reported size fluctuates slightly between runs, so match any three-digit size rather than an exact value
+ assertTrue("Output did not match regex: '" + o + "'", o.matches(".*[1-9][0-9][0-9]\\s\\[" + table + "\\]\\n"));
+ ts.exec("deletetable -f " + table);
+ }
+
+ @Test
+ public void debug() throws Exception {
+ ts.exec("debug", true, "off", true);
+ ts.exec("debug on", true);
+ ts.exec("debug", true, "on", true);
+ ts.exec("debug off", true);
+ ts.exec("debug", true, "off", true);
+ ts.exec("debug debug", false);
+ ts.exec("debug debug debug", false);
+ }
+
+ @Test
+ public void user() throws Exception {
+ final String table = name.getMethodName();
+ final boolean kerberosEnabled = getToken() instanceof KerberosToken;
+
+ // createuser, deleteuser, user, users, droptable, grant, revoke
+ if (!kerberosEnabled) {
+ ts.input.set("secret\nsecret\n");
+ }
+ ts.exec("createuser xyzzy", true);
+ ts.exec("users", true, "xyzzy", true);
+ String perms = ts.exec("userpermissions -u xyzzy", true);
+ assertTrue(perms.contains("Table permissions (" + MetadataTable.NAME + "): Table.READ"));
+ ts.exec("grant -u xyzzy -s System.CREATE_TABLE", true);
+ perms = ts.exec("userpermissions -u xyzzy", true);
+ assertTrue(perms.contains(""));
+ ts.exec("grant -u " + getPrincipal() + " -t " + MetadataTable.NAME + " Table.WRITE", true);
+ ts.exec("grant -u " + getPrincipal() + " -t " + MetadataTable.NAME + " Table.GOOFY", false);
+ ts.exec("grant -u " + getPrincipal() + " -s foo", false);
+ ts.exec("grant -u xyzzy -t " + MetadataTable.NAME + " foo", false);
+ if (!kerberosEnabled) {
+ ts.input.set("secret\nsecret\n");
+ ts.exec("user xyzzy", true);
+ ts.exec("createtable " + table, true, "xyzzy@", true);
+ ts.exec("insert row1 cf cq 1", true);
+ ts.exec("scan", true, "row1", true);
+ ts.exec("droptable -f " + table, true);
+ ts.input.set(getRootPassword() + "\n" + getRootPassword() + "\n");
+ ts.exec("user root", true);
+ }
+ ts.exec("deleteuser " + getPrincipal(), false, "delete yourself", true);
+ ts.exec("revoke -u xyzzy -s System.CREATE_TABLE", true);
+ ts.exec("revoke -u xyzzy -s System.GOOFY", false);
+ ts.exec("revoke -u xyzzy -s foo", false);
+ ts.exec("revoke -u xyzzy -t " + MetadataTable.NAME + " Table.WRITE", true);
+ ts.exec("revoke -u xyzzy -t " + MetadataTable.NAME + " Table.GOOFY", false);
+ ts.exec("revoke -u xyzzy -t " + MetadataTable.NAME + " foo", false);
+ ts.exec("deleteuser xyzzy", true, "deleteuser { xyzzy } (yes|no)?", true);
+ ts.exec("deleteuser -f xyzzy", true);
+ ts.exec("users", true, "xyzzy", false);
+ }
+
+ @Test
+ public void durability() throws Exception {
+ final String table = name.getMethodName();
+ ts.exec("createtable " + table);
+ ts.exec("insert -d none a cf cq randomGunkaASDFWEAQRd");
+ ts.exec("insert -d foo a cf cq2 2", false, "foo", true);
+ ts.exec("scan -r a", true, "randomGunkaASDFWEAQRd", true);
+ ts.exec("scan -r a", true, "foo", false);
+ }
+
+ @Test
+ public void iter() throws Exception {
+ final String table = name.getMethodName();
+
+ // setshelliter, listshelliter, deleteshelliter
+ ts.exec("createtable " + table);
+ ts.exec("insert a cf cq 1");
+ ts.exec("insert a cf cq 1");
+ ts.exec("insert a cf cq 1");
+ ts.input.set("true\n\n\nSTRING\n");
+ ts.exec("setshelliter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -pn sum -n name", true);
+ ts.exec("setshelliter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 11 -pn sum -n name", false);
+ ts.exec("setshelliter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -pn sum -n other", false);
+ ts.input.set("true\n\n\nSTRING\n");
+ ts.exec("setshelliter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 11 -pn sum -n xyzzy", true);
+ ts.exec("scan -pn sum", true, "3", true);
+ ts.exec("listshelliter", true, "Iterator name", true);
+ ts.exec("listshelliter", true, "Iterator xyzzy", true);
+ ts.exec("listshelliter", true, "Profile : sum", true);
+ ts.exec("deleteshelliter -pn sum -n name", true);
+ ts.exec("listshelliter", true, "Iterator name", false);
+ ts.exec("listshelliter", true, "Iterator xyzzy", true);
+ ts.exec("deleteshelliter -pn sum -a", true);
+ ts.exec("listshelliter", true, "Iterator xyzzy", false);
+ ts.exec("listshelliter", true, "Profile : sum", false);
+ ts.exec("deletetable -f " + table);
+ // list iter
+ ts.exec("createtable " + table);
+ ts.exec("insert a cf cq 1");
+ ts.exec("insert a cf cq 1");
+ ts.exec("insert a cf cq 1");
+ ts.input.set("true\n\n\nSTRING\n");
+ ts.exec("setiter -scan -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -n name", true);
+ ts.exec("setiter -scan -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 11 -n name", false);
+ ts.exec("setiter -scan -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -n other", false);
+ ts.input.set("true\n\n\nSTRING\n");
+ ts.exec("setiter -scan -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 11 -n xyzzy", true);
+ ts.exec("scan", true, "3", true);
+ ts.exec("listiter -scan", true, "Iterator name", true);
+ ts.exec("listiter -scan", true, "Iterator xyzzy", true);
+ ts.exec("listiter -minc", true, "Iterator name", false);
+ ts.exec("listiter -minc", true, "Iterator xyzzy", false);
+ ts.exec("deleteiter -scan -n name", true);
+ ts.exec("listiter -scan", true, "Iterator name", false);
+ ts.exec("listiter -scan", true, "Iterator xyzzy", true);
+ ts.exec("deletetable -f " + table);
+
+ }
+
+ @Test
+ public void setIterOptionPrompt() throws Exception {
+ Connector conn = getConnector();
+ String tableName = name.getMethodName();
+
+ ts.exec("createtable " + tableName);
+ ts.input.set("\n\n");
+ // Setting a non-optiondescriber with no name should fail
+ ts.exec("setiter -scan -class org.apache.accumulo.core.iterators.ColumnFamilyCounter -p 30", false);
+
+ // Name as option will work
+ ts.exec("setiter -scan -class org.apache.accumulo.core.iterators.ColumnFamilyCounter -p 30 -name cfcounter", true);
+
+ String expectedKey = "table.iterator.scan.cfcounter";
+ String expectedValue = "30,org.apache.accumulo.core.iterators.ColumnFamilyCounter";
+ TableOperations tops = conn.tableOperations();
+ checkTableForProperty(tops, tableName, expectedKey, expectedValue);
+
+ ts.exec("deletetable " + tableName, true);
+ tableName = tableName + "1";
+
+ ts.exec("createtable " + tableName, true);
+
+ ts.input.set("customcfcounter\n\n");
+
+ // Name on the CLI should override OptionDescriber (or user input name, in this case)
+ ts.exec("setiter -scan -class org.apache.accumulo.core.iterators.ColumnFamilyCounter -p 30", true);
+ expectedKey = "table.iterator.scan.customcfcounter";
+ expectedValue = "30,org.apache.accumulo.core.iterators.ColumnFamilyCounter";
+ checkTableForProperty(tops, tableName, expectedKey, expectedValue);
+
+ ts.exec("deletetable " + tableName, true);
+ tableName = tableName + "1";
+
+ ts.exec("createtable " + tableName, true);
+
+ ts.input.set("customcfcounter\nname1 value1\nname2 value2\n\n");
+
+ // Name on the CLI should override OptionDescriber (or user input name, in this case)
+ ts.exec("setiter -scan -class org.apache.accumulo.core.iterators.ColumnFamilyCounter -p 30", true);
+ expectedKey = "table.iterator.scan.customcfcounter";
+ expectedValue = "30,org.apache.accumulo.core.iterators.ColumnFamilyCounter";
+ checkTableForProperty(tops, tableName, expectedKey, expectedValue);
+ expectedKey = "table.iterator.scan.customcfcounter.opt.name1";
+ expectedValue = "value1";
+ checkTableForProperty(tops, tableName, expectedKey, expectedValue);
+ expectedKey = "table.iterator.scan.customcfcounter.opt.name2";
+ expectedValue = "value2";
+ checkTableForProperty(tops, tableName, expectedKey, expectedValue);
+
+ ts.exec("deletetable " + tableName, true);
+ tableName = tableName + "1";
+
+ ts.exec("createtable " + tableName, true);
+
+ ts.input.set("\nname1 value1.1,value1.2,value1.3\nname2 value2\n\n");
+
+ // Name on the CLI should override OptionDescriber (or user input name, in this case)
+ ts.exec("setiter -scan -class org.apache.accumulo.core.iterators.ColumnFamilyCounter -p 30 -name cfcounter", true);
+ expectedKey = "table.iterator.scan.cfcounter";
+ expectedValue = "30,org.apache.accumulo.core.iterators.ColumnFamilyCounter";
+ checkTableForProperty(tops, tableName, expectedKey, expectedValue);
+ expectedKey = "table.iterator.scan.cfcounter.opt.name1";
+ expectedValue = "value1.1,value1.2,value1.3";
+ checkTableForProperty(tops, tableName, expectedKey, expectedValue);
+ expectedKey = "table.iterator.scan.cfcounter.opt.name2";
+ expectedValue = "value2";
+ checkTableForProperty(tops, tableName, expectedKey, expectedValue);
+ }
+
+ protected void checkTableForProperty(TableOperations tops, String tableName, String expectedKey, String expectedValue) throws Exception {
+ for (int i = 0; i < 5; i++) {
+ for (Entry<String,String> entry : tops.getProperties(tableName)) {
+ if (expectedKey.equals(entry.getKey())) {
+ assertEquals(expectedValue, entry.getValue());
+ return;
+ }
+ }
+ Thread.sleep(500);
+ }
+
+ fail("Failed to find expected property on " + tableName + ": " + expectedKey + "=" + expectedValue);
+ }
+
+ @Test
+ public void notable() throws Exception {
+ final String table = name.getMethodName();
+
+ // notable
+ ts.exec("createtable " + table, true);
+ ts.exec("scan", true, " " + table + ">", true);
+ assertTrue(ts.output.get().contains(" " + table + ">"));
+ ts.exec("notable", true);
+ ts.exec("scan", false, "Not in a table context.", true);
+ assertFalse(ts.output.get().contains(" " + table + ">"));
+ ts.exec("deletetable -f " + table);
+ }
+
+ @Test
+ public void sleep() throws Exception {
+ // sleep
+ long now = System.currentTimeMillis();
+ ts.exec("sleep 0.2", true);
+ long diff = System.currentTimeMillis() - now;
+ assertTrue("Diff was actually " + diff, diff >= 200);
+ assertTrue("Diff was actually " + diff, diff < 600);
+ }
+
+ @Test
+ public void addauths() throws Exception {
+ final String table = name.getMethodName();
+ // addauths
+ ts.exec("createtable " + table + " -evc");
+ boolean success = false;
+ for (int i = 0; i < 9 && !success; i++) {
+ try {
+ ts.exec("insert a b c d -l foo", false, "does not have authorization", true, new ErrorMessageCallback() {
+ @Override
+ public String getErrorMessage() {
+ try {
+ Connector c = getConnector();
+ return "Current auths for root are: " + c.securityOperations().getUserAuthorizations("root").toString();
+ } catch (Exception e) {
+ return "Could not check authorizations";
+ }
+ }
+ });
+ success = true;
+ } catch (AssertionError e) {
+ Thread.sleep(200);
+ }
+ }
+ if (!success) {
+ ts.exec("insert a b c d -l foo", false, "does not have authorization", true, new ErrorMessageCallback() {
+ @Override
+ public String getErrorMessage() {
+ try {
+ Connector c = getConnector();
+ return "Current auths for root are: " + c.securityOperations().getUserAuthorizations("root").toString();
+ } catch (Exception e) {
+ return "Could not check authorizations";
+ }
+ }
+ });
+ }
+ ts.exec("addauths -s foo,bar", true);
+ boolean passed = false;
+ for (int i = 0; i < 50 && !passed; i++) {
+ try {
+ ts.exec("getauths", true, "foo", true);
+ ts.exec("getauths", true, "bar", true);
+ passed = true;
+ } catch (Exception e) {
+ UtilWaitThread.sleep(300);
+ }
+ }
+ assertTrue("Could not successfully see updated authoriations", passed);
+ ts.exec("insert a b c d -l foo");
+ ts.exec("scan", true, "[foo]");
+ ts.exec("scan -s bar", true, "[foo]", false);
+ ts.exec("deletetable -f " + table);
+ }
+
+ @Test
+ public void getAuths() throws Exception {
+ Assume.assumeFalse("test skipped for kerberos", getToken() instanceof KerberosToken);
+
+ // create two users with different auths
+ for (int i = 1; i <= 2; i++) {
+ String userName = name.getMethodName() + "user" + i;
+ String password = "password" + i;
+ String auths = "auth" + i + "A,auth" + i + "B";
+ ts.exec("createuser " + userName, true);
+ ts.exec(password, true);
+ ts.exec("addauths -u " + userName + " -s " + auths, true);
+ }
+
+ // get auths using root user, which has System.SYSTEM
+ ts.exec("getauths -u getAuthsuser1", true, "auth1A", true);
+ ts.exec("getauths -u getAuthsuser1", true, "auth1B", true);
+ ts.exec("getauths -u getAuthsuser2", true, "auth2A", true);
+ ts.exec("getauths -u getAuthsuser2", true, "auth2B", true);
+
+ // grant the first user the ability to see other users auths
+ ts.exec("grant -u getAuthsuser1 -s System.ALTER_USER", true);
+
+ // switch to first user (the one with the ALTER_USER perm)
+ ts.exec("user getAuthsuser1", true);
+ ts.exec("password1", true);
+
+ // get auths for self and other user
+ ts.exec("getauths -u getAuthsuser1", true, "auth1A", true);
+ ts.exec("getauths -u getAuthsuser1", true, "auth1B", true);
+ ts.exec("getauths -u getAuthsuser2", true, "auth2A", true);
+ ts.exec("getauths -u getAuthsuser2", true, "auth2B", true);
+
+ // switch to second user (the one without the ALTER_USER perm)
+ ts.exec("user getAuthsuser2", true);
+ ts.exec("password2", true);
+
+ // get auths for self, but not other user
+ ts.exec("getauths -u getAuthsuser2", true, "auth2A", true);
+ ts.exec("getauths -u getAuthsuser2", true, "auth2B", true);
+ ts.exec("getauths -u getAuthsuser1", false, "PERMISSION_DENIED", true);
+ ts.exec("getauths -u getAuthsuser1", false, "PERMISSION_DENIED", true);
+ }
+
+ @Test
+ public void byeQuitExit() throws Exception {
+ // bye, quit, exit
+ for (String cmd : "bye quit exit".split(" ")) {
+ assertFalse(ts.shell.getExit());
+ ts.exec(cmd);
+ assertTrue(ts.shell.getExit());
+ ts.shell.setExit(false);
+ }
+ }
+
+ @Test
+ public void classpath() throws Exception {
+ // classpath
+ ts.exec("classpath", true, "Level 2: Java Classloader (loads everything defined by java classpath) URL classpath items are", true);
+ }
+
+ @Test
+ public void clearCls() throws Exception {
+ // clear/cls
+ if (ts.shell.getReader().getTerminal().isAnsiSupported()) {
+ ts.exec("cls", true, "[1;1H");
+ ts.exec("clear", true, "[2J");
+ } else {
+ ts.exec("cls", false, "does not support");
+ ts.exec("clear", false, "does not support");
+ }
+ }
+
+ @Test
+ public void clonetable() throws Exception {
+ final String table = name.getMethodName(), clone = table + "_clone";
+
+ // clonetable
+ ts.exec("createtable " + table + " -evc");
+ ts.exec("config -t " + table + " -s table.split.threshold=123M", true);
+ ts.exec("addsplits -t " + table + " a b c", true);
+ ts.exec("insert a b c value");
+ ts.exec("scan", true, "value", true);
+ ts.exec("clonetable " + table + " " + clone);
+ // verify constraint, config, and splits were cloned
+ ts.exec("constraint --list -t " + clone, true, "VisibilityConstraint=2", true);
+ ts.exec("config -t " + clone + " -np", true, "123M", true);
+ ts.exec("getsplits -t " + clone, true, "a\nb\nc\n");
+ ts.exec("deletetable -f " + table);
+ ts.exec("deletetable -f " + clone);
+ }
+
+ @Test
+ public void createTableWithProperties() throws Exception {
+ final String table = name.getMethodName();
+
+ // create table with initial properties
+ String testProp = "table.custom.description=description,table.custom.testProp=testProp," + Property.TABLE_SPLIT_THRESHOLD.getKey() + "=10K";
+
+ ts.exec("createtable " + table + " -prop " + testProp, true);
+ ts.exec("insert a b c value", true);
+ ts.exec("scan", true, "value", true);
+
+ Connector connector = getConnector();
+ for (Entry<String,String> entry : connector.tableOperations().getProperties(table)) {
+ if (entry.getKey().equals("table.custom.description"))
+ Assert.assertTrue("Initial property was not set correctly", entry.getValue().equals("description"));
+
+ if (entry.getKey().equals("table.custom.testProp"))
+ Assert.assertTrue("Initial property was not set correctly", entry.getValue().equals("testProp"));
+
+ if (entry.getKey().equals(Property.TABLE_SPLIT_THRESHOLD.getKey()))
+ Assert.assertTrue("Initial property was not set correctly", entry.getValue().equals("10K"));
+
+ }
+ ts.exec("deletetable -f " + table);
+ }
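+
+ // The -prop flag corresponds to the NewTableConfiguration API; a minimal sketch
+ // (table name assumed for illustration) of the programmatic form:
+ //   Map<String,String> props = new HashMap<>();
+ //   props.put("table.custom.description", "description");
+ //   connector.tableOperations().create("mytable", new NewTableConfiguration().setProperties(props));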
+
+ @Test
+ public void testCompactions() throws Exception {
+ final String table = name.getMethodName();
+
+ // compact
+ ts.exec("createtable " + table);
+
+ String tableId = getTableId(table);
+
+ // make two files
+ ts.exec("insert a b c d");
+ ts.exec("flush -w");
+ ts.exec("insert x y z v");
+ ts.exec("flush -w");
+ int oldCount = countFiles(tableId);
+ // merge two files into one
+ ts.exec("compact -t " + table + " -w");
+ assertTrue(countFiles(tableId) < oldCount);
+ ts.exec("addsplits -t " + table + " f");
+ // make two more files:
+ ts.exec("insert m 1 2 3");
+ ts.exec("flush -w");
+ ts.exec("insert n 1 2 v901");
+ ts.exec("flush -w");
+ List<String> oldFiles = getFiles(tableId);
+
+ // at this point there are 4 files in the default tablet
+ assertEquals("Files that were found: " + oldFiles, 4, oldFiles.size());
+
+ // compact some data:
+ ts.exec("compact -b g -e z -w");
+ assertEquals(2, countFiles(tableId));
+ ts.exec("compact -w");
+ assertEquals(2, countFiles(tableId));
+ ts.exec("merge --all -t " + table);
+ ts.exec("compact -w");
+ assertEquals(1, countFiles(tableId));
+
+ // test compaction strategy
+ ts.exec("insert z 1 2 v900");
+ ts.exec("compact -w -s " + TestCompactionStrategy.class.getName() + " -sc inputPrefix=F,dropPrefix=A");
+ assertEquals(1, countFiles(tableId));
+ ts.exec("scan", true, "v900", true);
+ ts.exec("scan", true, "v901", false);
+
+ ts.exec("deletetable -f " + table);
+ }
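+
+ // The -s/-sc options correspond to the compaction strategy API; a minimal sketch
+ // (assuming a Connector named connector) of the programmatic form:
+ //   CompactionStrategyConfig csc = new CompactionStrategyConfig(TestCompactionStrategy.class.getName());
+ //   csc.setOptions(ImmutableMap.of("inputPrefix", "F", "dropPrefix", "A"));
+ //   connector.tableOperations().compact(table, new CompactionConfig().setWait(true).setCompactionStrategy(csc));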
+
+ @Test
+ public void testCompactionSelection() throws Exception {
+ final String table = name.getMethodName();
+ final String clone = table + "_clone";
+
+ ts.exec("createtable " + table);
+ ts.exec("insert a b c d");
+ ts.exec("flush -w");
+ ts.exec("insert x y z v");
+ ts.exec("flush -w");
+
+ ts.exec("clonetable -s " + Property.TABLE_MAJC_RATIO.getKey() + "=10 " + table + " " + clone);
+
+ ts.exec("table " + clone);
+ ts.exec("insert m n l o");
+ ts.exec("flush -w");
+
+ String tableId = getTableId(table);
+ String cloneId = getTableId(clone);
+
+ assertEquals(3, countFiles(cloneId));
+
+ // compact only files from src table
+ ts.exec("compact -t " + clone + " -w --sf-epath .*tables/" + tableId + ".*");
+
+ assertEquals(2, countFiles(cloneId));
+
+ ts.exec("insert r s t u");
+ ts.exec("flush -w");
+
+ assertEquals(3, countFiles(cloneId));
+
+ // compact all flush files
+ ts.exec("compact -t " + clone + " -w --sf-ename F.*");
+
+ assertEquals(2, countFiles(cloneId));
+
+ // create two large files
+ Random rand = new Random();
+ StringBuilder sb = new StringBuilder("insert b v q ");
+ for (int i = 0; i < 10000; i++) {
+ sb.append((char) ('a' + rand.nextInt(26))); // cast needed: 'a' + int is an int, which append() would render as digits
+ }
+
+ ts.exec(sb.toString());
+ ts.exec("flush -w");
+
+ ts.exec(sb.toString());
+ ts.exec("flush -w");
+
+ assertEquals(4, countFiles(cloneId));
+
+ // compact only small files
+ ts.exec("compact -t " + clone + " -w --sf-lt-esize 1000");
+
+ assertEquals(3, countFiles(cloneId));
+
+ // compact large files if 3 or more
+ ts.exec("compact -t " + clone + " -w --sf-gt-esize 1K --min-files 3");
+
+ assertEquals(3, countFiles(cloneId));
+
+ // compact large files if 2 or more
+ ts.exec("compact -t " + clone + " -w --sf-gt-esize 1K --min-files 2");
+
+ assertEquals(2, countFiles(cloneId));
+
+ // compact if tablet has 3 or more files
+ ts.exec("compact -t " + clone + " -w --min-files 3");
+
+ assertEquals(2, countFiles(cloneId));
+
+ // compact if tablet has 2 or more files
+ ts.exec("compact -t " + clone + " -w --min-files 2");
+
+ assertEquals(1, countFiles(cloneId));
+
+ // create two small and one large flush files in order to test AND
+ ts.exec(sb.toString());
+ ts.exec("flush -w");
+
+ ts.exec("insert m n l o");
+ ts.exec("flush -w");
+
+ ts.exec("insert m n l o");
+ ts.exec("flush -w");
+
+ assertEquals(4, countFiles(cloneId));
+
+ // should only compact two small flush files leaving large flush file
+ ts.exec("compact -t " + clone + " -w --sf-ename F.* --sf-lt-esize 1K");
+
+ assertEquals(3, countFiles(cloneId));
+ }
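+
+ // Selection flags compose with AND semantics: --sf-epath/--sf-ename match a regex
+ // against the file path/name, --sf-lt-esize/--sf-gt-esize bound the estimated file
+ // size, and --min-files skips tablets with fewer matching files, as exercised above.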
+
+ @Test
+ public void testCompactionSelectionAndStrategy() throws Exception {
+
+ final String table = name.getMethodName();
+
+ ts.exec("createtable " + table);
+
+ // expect this to fail
+ ts.exec("compact -t " + table + " -w --sf-ename F.* -s " + TestCompactionStrategy.class.getName() + " -sc inputPrefix=F,dropPrefix=A", false);
+ }
+
+ @Test
+ public void constraint() throws Exception {
+ final String table = name.getMethodName();
+
+ // constraint
+ ts.exec("constraint -l -t " + MetadataTable.NAME + "", true, "MetadataConstraints=1", true);
+ ts.exec("createtable " + table + " -evc");
+
+ // Make sure the table is fully propagated through zoocache
+ getTableId(table);
+
+ ts.exec("constraint -l -t " + table, true, "VisibilityConstraint=2", true);
+ ts.exec("constraint -t " + table + " -d 2", true, "Removed constraint 2 from table " + table);
+ // wait for zookeeper updates to propagate
+ UtilWaitThread.sleep(1000);
+ ts.exec("constraint -l -t " + table, true, "VisibilityConstraint=2", false);
+ ts.exec("deletetable -f " + table);
+ }
+
+ @Test
+ public void deletemany() throws Exception {
+ final String table = name.getMethodName();
+
+ // deletemany
+ ts.exec("createtable " + table);
+ make10();
+ assertEquals(10, countkeys(table));
+ ts.exec("deletemany -f -b row8");
+ assertEquals(8, countkeys(table));
+ ts.exec("scan -t " + table + " -np", true, "row8", false);
+ make10();
+ ts.exec("deletemany -f -b row4 -e row5");
+ assertEquals(8, countkeys(table));
+ make10();
+ ts.exec("deletemany -f -c cf:col4,cf:col5");
+ assertEquals(8, countkeys(table));
+ make10();
+ ts.exec("deletemany -f -r row3");
+ assertEquals(9, countkeys(table));
+ make10();
+ ts.exec("deletemany -f -r row3");
+ assertEquals(9, countkeys(table));
+ make10();
+ ts.exec("deletemany -f -b row3 -be -e row5 -ee");
+ assertEquals(9, countkeys(table));
+ ts.exec("deletetable -f " + table);
+ }
+
+ @Test
+ public void deleterows() throws Exception {
+ final String table = name.getMethodName();
+
+ ts.exec("createtable " + table);
+ final String tableId = getTableId(table);
+
+ // deleterows
+ int base = countFiles(tableId);
+ assertEquals(0, base);
+
+ log.info("Adding 2 splits");
+ ts.exec("addsplits row5 row7");
+
+ log.info("Writing 10 records");
+ make10();
+
+ log.info("Flushing table");
+ ts.exec("flush -w -t " + table);
+ log.info("Table flush completed");
+
+ // One of the tablets we're writing to might migrate in between writes, which would create a 2nd file for that tablet.
+ // If we notice this, compact and then move on.
+ List<String> files = getFiles(tableId);
+ if (3 < files.size()) {
+ log.info("More than 3 files were found, compacting before proceeding");
+ ts.exec("compact -w -t " + table);
+ files = getFiles(tableId);
+ assertEquals("Expected to only find 3 files after compaction: " + files, 3, files.size());
+ }
+
+ assertNotNull(files);
+ assertEquals("Found the following files: " + files, 3, files.size());
+ ts.exec("deleterows -t " + table + " -b row5 -e row7");
+ assertEquals(2, countFiles(tableId));
+ ts.exec("deletetable -f " + table);
+ }
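+
+ // With splits at row5 and row7, the flush leaves one file per tablet (3 total);
+ // deleterows -b row5 -e row7 merges away the middle tablet, so its file is dropped
+ // and only 2 files remain.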
+
+ @Test
+ public void groups() throws Exception {
+ final String table = name.getMethodName();
+
+ ts.exec("createtable " + table);
+ ts.exec("setgroups -t " + table + " alpha=a,b,c num=3,2,1");
+ ts.exec("getgroups -t " + table, true, "alpha=a,b,c", true);
+ ts.exec("getgroups -t " + table, true, "num=1,2,3", true);
+ ts.exec("deletetable -f " + table);
+ }
+
+ @Test
+ public void extensions() throws Exception {
+ String extName = "ExampleShellExtension";
+
+ // check for example extension
+ ts.exec("help", true, extName, false);
+ ts.exec("extensions -l", true, extName, false);
+
+ // enable extensions and check for example
+ ts.exec("extensions -e", true);
+ ts.exec("extensions -l", true, extName, true);
+ ts.exec("help", true, extName, true);
+
+ // test example extension command
+ ts.exec(extName + "::debug", true, "This is a test", true);
+
+ // disable extensions and check for example
+ ts.exec("extensions -d", true);
+ ts.exec("extensions -l", true, extName, false);
+ ts.exec("help", true, extName, false);
+
+ // ensure extensions are really disabled
+ ts.exec(extName + "::debug", true, "Unknown command", true);
+ }
+
+ @Test
+ public void grep() throws Exception {
+ final String table = name.getMethodName();
+
+ ts.exec("createtable " + table, true);
+ make10();
+ ts.exec("grep row[123]", true, "row1", false);
+ ts.exec("grep row5", true, "row5", true);
+ ts.exec("deletetable -f " + table, true);
+ }
+
+ @Test
+ public void help() throws Exception {
+ ts.exec("help -np", true, "Help Commands", true);
+ ts.exec("?", true, "Help Commands", true);
+ for (String c : ("bye exit quit " + "about help info ? " + "deleteiter deletescaniter listiter setiter setscaniter "
+ + "grant revoke systempermissions tablepermissions userpermissions " + "execfile history " + "authenticate cls clear notable sleep table user whoami "
+ + "clonetable config createtable deletetable droptable du exporttable importtable offline online renametable tables "
+ + "addsplits compact constraint flush getgropus getsplits merge setgroups " + "addauths createuser deleteuser dropuser getauths passwd setauths users "
+ + "delete deletemany deleterows egrep formatter interpreter grep importdirectory insert maxrow scan").split(" ")) {
+ ts.exec("help " + c, true);
+ }
+ }
+
+ // @Test(timeout = 45000)
+ public void history() throws Exception {
+ final String table = name.getMethodName();
+
+ ts.exec("history -c", true);
+ ts.exec("createtable " + table);
+ ts.exec("deletetable -f " + table);
+ ts.exec("history", true, table, true);
+ ts.exec("history", true, "history", true);
+ }
+
+ @Test
+ public void importDirectory() throws Exception {
+ final String table = name.getMethodName();
+
+ Configuration conf = new Configuration();
+ FileSystem fs = FileSystem.get(conf);
+ File importDir = new File(rootPath, "import");
+ assertTrue(importDir.mkdir());
+ String even = new File(importDir, "even.rf").toString();
+ String odd = new File(importDir, "odd.rf").toString();
+ File errorsDir = new File(rootPath, "errors");
+ assertTrue(errorsDir.mkdir());
+ fs.mkdirs(new Path(errorsDir.toString()));
+ AccumuloConfiguration aconf = AccumuloConfiguration.getDefaultConfiguration();
+ FileSKVWriter evenWriter = FileOperations.getInstance().openWriter(even, fs, conf, aconf);
+ evenWriter.startDefaultLocalityGroup();
+ FileSKVWriter oddWriter = FileOperations.getInstance().openWriter(odd, fs, conf, aconf);
+ oddWriter.startDefaultLocalityGroup();
+ long timestamp = System.currentTimeMillis();
+ Text cf = new Text("cf");
+ Text cq = new Text("cq");
+ Value value = new Value("value".getBytes());
+ for (int i = 0; i < 100; i += 2) {
+ Key key = new Key(new Text(String.format("%8d", i)), cf, cq, timestamp);
+ evenWriter.append(key, value);
+ key = new Key(new Text(String.format("%8d", i + 1)), cf, cq, timestamp);
+ oddWriter.append(key, value);
+ }
+ evenWriter.close();
+ oddWriter.close();
+ assertEquals(0, ts.shell.getExitCode());
+ ts.exec("createtable " + table, true);
+ ts.exec("importdirectory " + importDir + " " + errorsDir + " true", true);
+ ts.exec("scan -r 00000000", true, "00000000", true);
+ ts.exec("scan -r 00000099", true, "00000099", true);
+ ts.exec("deletetable -f " + table);
+ }
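+
+ // The importdirectory command wraps the bulk-import API; a minimal sketch of the
+ // programmatic form (assuming a Connector named connector):
+ //   connector.tableOperations().importDirectory(table, importDir.toString(), errorsDir.toString(), true);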
+
+ @Test
+ public void info() throws Exception {
+ ts.exec("info", true, Constants.VERSION, true);
+ }
+
+ @Test
+ public void interpreter() throws Exception {
+ final String table = name.getMethodName();
+
+ ts.exec("createtable " + table, true);
+ ts.exec("interpreter -l", true, "HexScan", false);
+ ts.exec("insert \\x02 cf cq value", true);
+ ts.exec("scan -b 02", true, "value", false);
+ ts.exec("interpreter -i org.apache.accumulo.core.util.interpret.HexScanInterpreter", true);
+ // Need to allow time for this to propagate through zoocache/zookeeper
+ UtilWaitThread.sleep(3000);
+
+ ts.exec("interpreter -l", true, "HexScan", true);
+ ts.exec("scan -b 02", true, "value", true);
+ ts.exec("deletetable -f " + table, true);
+ }
+
+ @Test
+ public void listcompactions() throws Exception {
+ final String table = name.getMethodName();
+
+ ts.exec("createtable " + table, true);
+ ts.exec("config -t " + table + " -s table.iterator.minc.slow=30,org.apache.accumulo.test.functional.SlowIterator", true);
+ ts.exec("config -t " + table + " -s table.iterator.minc.slow.opt.sleepTime=1000", true);
+ ts.exec("insert a cf cq value", true);
+ ts.exec("insert b cf cq value", true);
+ ts.exec("insert c cf cq value", true);
+ ts.exec("insert d cf cq value", true);
+ ts.exec("flush -t " + table, true);
+ ts.exec("sleep 0.2", true);
+ ts.exec("listcompactions", true, "default_tablet");
+ String[] lines = ts.output.get().split("\n");
+ String last = lines[lines.length - 1];
+ String[] parts = last.split("\\|");
+ assertEquals(12, parts.length);
+ ts.exec("deletetable -f " + table, true);
+ }
+
+ @Test
+ public void maxrow() throws Exception {
+ final String table = name.getMethodName();
+
+ ts.exec("createtable " + table, true);
+ ts.exec("insert a cf cq value", true);
+ ts.exec("insert b cf cq value", true);
+ ts.exec("insert ccc cf cq value", true);
+ ts.exec("insert zzz cf cq value", true);
+ ts.exec("maxrow", true, "zzz", true);
+ ts.exec("delete zzz cf cq", true);
+ ts.exec("maxrow", true, "ccc", true);
+ ts.exec("deletetable -f " + table, true);
+ }
+
+ @Test
+ public void merge() throws Exception {
+ final String table = name.getMethodName();
+
+ ts.exec("createtable " + table);
+ ts.exec("addsplits a m z");
+ ts.exec("getsplits", true, "z", true);
+ ts.exec("merge --all", true);
+ ts.exec("getsplits", true, "z", false);
+ ts.exec("deletetable -f " + table);
+ ts.exec("getsplits -t " + MetadataTable.NAME + "", true);
+ assertEquals(2, ts.output.get().split("\n").length);
+ ts.exec("getsplits -t accumulo.root", true);
+ assertEquals(1, ts.output.get().split("\n").length);
+ ts.exec("merge --all -t " + MetadataTable.NAME + "");
+ ts.exec("getsplits -t " + MetadataTable.NAME + "", true);
+ assertEquals(1, ts.output.get().split("\n").length);
+ }
+
+ @Test
+ public void ping() throws Exception {
+ for (int i = 0; i < 10; i++) {
+ ts.exec("ping", true, "OK", true);
+ // wait for both tservers to start up (two OK lines plus the echoed command = 3)
+ if (ts.output.get().split("\n").length == 3)
+ break;
+ UtilWaitThread.sleep(1000);
+
+ }
+ assertEquals(3, ts.output.get().split("\n").length);
+ }
+
+ @Test
+ public void renametable() throws Exception {
+ final String table = name.getMethodName() + "1", rename = name.getMethodName() + "2";
+
+ ts.exec("createtable " + table);
+ ts.exec("insert this is a value");
+ ts.exec("renametable " + table + " " + rename);
+ ts.exec("tables", true, rename, true);
+ ts.exec("tables", true, table, false);
+ ts.exec("scan -t " + rename, true, "value", true);
+ ts.exec("deletetable -f " + rename, true);
+ }
+
+ @Test
+ public void tables() throws Exception {
+ final String table = name.getMethodName(), table1 = table + "_z", table2 = table + "_a";
+ ts.exec("createtable " + table1);
+ ts.exec("createtable " + table2);
+ ts.exec("notable");
+ String lst = ts.exec("tables -l");
+ assertTrue(lst.indexOf(table2) < lst.indexOf(table1));
+ lst = ts.exec("tables -l -s");
+ assertTrue(lst.indexOf(table1) < lst.indexOf(table2));
+ }
+
+ @Test
+ public void systempermission() throws Exception {
+ ts.exec("systempermissions");
+ assertEquals(12, ts.output.get().split("\n").length - 1);
+ ts.exec("tablepermissions", true);
+ assertEquals(6, ts.output.get().split("\n").length - 1);
+ }
+
+ @Test
+ public void listscans() throws Exception {
+ final String table = name.getMethodName();
+
+ ts.exec("createtable " + table, true);
+
+ // Should be about a 3 second scan
+ for (int i = 0; i < 6; i++) {
+ ts.exec("insert " + i + " cf cq value", true);
+ }
+ Connector connector = getConnector();
+ final Scanner s = connector.createScanner(table, Authorizations.EMPTY);
+ IteratorSetting cfg = new IteratorSetting(30, SlowIterator.class);
+ SlowIterator.setSleepTime(cfg, 500);
+ s.addScanIterator(cfg);
+
+ Thread thread = new Thread() {
+ @Override
+ public void run() {
+ try {
+ Iterators.size(s.iterator());
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+ };
+ thread.start();
+
+ List<String> scans = new ArrayList<String>();
+ // Try to find the active scan for about 15 seconds
+ for (int i = 0; i < 50 && scans.isEmpty(); i++) {
+ String currentScans = ts.exec("listscans", true);
+ log.info("Got output from listscans:\n" + currentScans);
+ String[] lines = currentScans.split("\n");
+ for (int scanOffset = 2; scanOffset < lines.length; scanOffset++) {
+ String currentScan = lines[scanOffset];
+ if (currentScan.contains(table)) {
+ log.info("Retaining scan: " + currentScan);
+ scans.add(currentScan);
+ } else {
+ log.info("Ignoring scan because of wrong table: " + currentScan);
+ }
+ }
+ UtilWaitThread.sleep(300);
+ }
+ thread.join();
+
+ assertFalse("Could not find any active scans over table " + table, scans.isEmpty());
+
+ for (String scan : scans) {
+ if (!scan.contains("RUNNING")) {
+ log.info("Ignoring scan because it doesn't contain 'RUNNING': " + scan);
+ continue;
+ }
+ String parts[] = scan.split("\\|");
+ assertEquals("Expected 13 colums, but found " + parts.length + " instead for '" + Arrays.toString(parts) + "'", 13, parts.length);
+ String tserver = parts[0].trim();
+ // TODO: any way to tell if the client address is accurate? could be local IP, host, loopback...?
+ String hostPortPattern = ".+:\\d+";
+ assertTrue(tserver.matches(hostPortPattern));
+ assertTrue(getConnector().instanceOperations().getTabletServers().contains(tserver));
+ String client = parts[1].trim();
+ assertTrue(client.matches(hostPortPattern));
+ }
+
+ ts.exec("deletetable -f " + table, true);
+ }
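+
+ // listscans output is pipe-delimited; the assertions above only rely on column 0
+ // being the tserver host:port and column 1 being the client address, since the
+ // remaining columns vary with scan state.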
+
+ @Test
+ public void testPertableClasspath() throws Exception {
+ final String table = name.getMethodName();
+
+ File fooFilterJar = File.createTempFile("FooFilter", ".jar", new File(rootPath));
+
+ FileUtils.copyURLToFile(this.getClass().getResource("/FooFilter.jar"), fooFilterJar);
+ fooFilterJar.deleteOnExit();
+
+ File fooConstraintJar = File.createTempFile("FooConstraint", ".jar", new File(rootPath));
+ FileUtils.copyURLToFile(this.getClass().getResource("/FooConstraint.jar"), fooConstraintJar);
+ fooConstraintJar.deleteOnExit();
+
+ ts.exec("config -s " + Property.VFS_CONTEXT_CLASSPATH_PROPERTY.getKey() + "cx1=" + fooFilterJar.toURI().toString() + ","
+ + fooConstraintJar.toURI().toString(), true);
+
+ ts.exec("createtable " + table, true);
+ ts.exec("config -t " + table + " -s " + Property.TABLE_CLASSPATH.getKey() + "=cx1", true);
+
+ UtilWaitThread.sleep(200);
+
+ // We can't use the setiter command: Filter implements OptionDescriber, which makes
+ // the shell prompt for interactive input that is awkward to script.
+ // Instead, we can just manually set the property on the table.
+ ts.exec("config -t " + table + " -s " + Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.foo=10,org.apache.accumulo.test.FooFilter");
+
+ ts.exec("insert foo f q v", true);
+
+ UtilWaitThread.sleep(100);
+
+ ts.exec("scan -np", true, "foo", false);
+
+ ts.exec("constraint -a FooConstraint", true);
+
+ ts.exec("offline -w " + table);
+ ts.exec("online -w " + table);
+
+ ts.exec("table " + table, true);
+ ts.exec("insert foo f q v", false);
+ ts.exec("insert ok foo q v", true);
+
+ ts.exec("deletetable -f " + table, true);
+ ts.exec("config -d " + Property.VFS_CONTEXT_CLASSPATH_PROPERTY.getKey() + "cx1");
+
+ }
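+
+ // Per-table classpath loading works in two steps: the general.vfs.context.classpath.cx1
+ // property defines a VFS context pointing at the jars, and table.classpath.context=cx1
+ // binds the table to that context so its iterators and constraints load from those jars.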
+
+ @Test
+ public void trace() throws Exception {
+ // Make sure to not collide with the "trace" table
+ final String table = name.getMethodName() + "Test";
+
+ ts.exec("trace on", true);
+ ts.exec("createtable " + table, true);
+ ts.exec("insert a b c value", true);
+ ts.exec("scan -np", true, "value", true);
+ ts.exec("deletetable -f " + table);
+ ts.exec("sleep 1");
+ String trace = ts.exec("trace off");
+ System.out.println(trace);
+ assertTrue(trace.contains("sendMutations"));
+ assertTrue(trace.contains("startScan"));
+ assertTrue(trace.contains("DeleteTable"));
+ }
+
+ @Test
+ public void badLogin() throws Exception {
+ // Can't run with Kerberos, can't switch identity in shell presently
+ Assume.assumeTrue(getToken() instanceof PasswordToken);
+ ts.input.set(getRootPassword() + "\n");
+ String err = ts.exec("user NoSuchUser", false);
+ assertTrue(err.contains("BAD_CREDENTIALS for user NoSuchUser"));
+ }
+
+ @Test
+ public void namespaces() throws Exception {
+ ts.exec("namespaces", true, "\"\"", true); // default namespace, displayed as quoted empty string
+ ts.exec("namespaces", true, Namespaces.ACCUMULO_NAMESPACE, true);
+ ts.exec("createnamespace thing1", true);
+ String namespaces = ts.exec("namespaces");
+ assertTrue(namespaces.contains("thing1"));
+
+ ts.exec("renamenamespace thing1 thing2");
+ namespaces = ts.exec("namespaces");
+ assertTrue(namespaces.contains("thing2"));
+ assertTrue(!namespaces.contains("thing1"));
+
+ // can't delete a namespace that still contains tables, unless you do -f
+ ts.exec("createtable thing2.thingy", true);
+ ts.exec("deletenamespace thing2");
+ ts.exec("y");
+ ts.exec("namespaces", true, "thing2", true);
+
+ ts.exec("du -ns thing2", true, "thing2.thingy", true);
+
+ // all "TableOperation" commands can take a namespace
+ ts.exec("offline -ns thing2", true);
+ ts.exec("online -ns thing2", true);
+ ts.exec("flush -ns thing2", true);
+ ts.exec("compact -ns thing2", true);
+ ts.exec("createnamespace testers3", true);
+ ts.exec("createtable testers3.1", true);
+ ts.exec("createtable testers3.2", true);
+ ts.exec("deletetable -ns testers3 -f", true);
+ ts.exec("tables", true, "testers3.1", false);
+ ts.exec("namespaces", true, "testers3", true);
+ ts.exec("deletenamespace testers3 -f", true);
+ ts.input.set("true\n\n\nSTRING\n");
+ ts.exec("setiter -ns thing2 -scan -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -n name", true);
+ ts.exec("listiter -ns thing2 -scan", true, "Summing", true);
+ ts.exec("deleteiter -ns thing2 -n name -scan", true);
+ ts.exec("createuser dude");
+ ts.exec("pass");
+ ts.exec("pass");
+ ts.exec("grant Namespace.CREATE_TABLE -ns thing2 -u dude", true);
+ ts.exec("revoke Namespace.CREATE_TABLE -ns thing2 -u dude", true);
+
+ // properties override and such
+ ts.exec("config -ns thing2 -s table.file.max=44444", true);
+ ts.exec("config -ns thing2", true, "44444", true);
+ ts.exec("config -t thing2.thingy", true, "44444", true);
+ ts.exec("config -t thing2.thingy -s table.file.max=55555", true);
+ ts.exec("config -t thing2.thingy", true, "55555", true);
+
+ // can copy properties when creating
+ ts.exec("createnamespace thing3 -cc thing2", true);
+ ts.exec("config -ns thing3", true, "44444", true);
+
+ ts.exec("deletenamespace -f thing2", true);
+ ts.exec("namespaces", true, "thing2", false);
+ ts.exec("tables", true, "thing2.thingy", false);
+
+ // put constraints on a namespace
+ ts.exec("constraint -ns thing3 -a org.apache.accumulo.examples.simple.constraints.NumericValueConstraint", true);
+ ts.exec("createtable thing3.constrained", true);
+ ts.exec("table thing3.constrained", true);
+ ts.exec("constraint -d 1");
+ // should fail
+ ts.exec("constraint -l", true, "NumericValueConstraint", true);
+ ts.exec("insert r cf cq abc", false);
+ ts.exec("constraint -ns thing3 -d 1");
+ ts.exec("sleep 1");
+ ts.exec("insert r cf cq abc", true);
+ }
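+
+ // Property resolution above follows the expected precedence: a table-level setting
+ // (55555) overrides the namespace-level setting (44444), which in turn overrides the
+ // system default; -cc copies the source namespace's properties at creation time.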
+
+ private int countkeys(String table) throws IOException {
+ ts.exec("scan -np -t " + table);
+ return ts.output.get().split("\n").length - 1;
+ }
+
+ @Test
+ public void scans() throws Exception {
+ ts.exec("createtable t");
+ make10();
+ String result = ts.exec("scan -np -b row1 -e row1");
+ assertEquals(2, result.split("\n").length);
+ result = ts.exec("scan -np -b row3 -e row5");
+ assertEquals(4, result.split("\n").length);
+ result = ts.exec("scan -np -r row3");
+ assertEquals(2, result.split("\n").length);
+ result = ts.exec("scan -np -b row:");
+ assertEquals(1, result.split("\n").length);
+ result = ts.exec("scan -np -b row");
+ assertEquals(11, result.split("\n").length);
+ result = ts.exec("scan -np -e row:");
+ assertEquals(11, result.split("\n").length);
+ ts.exec("deletetable -f t");
+ }
+
+ @Test
+ public void whoami() throws Exception {
+ AuthenticationToken token = getToken();
+ assertTrue(ts.exec("whoami", true).contains(getPrincipal()));
+ // Unnecessary with Kerberos enabled, won't prompt for a password
+ if (token instanceof PasswordToken) {
+ ts.input.set("secret\nsecret\n");
+ }
+ ts.exec("createuser test_user");
+ ts.exec("setauths -u test_user -s 12,3,4");
+ String auths = ts.exec("getauths -u test_user");
+ assertTrue(auths.contains("3") && auths.contains("12") && auths.contains("4"));
+ // No support to switch users within the shell with Kerberos
+ if (token instanceof PasswordToken) {
+ ts.input.set("secret\n");
+ ts.exec("user test_user", true);
+ assertTrue(ts.exec("whoami", true).contains("test_user"));
+ ts.input.set(getRootPassword() + "\n");
+ ts.exec("user root", true);
+ }
+ }
+
+ private void make10() throws IOException {
+ for (int i = 0; i < 10; i++) {
+ ts.exec(String.format("insert row%d cf col%d value", i, i));
+ }
+ }
+
+ private List<String> getFiles(String tableId) throws IOException {
+ ts.output.clear();
+
+ ts.exec("scan -t " + MetadataTable.NAME + " -np -c file -b " + tableId + " -e " + tableId + "~");
+
+ log.debug("countFiles(): " + ts.output.get());
+
+ String[] lines = StringUtils.split(ts.output.get(), "\n");
+ ts.output.clear();
+
+ if (0 == lines.length) {
+ return Collections.emptyList();
+ }
+
+ return Arrays.asList(Arrays.copyOfRange(lines, 1, lines.length));
+ }
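+
+ // getFiles() scans the metadata table's "file" column family over the table id range;
+ // the first captured line is the echoed scan command, hence the copyOfRange from index 1.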
+
+ private int countFiles(String tableId) throws IOException {
+ return getFiles(tableId).size();
+ }
+
+ private String getTableId(String tableName) throws Exception {
+ Connector conn = getConnector();
+
+ for (int i = 0; i < 5; i++) {
+ Map<String,String> nameToId = conn.tableOperations().tableIdMap();
+ if (nameToId.containsKey(tableName)) {
+ return nameToId.get(tableName);
+ } else {
+ Thread.sleep(1000);
+ }
+ }
+
+ fail("Could not find ID for table: " + tableName);
+ // Will never get here
+ return null;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/SplitCancelsMajCIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/SplitCancelsMajCIT.java b/test/src/main/java/org/apache/accumulo/test/SplitCancelsMajCIT.java
new file mode 100644
index 0000000..4cad3a7
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/SplitCancelsMajCIT.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.EnumSet;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
+import org.apache.accumulo.test.functional.SlowIterator;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+// ACCUMULO-2862
+public class SplitCancelsMajCIT extends SharedMiniClusterBase {
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Test
+ public void test() throws Exception {
+ final String tableName = getUniqueNames(1)[0];
+ final Connector c = getConnector();
+ c.tableOperations().create(tableName);
+ // majc should take 100 * .5 secs
+ IteratorSetting it = new IteratorSetting(100, SlowIterator.class);
+ SlowIterator.setSleepTime(it, 500);
+ c.tableOperations().attachIterator(tableName, it, EnumSet.of(IteratorScope.majc));
+ BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+ for (int i = 0; i < 100; i++) {
+ Mutation m = new Mutation("" + i);
+ m.put("", "", new Value());
+ bw.addMutation(m);
+ }
+ bw.flush();
+ // start majc
+ final AtomicReference<Exception> ex = new AtomicReference<Exception>();
+ Thread thread = new Thread() {
+ @Override
+ public void run() {
+ try {
+ c.tableOperations().compact(tableName, null, null, true, true);
+ } catch (Exception e) {
+ ex.set(e);
+ }
+ }
+ };
+ thread.start();
+
+ long now = System.currentTimeMillis();
+ UtilWaitThread.sleep(10 * 1000);
+ // split the table, interrupts the compaction
+ SortedSet<Text> partitionKeys = new TreeSet<Text>();
+ partitionKeys.add(new Text("10"));
+ c.tableOperations().addSplits(tableName, partitionKeys);
+ thread.join();
+ // wait for the restarted compaction
+ assertTrue(System.currentTimeMillis() - now > 59 * 1000);
+ if (ex.get() != null)
+ throw ex.get();
+ }
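+
+ // Timing argument: with 100 entries and a 500 ms SlowIterator, a full majc takes
+ // roughly 50 seconds. The split lands about 10 seconds in and cancels the running
+ // compaction, which then starts over, so elapsed time above 59 seconds is evidence
+ // that the cancel-and-restart actually happened.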
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/MultiTableBatchWriterIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/MultiTableBatchWriterIT.java b/test/src/main/java/org/apache/accumulo/test/MultiTableBatchWriterIT.java
new file mode 100644
index 0000000..2b03780
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/MultiTableBatchWriterIT.java
@@ -0,0 +1,518 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.MultiTableBatchWriter;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.TableOfflineException;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.client.impl.ClientContext;
+import org.apache.accumulo.core.client.impl.Credentials;
+import org.apache.accumulo.core.client.impl.MultiTableBatchWriterImpl;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.common.collect.Maps;
+
+public class MultiTableBatchWriterIT extends AccumuloClusterHarness {
+
+ private Connector connector;
+ private MultiTableBatchWriter mtbw;
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 5 * 60;
+ }
+
+ @Before
+ public void setUpArgs() throws AccumuloException, AccumuloSecurityException {
+ connector = getConnector();
+ mtbw = getMultiTableBatchWriter(60);
+ }
+
+ public MultiTableBatchWriter getMultiTableBatchWriter(long cacheTimeoutInSeconds) {
+ ClientContext context = new ClientContext(connector.getInstance(), new Credentials(getAdminPrincipal(), getAdminToken()), getCluster().getClientConfig());
+ return new MultiTableBatchWriterImpl(context, new BatchWriterConfig(), cacheTimeoutInSeconds, TimeUnit.SECONDS);
+ }
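+
+ // The cacheTimeoutInSeconds argument controls how long the MultiTableBatchWriter
+ // trusts its table-name-to-id cache: with a 60 second cache a writer can be handed
+ // back for a stale name after a rename, while a 0 second cache (see the
+ // *NoCaching/*WithoutCache tests below) forces a fresh lookup on every call.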
+
+ @Test
+ public void testTableRenameDataValidation() throws Exception {
+
+ try {
+ final String[] names = getUniqueNames(2);
+ final String table1 = names[0], table2 = names[1];
+
+ TableOperations tops = connector.tableOperations();
+ tops.create(table1);
+
+ BatchWriter bw1 = mtbw.getBatchWriter(table1);
+
+ Mutation m1 = new Mutation("foo");
+ m1.put("col1", "", "val1");
+
+ bw1.addMutation(m1);
+
+ tops.rename(table1, table2);
+ tops.create(table1);
+
+ BatchWriter bw2 = mtbw.getBatchWriter(table1);
+
+ Mutation m2 = new Mutation("bar");
+ m2.put("col1", "", "val1");
+
+ bw1.addMutation(m2);
+ bw2.addMutation(m2);
+
+ mtbw.close();
+
+ Map<Entry<String,String>,String> table1Expectations = new HashMap<Entry<String,String>,String>();
+ table1Expectations.put(Maps.immutableEntry("bar", "col1"), "val1");
+
+ Map<Entry<String,String>,String> table2Expectations = new HashMap<Entry<String,String>,String>();
+ table2Expectations.put(Maps.immutableEntry("foo", "col1"), "val1");
+ table2Expectations.put(Maps.immutableEntry("bar", "col1"), "val1");
+
+ Scanner s = connector.createScanner(table1, new Authorizations());
+ s.setRange(new Range());
+ Map<Entry<String,String>,String> actual = new HashMap<Entry<String,String>,String>();
+ for (Entry<Key,Value> entry : s) {
+ actual.put(Maps.immutableEntry(entry.getKey().getRow().toString(), entry.getKey().getColumnFamily().toString()), entry.getValue().toString());
+ }
+
+ Assert.assertEquals("Differing results for " + table1, table1Expectations, actual);
+
+ s = connector.createScanner(table2, new Authorizations());
+ s.setRange(new Range());
+ actual = new HashMap<Entry<String,String>,String>();
+ for (Entry<Key,Value> entry : s) {
+ actual.put(Maps.immutableEntry(entry.getKey().getRow().toString(), entry.getKey().getColumnFamily().toString()), entry.getValue().toString());
+ }
+
+ Assert.assertEquals("Differing results for " + table2, table2Expectations, actual);
+
+ } finally {
+ if (null != mtbw) {
+ mtbw.close();
+ }
+ }
+ }
+
+ @Test
+ public void testTableRenameSameWriters() throws Exception {
+
+ try {
+ final String[] names = getUniqueNames(4);
+ final String table1 = names[0], table2 = names[1];
+ final String newTable1 = names[2], newTable2 = names[3];
+
+ TableOperations tops = connector.tableOperations();
+ tops.create(table1);
+ tops.create(table2);
+
+ BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
+
+ Mutation m1 = new Mutation("foo");
+ m1.put("col1", "", "val1");
+ m1.put("col2", "", "val2");
+
+ bw1.addMutation(m1);
+ bw2.addMutation(m1);
+
+ tops.rename(table1, newTable1);
+ tops.rename(table2, newTable2);
+
+ Mutation m2 = new Mutation("bar");
+ m2.put("col1", "", "val1");
+ m2.put("col2", "", "val2");
+
+ bw1.addMutation(m2);
+ bw2.addMutation(m2);
+
+ mtbw.close();
+
+ Map<Entry<String,String>,String> expectations = new HashMap<Entry<String,String>,String>();
+ expectations.put(Maps.immutableEntry("foo", "col1"), "val1");
+ expectations.put(Maps.immutableEntry("foo", "col2"), "val2");
+ expectations.put(Maps.immutableEntry("bar", "col1"), "val1");
+ expectations.put(Maps.immutableEntry("bar", "col2"), "val2");
+
+ for (String table : Arrays.asList(newTable1, newTable2)) {
+ Scanner s = connector.createScanner(table, new Authorizations());
+ s.setRange(new Range());
+ Map<Entry<String,String>,String> actual = new HashMap<Entry<String,String>,String>();
+ for (Entry<Key,Value> entry : s) {
+ actual.put(Maps.immutableEntry(entry.getKey().getRow().toString(), entry.getKey().getColumnFamily().toString()), entry.getValue().toString());
+ }
+
+ Assert.assertEquals("Differing results for " + table, expectations, actual);
+ }
+ } finally {
+ if (null != mtbw) {
+ mtbw.close();
+ }
+ }
+ }
+
+ @Test
+ public void testTableRenameNewWriters() throws Exception {
+
+ try {
+ final String[] names = getUniqueNames(4);
+ final String table1 = names[0], table2 = names[1];
+ final String newTable1 = names[2], newTable2 = names[3];
+
+ TableOperations tops = connector.tableOperations();
+ tops.create(table1);
+ tops.create(table2);
+
+ BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
+
+ Mutation m1 = new Mutation("foo");
+ m1.put("col1", "", "val1");
+ m1.put("col2", "", "val2");
+
+ bw1.addMutation(m1);
+ bw2.addMutation(m1);
+
+ tops.rename(table1, newTable1);
+
+ // The MTBW cache still maps the old name to the correct table id, but the cache
+ // should be invalidated once the rename is observed
+ try {
+ bw1 = mtbw.getBatchWriter(table1);
+ Assert.fail("Should not be able to find this table");
+ } catch (TableNotFoundException e) {
+ // pass
+ }
+
+ tops.rename(table2, newTable2);
+
+ try {
+ bw2 = mtbw.getBatchWriter(table2);
+ Assert.fail("Should not be able to find this table");
+ } catch (TableNotFoundException e) {
+ // pass
+ }
+
+ bw1 = mtbw.getBatchWriter(newTable1);
+ bw2 = mtbw.getBatchWriter(newTable2);
+
+ Mutation m2 = new Mutation("bar");
+ m2.put("col1", "", "val1");
+ m2.put("col2", "", "val2");
+
+ bw1.addMutation(m2);
+ bw2.addMutation(m2);
+
+ mtbw.close();
+
+ Map<Entry<String,String>,String> expectations = new HashMap<Entry<String,String>,String>();
+ expectations.put(Maps.immutableEntry("foo", "col1"), "val1");
+ expectations.put(Maps.immutableEntry("foo", "col2"), "val2");
+ expectations.put(Maps.immutableEntry("bar", "col1"), "val1");
+ expectations.put(Maps.immutableEntry("bar", "col2"), "val2");
+
+ for (String table : Arrays.asList(newTable1, newTable2)) {
+ Scanner s = connector.createScanner(table, new Authorizations());
+ s.setRange(new Range());
+ Map<Entry<String,String>,String> actual = new HashMap<Entry<String,String>,String>();
+ for (Entry<Key,Value> entry : s) {
+ actual.put(Maps.immutableEntry(entry.getKey().getRow().toString(), entry.getKey().getColumnFamily().toString()), entry.getValue().toString());
+ }
+
+ Assert.assertEquals("Differing results for " + table, expectations, actual);
+ }
+ } finally {
+ if (null != mtbw) {
+ mtbw.close();
+ }
+ }
+ }
+
+ @Test
+ public void testTableRenameNewWritersNoCaching() throws Exception {
+ mtbw = getMultiTableBatchWriter(0);
+
+ try {
+ final String[] names = getUniqueNames(4);
+ final String table1 = names[0], table2 = names[1];
+ final String newTable1 = names[2], newTable2 = names[3];
+
+ TableOperations tops = connector.tableOperations();
+ tops.create(table1);
+ tops.create(table2);
+
+ BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
+
+ Mutation m1 = new Mutation("foo");
+ m1.put("col1", "", "val1");
+ m1.put("col2", "", "val2");
+
+ bw1.addMutation(m1);
+ bw2.addMutation(m1);
+
+ tops.rename(table1, newTable1);
+ tops.rename(table2, newTable2);
+
+ try {
+ bw1 = mtbw.getBatchWriter(table1);
+ Assert.fail("Should not have gotten batchwriter for " + table1);
+ } catch (TableNotFoundException e) {
+ // Pass
+ }
+
+ try {
+ bw2 = mtbw.getBatchWriter(table2);
+ Assert.fail("Should not have gotten batchwriter for " + table2);
+ } catch (TableNotFoundException e) {
+ // Pass
+ }
+ } finally {
+ if (null != mtbw) {
+ mtbw.close();
+ }
+ }
+ }
+
+ @Test
+ public void testTableDelete() throws Exception {
+ boolean mutationsRejected = false;
+
+ try {
+ final String[] names = getUniqueNames(2);
+ final String table1 = names[0], table2 = names[1];
+
+ TableOperations tops = connector.tableOperations();
+ tops.create(table1);
+ tops.create(table2);
+
+ BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
+
+ Mutation m1 = new Mutation("foo");
+ m1.put("col1", "", "val1");
+ m1.put("col2", "", "val2");
+
+ bw1.addMutation(m1);
+ bw2.addMutation(m1);
+
+ tops.delete(table1);
+ tops.delete(table2);
+
+ Mutation m2 = new Mutation("bar");
+ m2.put("col1", "", "val1");
+ m2.put("col2", "", "val2");
+
+ try {
+ bw1.addMutation(m2);
+ bw2.addMutation(m2);
+ } catch (MutationsRejectedException e) {
+ // Pass - Mutations might flush immediately
+ mutationsRejected = true;
+ }
+
+ } finally {
+ if (null != mtbw) {
+ try {
+ // Mutations might have flushed before the table offline occurred
+ mtbw.close();
+ } catch (MutationsRejectedException e) {
+ // Pass
+ mutationsRejected = true;
+ }
+ }
+ }
+
+ Assert.assertTrue("Expected mutations to be rejected.", mutationsRejected);
+ }
+
+ @Test
+ public void testOfflineTable() throws Exception {
+ boolean mutationsRejected = false;
+
+ try {
+ final String[] names = getUniqueNames(2);
+ final String table1 = names[0], table2 = names[1];
+
+ TableOperations tops = connector.tableOperations();
+ tops.create(table1);
+ tops.create(table2);
+
+ BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
+
+ Mutation m1 = new Mutation("foo");
+ m1.put("col1", "", "val1");
+ m1.put("col2", "", "val2");
+
+ bw1.addMutation(m1);
+ bw2.addMutation(m1);
+
+ tops.offline(table1, true);
+ tops.offline(table2, true);
+
+ Mutation m2 = new Mutation("bar");
+ m2.put("col1", "", "val1");
+ m2.put("col2", "", "val2");
+
+ try {
+ bw1.addMutation(m2);
+ bw2.addMutation(m2);
+ } catch (MutationsRejectedException e) {
+ // Pass -- Mutations might flush immediately and fail because of offline table
+ mutationsRejected = true;
+ }
+ } finally {
+ if (null != mtbw) {
+ try {
+ mtbw.close();
+ } catch (MutationsRejectedException e) {
+ // Pass
+ mutationsRejected = true;
+ }
+ }
+ }
+
+ Assert.assertTrue("Expected mutations to be rejected.", mutationsRejected);
+ }
+
+ @Test
+ public void testOfflineTableWithCache() throws Exception {
+ boolean mutationsRejected = false;
+
+ try {
+ final String[] names = getUniqueNames(2);
+ final String table1 = names[0], table2 = names[1];
+
+ TableOperations tops = connector.tableOperations();
+ tops.create(table1);
+ tops.create(table2);
+
+ BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
+
+ Mutation m1 = new Mutation("foo");
+ m1.put("col1", "", "val1");
+ m1.put("col2", "", "val2");
+
+ bw1.addMutation(m1);
+ bw2.addMutation(m1);
+
+ tops.offline(table1);
+
+ try {
+ bw1 = mtbw.getBatchWriter(table1);
+ } catch (TableOfflineException e) {
+ // pass
+ mutationsRejected = true;
+ }
+
+ tops.offline(table2);
+
+ try {
+ bw2 = mtbw.getBatchWriter(table2);
+ } catch (TableOfflineException e) {
+ // pass
+ mutationsRejected = true;
+ }
+ } finally {
+ if (null != mtbw) {
+ try {
+ // Mutations might have flushed before the table offline occurred
+ mtbw.close();
+ } catch (MutationsRejectedException e) {
+ // Pass
+ mutationsRejected = true;
+ }
+ }
+ }
+
+ Assert.assertTrue("Expected mutations to be rejected.", mutationsRejected);
+ }
+
+ @Test
+ public void testOfflineTableWithoutCache() throws Exception {
+ mtbw = getMultiTableBatchWriter(0);
+ boolean mutationsRejected = false;
+
+ try {
+ final String[] names = getUniqueNames(2);
+ final String table1 = names[0], table2 = names[1];
+
+ TableOperations tops = connector.tableOperations();
+ tops.create(table1);
+ tops.create(table2);
+
+ BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);
+
+ Mutation m1 = new Mutation("foo");
+ m1.put("col1", "", "val1");
+ m1.put("col2", "", "val2");
+
+ bw1.addMutation(m1);
+ bw2.addMutation(m1);
+
+ // Mutations might or might not flush before tables goes offline
+ tops.offline(table1);
+ tops.offline(table2);
+
+ try {
+ bw1 = mtbw.getBatchWriter(table1);
+ Assert.fail(table1 + " should be offline");
+ } catch (TableOfflineException e) {
+ // pass
+ mutationsRejected = true;
+ }
+
+ try {
+ bw2 = mtbw.getBatchWriter(table2);
+ Assert.fail(table2 + " should be offline");
+ } catch (TableOfflineException e) {
+ // pass
+ mutationsRejected = true;
+ }
+ } finally {
+ if (null != mtbw) {
+ try {
+ // Mutations might have flushed before the table offline occurred
+ mtbw.close();
+ } catch (MutationsRejectedException e) {
+ // Pass
+ mutationsRejected = true;
+ }
+ }
+ }
+
+ Assert.assertTrue("Expected mutations to be rejected.", mutationsRejected);
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/MultiTableRecoveryIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/MultiTableRecoveryIT.java b/test/src/main/java/org/apache/accumulo/test/MultiTableRecoveryIT.java
new file mode 100644
index 0000000..37e4957
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/MultiTableRecoveryIT.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class MultiTableRecoveryIT extends ConfigurableMacBase {
+
+ @Override
+ protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
+
+ // use raw local file system so walogs sync and flush will work
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ @Test(timeout = 4 * 60 * 1000)
+ public void testRecoveryOverMultipleTables() throws Exception {
+ final int N = 3;
+ final Connector c = getConnector();
+ final String[] tables = getUniqueNames(N);
+ final BatchWriter[] writers = new BatchWriter[N];
+ final byte[][] values = new byte[N][];
+ int i = 0;
+ System.out.println("Creating tables");
+ for (String tableName : tables) {
+ c.tableOperations().create(tableName);
+ values[i] = Integer.toString(i).getBytes();
+ writers[i] = c.createBatchWriter(tableName, null);
+ i++;
+ }
+ System.out.println("Creating agitator");
+ final AtomicBoolean stop = new AtomicBoolean(false);
+ final Thread agitator = agitator(stop);
+ agitator.start();
+ System.out.println("writing");
+ final Random random = new Random();
+ for (i = 0; i < 1_000_000; i++) {
+ // mask to make the row non-negative; Math.abs is avoided because Math.abs(Long.MIN_VALUE) is still negative
+ long randomRow = random.nextLong() & Long.MAX_VALUE;
+ assertTrue(randomRow >= 0);
+ final int table = (int) (randomRow % N);
+ final Mutation m = new Mutation(Long.toHexString(randomRow));
+ m.put(new byte[0], new byte[0], values[table]);
+ writers[table].addMutation(m);
+ if (i % 10_000 == 0) {
+ System.out.println("flushing");
+ for (int w = 0; w < N; w++) {
+ writers[w].flush();
+ }
+ }
+ }
+ System.out.println("closing");
+ for (int w = 0; w < N; w++) {
+ writers[w].close();
+ }
+ System.out.println("stopping the agitator");
+ stop.set(true);
+ agitator.join();
+ System.out.println("checking the data");
+ long count = 0;
+ for (int w = 0; w < N; w++) {
+ Scanner scanner = c.createScanner(tables[w], Authorizations.EMPTY);
+ for (Entry<Key,Value> entry : scanner) {
+ int value = Integer.parseInt(entry.getValue().toString());
+ assertEquals(w, value);
+ count++;
+ }
+ scanner.close();
+ }
+ assertEquals(1_000_000, count);
+ }
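+
+ // Validation logic: each table only ever receives its own index as a value, so a
+ // recovered entry in the wrong table fails assertEquals(w, value), and a total count
+ // of exactly 1,000,000 shows no writes were lost across the tserver restarts.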
+
+ private Thread agitator(final AtomicBoolean stop) {
+ return new Thread() {
+ @Override
+ public void run() {
+ try {
+ int i = 0;
+ while (!stop.get()) {
+ UtilWaitThread.sleep(10 * 1000);
+ System.out.println("Restarting");
+ getCluster().getClusterControl().stop(ServerType.TABLET_SERVER);
+ getCluster().start();
+ // read the metadata table to know everything is back up
+ Iterators.size(getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY).iterator());
+ i++;
+ }
+ System.out.println("Restarted " + i + " times");
+ } catch (Exception ex) {
+ log.error("{}", ex.getMessage(), ex);
+ }
+ }
+ };
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java b/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java
new file mode 100644
index 0000000..0ecdd0d
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/NamespacesIT.java
@@ -0,0 +1,1362 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.NamespaceExistsException;
+import org.apache.accumulo.core.client.NamespaceNotEmptyException;
+import org.apache.accumulo.core.client.NamespaceNotFoundException;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.admin.NamespaceOperations;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.client.impl.Namespaces;
+import org.apache.accumulo.core.client.impl.Tables;
+import org.apache.accumulo.core.client.impl.thrift.TableOperation;
+import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
+import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
+import org.apache.accumulo.core.client.security.SecurityErrorCode;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.Filter;
+import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
+import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.iterators.user.VersioningIterator;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.NamespacePermission;
+import org.apache.accumulo.core.security.SystemPermission;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.examples.simple.constraints.NumericValueConstraint;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.After;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Test;
+
+// Testing default namespace configuration with inheritance requires altering the system state and restoring it back to normal
+// Punt on this for now and just let it use a minicluster.
+public class NamespacesIT extends AccumuloClusterHarness {
+
+ private Connector c;
+ private String namespace;
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Before
+ public void setupConnectorAndNamespace() throws Exception {
+ Assume.assumeTrue(ClusterType.MINI == getClusterType());
+
+ // prepare a unique namespace and get a new root connector for each test
+ c = getConnector();
+ namespace = "ns_" + getUniqueNames(1)[0];
+ }
+
+ @After
+ public void swingMjölnir() throws Exception {
+ if (null == c) {
+ return;
+ }
+ // clean up any added tables, namespaces, and users, after each test
+ for (String t : c.tableOperations().list())
+ if (!Tables.qualify(t).getFirst().equals(Namespaces.ACCUMULO_NAMESPACE))
+ c.tableOperations().delete(t);
+ assertEquals(3, c.tableOperations().list().size());
+ for (String n : c.namespaceOperations().list())
+ if (!n.equals(Namespaces.ACCUMULO_NAMESPACE) && !n.equals(Namespaces.DEFAULT_NAMESPACE))
+ c.namespaceOperations().delete(n);
+ assertEquals(2, c.namespaceOperations().list().size());
+ for (String u : c.securityOperations().listLocalUsers())
+ if (!getAdminPrincipal().equals(u))
+ c.securityOperations().dropLocalUser(u);
+ assertEquals(1, c.securityOperations().listLocalUsers().size());
+ }
+
+ @Test
+ public void checkReservedNamespaces() throws Exception {
+ assertEquals(Namespaces.DEFAULT_NAMESPACE, c.namespaceOperations().defaultNamespace());
+ assertEquals(Namespaces.ACCUMULO_NAMESPACE, c.namespaceOperations().systemNamespace());
+ }
+
+ @Test
+ public void checkBuiltInNamespaces() throws Exception {
+ assertTrue(c.namespaceOperations().exists(Namespaces.DEFAULT_NAMESPACE));
+ assertTrue(c.namespaceOperations().exists(Namespaces.ACCUMULO_NAMESPACE));
+ }
+
+ @Test
+ public void createTableInDefaultNamespace() throws Exception {
+ String tableName = "1";
+ c.tableOperations().create(tableName);
+ assertTrue(c.tableOperations().exists(tableName));
+ }
+
+ @Test(expected = AccumuloException.class)
+ public void createTableInAccumuloNamespace() throws Exception {
+ String tableName = Namespaces.ACCUMULO_NAMESPACE + ".1";
+ assertFalse(c.tableOperations().exists(tableName));
+ c.tableOperations().create(tableName); // should fail
+ }
+
+ @Test(expected = AccumuloSecurityException.class)
+ public void deleteDefaultNamespace() throws Exception {
+ c.namespaceOperations().delete(Namespaces.DEFAULT_NAMESPACE); // should fail
+ }
+
+ @Test(expected = AccumuloSecurityException.class)
+ public void deleteAccumuloNamespace() throws Exception {
+ c.namespaceOperations().delete(Namespaces.ACCUMULO_NAMESPACE); // should fail
+ }
+
+ @Test
+ public void createTableInMissingNamespace() throws Exception {
+ String t = namespace + ".1";
+ assertFalse(c.namespaceOperations().exists(namespace));
+ assertFalse(c.tableOperations().exists(t));
+ try {
+ c.tableOperations().create(t);
+ fail();
+ } catch (AccumuloException e) {
+ assertEquals(NamespaceNotFoundException.class.getName(), e.getCause().getClass().getName());
+ assertFalse(c.namespaceOperations().exists(namespace));
+ assertFalse(c.tableOperations().exists(t));
+ }
+ }
+
+ @Test
+ public void createAndDeleteNamespace() throws Exception {
+ String t1 = namespace + ".1";
+ String t2 = namespace + ".2";
+ assertFalse(c.namespaceOperations().exists(namespace));
+ assertFalse(c.tableOperations().exists(t1));
+ assertFalse(c.tableOperations().exists(t2));
+ try {
+ c.namespaceOperations().delete(namespace);
+ } catch (NamespaceNotFoundException e) {}
+ try {
+ c.tableOperations().delete(t1);
+ } catch (TableNotFoundException e) {
+ assertEquals(NamespaceNotFoundException.class.getName(), e.getCause().getClass().getName());
+ }
+ c.namespaceOperations().create(namespace);
+ assertTrue(c.namespaceOperations().exists(namespace));
+ assertFalse(c.tableOperations().exists(t1));
+ assertFalse(c.tableOperations().exists(t2));
+ c.tableOperations().create(t1);
+ assertTrue(c.namespaceOperations().exists(namespace));
+ assertTrue(c.tableOperations().exists(t1));
+ assertFalse(c.tableOperations().exists(t2));
+ c.tableOperations().create(t2);
+ assertTrue(c.namespaceOperations().exists(namespace));
+ assertTrue(c.tableOperations().exists(t1));
+ assertTrue(c.tableOperations().exists(t2));
+ c.tableOperations().delete(t1);
+ assertTrue(c.namespaceOperations().exists(namespace));
+ assertFalse(c.tableOperations().exists(t1));
+ assertTrue(c.tableOperations().exists(t2));
+ c.tableOperations().delete(t2);
+ assertTrue(c.namespaceOperations().exists(namespace));
+ assertFalse(c.tableOperations().exists(t1));
+ assertFalse(c.tableOperations().exists(t2));
+ c.namespaceOperations().delete(namespace);
+ assertFalse(c.namespaceOperations().exists(namespace));
+ assertFalse(c.tableOperations().exists(t1));
+ assertFalse(c.tableOperations().exists(t2));
+ }
+
+ @Test(expected = NamespaceNotEmptyException.class)
+ public void deleteNonEmptyNamespace() throws Exception {
+ String tableName1 = namespace + ".1";
+ assertFalse(c.namespaceOperations().exists(namespace));
+ assertFalse(c.tableOperations().exists(tableName1));
+ c.namespaceOperations().create(namespace);
+ c.tableOperations().create(tableName1);
+ assertTrue(c.namespaceOperations().exists(namespace));
+ assertTrue(c.tableOperations().exists(tableName1));
+ c.namespaceOperations().delete(namespace); // should fail
+ }
+
+ @Test
+ public void verifyPropertyInheritance() throws Exception {
+ String t0 = "0";
+ String t1 = namespace + ".1";
+ String t2 = namespace + ".2";
+
+ String k = Property.TABLE_SCAN_MAXMEM.getKey();
+ String v = "42K";
+
+ assertFalse(c.namespaceOperations().exists(namespace));
+ assertFalse(c.tableOperations().exists(t1));
+ assertFalse(c.tableOperations().exists(t2));
+ c.namespaceOperations().create(namespace);
+ c.tableOperations().create(t1);
+ c.tableOperations().create(t0);
+ assertTrue(c.namespaceOperations().exists(namespace));
+ assertTrue(c.tableOperations().exists(t1));
+ assertTrue(c.tableOperations().exists(t0));
+
+ // verify no property
+ assertFalse(checkNamespaceHasProp(namespace, k, v));
+ assertFalse(checkTableHasProp(t1, k, v));
+ assertFalse(checkNamespaceHasProp(Namespaces.DEFAULT_NAMESPACE, k, v));
+ assertFalse(checkTableHasProp(t0, k, v));
+
+ // set property and verify
+ c.namespaceOperations().setProperty(namespace, k, v);
+ assertTrue(checkNamespaceHasProp(namespace, k, v));
+ assertTrue(checkTableHasProp(t1, k, v));
+ assertFalse(checkNamespaceHasProp(Namespaces.DEFAULT_NAMESPACE, k, v));
+ assertFalse(checkTableHasProp(t0, k, v));
+
+ // add a new table to namespace and verify
+ assertFalse(c.tableOperations().exists(t2));
+ c.tableOperations().create(t2);
+ assertTrue(c.tableOperations().exists(t2));
+ assertTrue(checkNamespaceHasProp(namespace, k, v));
+ assertTrue(checkTableHasProp(t1, k, v));
+ assertTrue(checkTableHasProp(t2, k, v));
+ assertFalse(checkNamespaceHasProp(Namespaces.DEFAULT_NAMESPACE, k, v));
+ assertFalse(checkTableHasProp(t0, k, v));
+
+ // remove property and verify
+ c.namespaceOperations().removeProperty(namespace, k);
+ assertFalse(checkNamespaceHasProp(namespace, k, v));
+ assertFalse(checkTableHasProp(t1, k, v));
+ assertFalse(checkTableHasProp(t2, k, v));
+ assertFalse(checkNamespaceHasProp(Namespaces.DEFAULT_NAMESPACE, k, v));
+ assertFalse(checkTableHasProp(t0, k, v));
+
+ // set property on default namespace and verify
+ c.namespaceOperations().setProperty(Namespaces.DEFAULT_NAMESPACE, k, v);
+ assertFalse(checkNamespaceHasProp(namespace, k, v));
+ assertFalse(checkTableHasProp(t1, k, v));
+ assertFalse(checkTableHasProp(t2, k, v));
+ assertTrue(checkNamespaceHasProp(Namespaces.DEFAULT_NAMESPACE, k, v));
+ assertTrue(checkTableHasProp(t0, k, v));
+
+ // test that table properties override namespace properties
+ String k2 = Property.TABLE_FILE_MAX.getKey();
+ String v2 = "42";
+ String table_v2 = "13";
+
+ // set new property on some
+ c.namespaceOperations().setProperty(namespace, k2, v2);
+ c.tableOperations().setProperty(t2, k2, table_v2);
+ assertTrue(checkNamespaceHasProp(namespace, k2, v2));
+ assertTrue(checkTableHasProp(t1, k2, v2));
+ assertTrue(checkTableHasProp(t2, k2, table_v2));
+
+ c.tableOperations().delete(t1);
+ c.tableOperations().delete(t2);
+ c.tableOperations().delete(t0);
+ c.namespaceOperations().delete(namespace);
+ }
+
+ @Test
+ public void verifyIteratorInheritance() throws Exception {
+ String t1 = namespace + ".1";
+ c.namespaceOperations().create(namespace);
+ c.tableOperations().create(t1);
+ String iterName = namespace + "_iter";
+
+ BatchWriter bw = c.createBatchWriter(t1, new BatchWriterConfig());
+ Mutation m = new Mutation("r");
+ m.put("a", "b", new Value("abcde".getBytes()));
+ bw.addMutation(m);
+ bw.flush();
+ bw.close();
+
+ IteratorSetting setting = new IteratorSetting(250, iterName, SimpleFilter.class.getName());
+
+ // verify can see inserted entry
+ Scanner s = c.createScanner(t1, Authorizations.EMPTY);
+ assertTrue(s.iterator().hasNext());
+ assertFalse(c.namespaceOperations().listIterators(namespace).containsKey(iterName));
+ assertFalse(c.tableOperations().listIterators(t1).containsKey(iterName));
+
+ // verify entry is filtered out (also, verify conflict checking API)
+ c.namespaceOperations().checkIteratorConflicts(namespace, setting, EnumSet.allOf(IteratorScope.class));
+ c.namespaceOperations().attachIterator(namespace, setting);
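+ // the setting propagates to the tablet servers through ZooKeeper, so give their caches a moment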
+ UtilWaitThread.sleep(2 * 1000);
+ try {
+ c.namespaceOperations().checkIteratorConflicts(namespace, setting, EnumSet.allOf(IteratorScope.class));
+ fail();
+ } catch (AccumuloException e) {
+ assertEquals(IllegalArgumentException.class.getName(), e.getCause().getClass().getName());
+ }
+ IteratorSetting setting2 = c.namespaceOperations().getIteratorSetting(namespace, setting.getName(), IteratorScope.scan);
+ assertEquals(setting, setting2);
+ assertTrue(c.namespaceOperations().listIterators(namespace).containsKey(iterName));
+ assertTrue(c.tableOperations().listIterators(t1).containsKey(iterName));
+ s = c.createScanner(t1, Authorizations.EMPTY);
+ assertFalse(s.iterator().hasNext());
+
+ // verify can see inserted entry again
+ c.namespaceOperations().removeIterator(namespace, setting.getName(), EnumSet.allOf(IteratorScope.class));
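+ // likewise, wait for the removal to reach the tablet servers' ZooKeeper caches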
+ UtilWaitThread.sleep(2 * 1000);
+ assertFalse(c.namespaceOperations().listIterators(namespace).containsKey(iterName));
+ assertFalse(c.tableOperations().listIterators(t1).containsKey(iterName));
+ s = c.createScanner(t1, Authorizations.EMPTY);
+ assertTrue(s.iterator().hasNext());
+ }
+
+ @Test
+ public void cloneTable() throws Exception {
+ String namespace2 = namespace + "_clone";
+ String t1 = namespace + ".1";
+ String t2 = namespace + ".2";
+ String t3 = namespace2 + ".2";
+ String k1 = Property.TABLE_FILE_MAX.getKey();
+ String k2 = Property.TABLE_FILE_REPLICATION.getKey();
+ String k1v1 = "55";
+ String k1v2 = "66";
+ String k2v1 = "5";
+ String k2v2 = "6";
+
+ c.namespaceOperations().create(namespace);
+ c.tableOperations().create(t1);
+ assertTrue(c.tableOperations().exists(t1));
+ assertFalse(c.namespaceOperations().exists(namespace2));
+ assertFalse(c.tableOperations().exists(t2));
+ assertFalse(c.tableOperations().exists(t3));
+
+ try {
+ // try to clone before the target namespace exists
+ c.tableOperations().clone(t1, t3, false, null, null); // should fail
+ fail();
+ } catch (AccumuloException e) {
+ assertEquals(NamespaceNotFoundException.class.getName(), e.getCause().getClass().getName());
+ }
+
+ // try to clone when the target tables already exist
+ c.namespaceOperations().create(namespace2);
+ c.tableOperations().create(t2);
+ c.tableOperations().create(t3);
+ for (String t : Arrays.asList(t2, t3)) {
+ try {
+ c.tableOperations().clone(t1, t, false, null, null); // should fail
+ fail();
+ } catch (TableExistsException e) {
+ c.tableOperations().delete(t);
+ }
+ }
+
+ assertTrue(c.tableOperations().exists(t1));
+ assertTrue(c.namespaceOperations().exists(namespace2));
+ assertFalse(c.tableOperations().exists(t2));
+ assertFalse(c.tableOperations().exists(t3));
+
+ // set one property to different values in the two namespaces, and a second property to the same value in both namespaces but a different value on the table
+ assertFalse(checkNamespaceHasProp(namespace, k1, k1v1));
+ assertFalse(checkNamespaceHasProp(namespace2, k1, k1v2));
+ assertFalse(checkTableHasProp(t1, k1, k1v1));
+ assertFalse(checkTableHasProp(t1, k1, k1v2));
+ assertFalse(checkNamespaceHasProp(namespace, k2, k2v1));
+ assertFalse(checkNamespaceHasProp(namespace2, k2, k2v1));
+ assertFalse(checkTableHasProp(t1, k2, k2v1));
+ assertFalse(checkTableHasProp(t1, k2, k2v2));
+ c.namespaceOperations().setProperty(namespace, k1, k1v1);
+ c.namespaceOperations().setProperty(namespace2, k1, k1v2);
+ c.namespaceOperations().setProperty(namespace, k2, k2v1);
+ c.namespaceOperations().setProperty(namespace2, k2, k2v1);
+ c.tableOperations().setProperty(t1, k2, k2v2);
+ assertTrue(checkNamespaceHasProp(namespace, k1, k1v1));
+ assertTrue(checkNamespaceHasProp(namespace2, k1, k1v2));
+ assertTrue(checkTableHasProp(t1, k1, k1v1));
+ assertFalse(checkTableHasProp(t1, k1, k1v2));
+ assertTrue(checkNamespaceHasProp(namespace, k2, k2v1));
+ assertTrue(checkNamespaceHasProp(namespace2, k2, k2v1));
+ assertFalse(checkTableHasProp(t1, k2, k2v1));
+ assertTrue(checkTableHasProp(t1, k2, k2v2));
+
+ // clone twice, once in same namespace, once in another
+ for (String t : Arrays.asList(t2, t3))
+ c.tableOperations().clone(t1, t, false, null, null);
+
+ assertTrue(c.namespaceOperations().exists(namespace2));
+ assertTrue(c.tableOperations().exists(t1));
+ assertTrue(c.tableOperations().exists(t2));
+ assertTrue(c.tableOperations().exists(t3));
+
+ // verify the properties got transferred
+ assertTrue(checkTableHasProp(t1, k1, k1v1));
+ assertTrue(checkTableHasProp(t2, k1, k1v1));
+ assertTrue(checkTableHasProp(t3, k1, k1v2));
+ assertTrue(checkTableHasProp(t1, k2, k2v2));
+ assertTrue(checkTableHasProp(t2, k2, k2v2));
+ assertTrue(checkTableHasProp(t3, k2, k2v2));
+ }
+
+ @Test
+ public void renameNamespaceWithTable() throws Exception {
+ String namespace2 = namespace + "_renamed";
+ String t1 = namespace + ".t";
+ String t2 = namespace2 + ".t";
+
+ c.namespaceOperations().create(namespace);
+ c.tableOperations().create(t1);
+ assertTrue(c.namespaceOperations().exists(namespace));
+ assertTrue(c.tableOperations().exists(t1));
+ assertFalse(c.namespaceOperations().exists(namespace2));
+ assertFalse(c.tableOperations().exists(t2));
+
+ String namespaceId = c.namespaceOperations().namespaceIdMap().get(namespace);
+ String tableId = c.tableOperations().tableIdMap().get(t1);
+
+ c.namespaceOperations().rename(namespace, namespace2);
+ assertFalse(c.namespaceOperations().exists(namespace));
+ assertFalse(c.tableOperations().exists(t1));
+ assertTrue(c.namespaceOperations().exists(namespace2));
+ assertTrue(c.tableOperations().exists(t2));
+
+ // verify id's didn't change
+ String namespaceId2 = c.namespaceOperations().namespaceIdMap().get(namespace2);
+ String tableId2 = c.tableOperations().tableIdMap().get(t2);
+
+ assertEquals(namespaceId, namespaceId2);
+ assertEquals(tableId, tableId2);
+ }
+
+ @Test
+ public void verifyConstraintInheritance() throws Exception {
+ String t1 = namespace + ".1";
+ c.namespaceOperations().create(namespace);
+ c.tableOperations().create(t1, new NewTableConfiguration().withoutDefaultIterators());
+ String constraintClassName = NumericValueConstraint.class.getName();
+
+ assertFalse(c.namespaceOperations().listConstraints(namespace).containsKey(constraintClassName));
+ assertFalse(c.tableOperations().listConstraints(t1).containsKey(constraintClassName));
+
+ c.namespaceOperations().addConstraint(namespace, constraintClassName);
+ assertTrue(c.namespaceOperations().listConstraints(namespace).containsKey(constraintClassName));
+ assertTrue(c.tableOperations().listConstraints(t1).containsKey(constraintClassName));
+ int num = c.namespaceOperations().listConstraints(namespace).get(constraintClassName);
+ assertEquals(num, (int) c.tableOperations().listConstraints(t1).get(constraintClassName));
+ // doesn't take effect immediately, needs time to propagate to tserver's ZooKeeper cache
+ UtilWaitThread.sleep(250);
+
+ Mutation m1 = new Mutation("r1");
+ Mutation m2 = new Mutation("r2");
+ Mutation m3 = new Mutation("r3");
+ m1.put("a", "b", new Value("abcde".getBytes(UTF_8)));
+ m2.put("e", "f", new Value("123".getBytes(UTF_8)));
+ m3.put("c", "d", new Value("zyxwv".getBytes(UTF_8)));
+ BatchWriter bw = c.createBatchWriter(t1, new BatchWriterConfig());
+ bw.addMutations(Arrays.asList(m1, m2, m3));
+ try {
+ bw.close();
+ fail();
+ } catch (MutationsRejectedException e) {
+ assertEquals(1, e.getConstraintViolationSummaries().size());
+ assertEquals(2, e.getConstraintViolationSummaries().get(0).getNumberOfViolatingMutations());
+ }
+ c.namespaceOperations().removeConstraint(namespace, num);
+ assertFalse(c.namespaceOperations().listConstraints(namespace).containsKey(constraintClassName));
+ assertFalse(c.tableOperations().listConstraints(t1).containsKey(constraintClassName));
+ // doesn't take effect immediately, needs time to propagate to tserver's ZooKeeper cache
+ UtilWaitThread.sleep(250);
+
+ bw = c.createBatchWriter(t1, new BatchWriterConfig());
+ bw.addMutations(Arrays.asList(m1, m2, m3));
+ bw.close();
+ }
+
+ @Test
+ public void renameTable() throws Exception {
+ String namespace2 = namespace + "_renamed";
+ String t1 = namespace + ".1";
+ String t2 = namespace2 + ".2";
+ String t3 = namespace + ".3";
+ String t4 = namespace + ".4";
+ String t5 = "5";
+
+ c.namespaceOperations().create(namespace);
+ c.namespaceOperations().create(namespace2);
+
+ assertTrue(c.namespaceOperations().exists(namespace));
+ assertTrue(c.namespaceOperations().exists(namespace2));
+ assertFalse(c.tableOperations().exists(t1));
+ assertFalse(c.tableOperations().exists(t2));
+ assertFalse(c.tableOperations().exists(t3));
+ assertFalse(c.tableOperations().exists(t4));
+ assertFalse(c.tableOperations().exists(t5));
+
+ c.tableOperations().create(t1);
+
+ try {
+ c.tableOperations().rename(t1, t2);
+ fail();
+ } catch (AccumuloException e) {
+ // this is expected, because we don't allow renames across namespaces
+ assertEquals(ThriftTableOperationException.class.getName(), e.getCause().getClass().getName());
+ assertEquals(TableOperation.RENAME, ((ThriftTableOperationException) e.getCause()).getOp());
+ assertEquals(TableOperationExceptionType.INVALID_NAME, ((ThriftTableOperationException) e.getCause()).getType());
+ }
+
+ try {
+ c.tableOperations().rename(t1, t5);
+ fail();
+ } catch (AccumuloException e) {
+ // this is expected, because we don't allow renames across namespaces
+ assertEquals(ThriftTableOperationException.class.getName(), e.getCause().getClass().getName());
+ assertEquals(TableOperation.RENAME, ((ThriftTableOperationException) e.getCause()).getOp());
+ assertEquals(TableOperationExceptionType.INVALID_NAME, ((ThriftTableOperationException) e.getCause()).getType());
+ }
+
+ assertTrue(c.tableOperations().exists(t1));
+ assertFalse(c.tableOperations().exists(t2));
+ assertFalse(c.tableOperations().exists(t3));
+ assertFalse(c.tableOperations().exists(t4));
+ assertFalse(c.tableOperations().exists(t5));
+
+ // fully qualified rename
+ c.tableOperations().rename(t1, t3);
+ assertFalse(c.tableOperations().exists(t1));
+ assertFalse(c.tableOperations().exists(t2));
+ assertTrue(c.tableOperations().exists(t3));
+ assertFalse(c.tableOperations().exists(t4));
+ assertFalse(c.tableOperations().exists(t5));
+ }
+
+ private void loginAs(ClusterUser user) throws IOException {
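+ // getting the token performs the login; on Kerberos clusters this forces a fresh ticket, for password users it is effectively a no-op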
+ user.getToken();
+ }
+
+ /**
+ * Tests the new namespace permissions, as well as the namespace-driven changes to table permissions. For each permission, first verify that the user
+ * cannot perform the action, then have root grant the permission and verify that the action now succeeds.
+ */
+ @Test
+ public void testPermissions() throws Exception {
+ ClusterUser user1 = getUser(0), user2 = getUser(1), root = getAdminUser();
+ String u1 = user1.getPrincipal();
+ String u2 = user2.getPrincipal();
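+ // getPassword() returns null when the cluster authenticates with Kerberos rather than passwords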
+ PasswordToken pass = (null != user1.getPassword() ? new PasswordToken(user1.getPassword()) : null);
+
+ String n1 = namespace;
+ String t1 = n1 + ".1";
+ String t2 = n1 + ".2";
+ String t3 = n1 + ".3";
+
+ String n2 = namespace + "_2";
+
+ loginAs(root);
+ c.namespaceOperations().create(n1);
+ c.tableOperations().create(t1);
+
+ c.securityOperations().createLocalUser(u1, pass);
+
+ loginAs(user1);
+ Connector user1Con = c.getInstance().getConnector(u1, user1.getToken());
+
+ try {
+ user1Con.tableOperations().create(t2);
+ fail();
+ } catch (AccumuloSecurityException e) {
+ expectPermissionDenied(e);
+ }
+
+ loginAs(root);
+ c.securityOperations().grantNamespacePermission(u1, n1, NamespacePermission.CREATE_TABLE);
+ loginAs(user1);
+ user1Con.tableOperations().create(t2);
+ loginAs(root);
+ assertTrue(c.tableOperations().list().contains(t2));
+ c.securityOperations().revokeNamespacePermission(u1, n1, NamespacePermission.CREATE_TABLE);
+
+ loginAs(user1);
+ try {
+ user1Con.tableOperations().delete(t1);
+ fail();
+ } catch (AccumuloSecurityException e) {
+ expectPermissionDenied(e);
+ }
+
+ loginAs(root);
+ c.securityOperations().grantNamespacePermission(u1, n1, NamespacePermission.DROP_TABLE);
+ loginAs(user1);
+ user1Con.tableOperations().delete(t1);
+ loginAs(root);
+ assertFalse(c.tableOperations().list().contains(t1));
+ c.securityOperations().revokeNamespacePermission(u1, n1, NamespacePermission.DROP_TABLE);
+
+ c.tableOperations().create(t3);
+ BatchWriter bw = c.createBatchWriter(t3, null);
+ Mutation m = new Mutation("row");
+ m.put("cf", "cq", "value");
+ bw.addMutation(m);
+ bw.close();
+
+ loginAs(user1);
+ Iterator<Entry<Key,Value>> i = user1Con.createScanner(t3, new Authorizations()).iterator();
+ try {
+ i.next();
+ fail();
+ } catch (RuntimeException e) {
+ assertEquals(AccumuloSecurityException.class.getName(), e.getCause().getClass().getName());
+ expectPermissionDenied((AccumuloSecurityException) e.getCause());
+ }
+
+ loginAs(user1);
+ m = new Mutation(u1);
+ m.put("cf", "cq", "turtles");
+ bw = user1Con.createBatchWriter(t3, null);
+ try {
+ bw.addMutation(m);
+ bw.close();
+ fail();
+ } catch (MutationsRejectedException e) {
+ assertEquals(1, e.getSecurityErrorCodes().size());
+ assertEquals(1, e.getSecurityErrorCodes().entrySet().iterator().next().getValue().size());
+ switch (e.getSecurityErrorCodes().entrySet().iterator().next().getValue().iterator().next()) {
+ case PERMISSION_DENIED:
+ break;
+ default:
+ fail();
+ }
+ }
+
+ loginAs(root);
+ c.securityOperations().grantNamespacePermission(u1, n1, NamespacePermission.READ);
+ loginAs(user1);
+ i = user1Con.createScanner(t3, new Authorizations()).iterator();
+ assertTrue(i.hasNext());
+ loginAs(root);
+ c.securityOperations().revokeNamespacePermission(u1, n1, NamespacePermission.READ);
+ c.securityOperations().grantNamespacePermission(u1, n1, NamespacePermission.WRITE);
+
+ loginAs(user1);
+ m = new Mutation(u1);
+ m.put("cf", "cq", "turtles");
+ bw = user1Con.createBatchWriter(t3, null);
+ bw.addMutation(m);
+ bw.close();
+ loginAs(root);
+ c.securityOperations().revokeNamespacePermission(u1, n1, NamespacePermission.WRITE);
+
+ loginAs(user1);
+ try {
+ user1Con.tableOperations().setProperty(t3, Property.TABLE_FILE_MAX.getKey(), "42");
+ fail();
+ } catch (AccumuloSecurityException e) {
+ expectPermissionDenied(e);
+ }
+
+ loginAs(root);
+ c.securityOperations().grantNamespacePermission(u1, n1, NamespacePermission.ALTER_TABLE);
+ loginAs(user1);
+ user1Con.tableOperations().setProperty(t3, Property.TABLE_FILE_MAX.getKey(), "42");
+ user1Con.tableOperations().removeProperty(t3, Property.TABLE_FILE_MAX.getKey());
+ loginAs(root);
+ c.securityOperations().revokeNamespacePermission(u1, n1, NamespacePermission.ALTER_TABLE);
+
+ loginAs(user1);
+ try {
+ user1Con.namespaceOperations().setProperty(n1, Property.TABLE_FILE_MAX.getKey(), "55");
+ fail();
+ } catch (AccumuloSecurityException e) {
+ expectPermissionDenied(e);
+ }
+
+ loginAs(root);
+ c.securityOperations().grantNamespacePermission(u1, n1, NamespacePermission.ALTER_NAMESPACE);
+ loginAs(user1);
+ user1Con.namespaceOperations().setProperty(n1, Property.TABLE_FILE_MAX.getKey(), "42");
+ user1Con.namespaceOperations().removeProperty(n1, Property.TABLE_FILE_MAX.getKey());
+ loginAs(root);
+ c.securityOperations().revokeNamespacePermission(u1, n1, NamespacePermission.ALTER_NAMESPACE);
+
+ loginAs(root);
+ c.securityOperations().createLocalUser(u2, (root.getPassword() == null ? null : new PasswordToken(user2.getPassword())));
+ loginAs(user1);
+ try {
+ user1Con.securityOperations().grantNamespacePermission(u2, n1, NamespacePermission.ALTER_NAMESPACE);
+ fail();
+ } catch (AccumuloSecurityException e) {
+ expectPermissionDenied(e);
+ }
+
+ loginAs(root);
+ c.securityOperations().grantNamespacePermission(u1, n1, NamespacePermission.GRANT);
+ loginAs(user1);
+ user1Con.securityOperations().grantNamespacePermission(u2, n1, NamespacePermission.ALTER_NAMESPACE);
+ user1Con.securityOperations().revokeNamespacePermission(u2, n1, NamespacePermission.ALTER_NAMESPACE);
+ loginAs(root);
+ c.securityOperations().revokeNamespacePermission(u1, n1, NamespacePermission.GRANT);
+
+ loginAs(user1);
+ try {
+ user1Con.namespaceOperations().create(n2);
+ fail();
+ } catch (AccumuloSecurityException e) {
+ expectPermissionDenied(e);
+ }
+
+ loginAs(root);
+ c.securityOperations().grantSystemPermission(u1, SystemPermission.CREATE_NAMESPACE);
+ loginAs(user1);
+ user1Con.namespaceOperations().create(n2);
+ loginAs(root);
+ c.securityOperations().revokeSystemPermission(u1, SystemPermission.CREATE_NAMESPACE);
+
+ c.securityOperations().revokeNamespacePermission(u1, n2, NamespacePermission.DROP_NAMESPACE);
+ loginAs(user1);
+ try {
+ user1Con.namespaceOperations().delete(n2);
+ fail();
+ } catch (AccumuloSecurityException e) {
+ expectPermissionDenied(e);
+ }
+
+ loginAs(root);
+ c.securityOperations().grantSystemPermission(u1, SystemPermission.DROP_NAMESPACE);
+ loginAs(user1);
+ user1Con.namespaceOperations().delete(n2);
+ loginAs(root);
+ c.securityOperations().revokeSystemPermission(u1, SystemPermission.DROP_NAMESPACE);
+
+ loginAs(user1);
+ try {
+ user1Con.namespaceOperations().setProperty(n1, Property.TABLE_FILE_MAX.getKey(), "33");
+ fail();
+ } catch (AccumuloSecurityException e) {
+ expectPermissionDenied(e);
+ }
+
+ loginAs(root);
+ c.securityOperations().grantSystemPermission(u1, SystemPermission.ALTER_NAMESPACE);
+ loginAs(user1);
+ user1Con.namespaceOperations().setProperty(n1, Property.TABLE_FILE_MAX.getKey(), "33");
+ user1Con.namespaceOperations().removeProperty(n1, Property.TABLE_FILE_MAX.getKey());
+ loginAs(root);
+ c.securityOperations().revokeSystemPermission(u1, SystemPermission.ALTER_NAMESPACE);
+ }
+
+ @Test
+ public void verifySystemPropertyInheritance() throws Exception {
+ String t1 = "1";
+ String t2 = namespace + "." + t1;
+ c.tableOperations().create(t1);
+ c.namespaceOperations().create(namespace);
+ c.tableOperations().create(t2);
+
+ // verify iterator inheritance
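+ // (iterator property values take the form "priority,class")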
+ _verifySystemPropertyInheritance(t1, t2, Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.sum", "20," + SimpleFilter.class.getName(), false);
+
+ // verify constraint inheritance
+ _verifySystemPropertyInheritance(t1, t2, Property.TABLE_CONSTRAINT_PREFIX.getKey() + "42", NumericValueConstraint.class.getName(), false);
+
+ // verify other inheritance
+ _verifySystemPropertyInheritance(t1, t2, Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "dummy", "dummy", true);
+ }
+
+ private void _verifySystemPropertyInheritance(String defaultNamespaceTable, String namespaceTable, String k, String v, boolean systemNamespaceShouldInherit)
+ throws Exception {
+ // nobody should have any of these properties yet
+ assertFalse(c.instanceOperations().getSystemConfiguration().containsValue(v));
+ assertFalse(checkNamespaceHasProp(Namespaces.ACCUMULO_NAMESPACE, k, v));
+ assertFalse(checkTableHasProp(RootTable.NAME, k, v));
+ assertFalse(checkTableHasProp(MetadataTable.NAME, k, v));
+ assertFalse(checkNamespaceHasProp(Namespaces.DEFAULT_NAMESPACE, k, v));
+ assertFalse(checkTableHasProp(defaultNamespaceTable, k, v));
+ assertFalse(checkNamespaceHasProp(namespace, k, v));
+ assertFalse(checkTableHasProp(namespaceTable, k, v));
+
+ // set the filter, verify that accumulo namespace is the only one unaffected
+ c.instanceOperations().setProperty(k, v);
+ // doesn't take effect immediately, needs time to propagate to tserver's ZooKeeper cache
+ UtilWaitThread.sleep(250);
+ assertTrue(c.instanceOperations().getSystemConfiguration().containsValue(v));
+ assertEquals(systemNamespaceShouldInherit, checkNamespaceHasProp(Namespaces.ACCUMULO_NAMESPACE, k, v));
+ assertEquals(systemNamespaceShouldInherit, checkTableHasProp(RootTable.NAME, k, v));
+ assertEquals(systemNamespaceShouldInherit, checkTableHasProp(MetadataTable.NAME, k, v));
+ assertTrue(checkNamespaceHasProp(Namespaces.DEFAULT_NAMESPACE, k, v));
+ assertTrue(checkTableHasProp(defaultNamespaceTable, k, v));
+ assertTrue(checkNamespaceHasProp(namespace, k, v));
+ assertTrue(checkTableHasProp(namespaceTable, k, v));
+
+ // verify it is no longer inherited
+ c.instanceOperations().removeProperty(k);
+ // doesn't take effect immediately, needs time to propagate to tserver's ZooKeeper cache
+ UtilWaitThread.sleep(250);
+ assertFalse(c.instanceOperations().getSystemConfiguration().containsValue(v));
+ assertFalse(checkNamespaceHasProp(Namespaces.ACCUMULO_NAMESPACE, k, v));
+ assertFalse(checkTableHasProp(RootTable.NAME, k, v));
+ assertFalse(checkTableHasProp(MetadataTable.NAME, k, v));
+ assertFalse(checkNamespaceHasProp(Namespaces.DEFAULT_NAMESPACE, k, v));
+ assertFalse(checkTableHasProp(defaultNamespaceTable, k, v));
+ assertFalse(checkNamespaceHasProp(namespace, k, v));
+ assertFalse(checkTableHasProp(namespaceTable, k, v));
+ }
+
+ @Test
+ public void listNamespaces() throws Exception {
+ SortedSet<String> namespaces = c.namespaceOperations().list();
+ Map<String,String> map = c.namespaceOperations().namespaceIdMap();
+ assertEquals(2, namespaces.size());
+ assertEquals(2, map.size());
+ assertTrue(namespaces.contains(Namespaces.ACCUMULO_NAMESPACE));
+ assertTrue(namespaces.contains(Namespaces.DEFAULT_NAMESPACE));
+ assertFalse(namespaces.contains(namespace));
+ assertEquals(Namespaces.ACCUMULO_NAMESPACE_ID, map.get(Namespaces.ACCUMULO_NAMESPACE));
+ assertEquals(Namespaces.DEFAULT_NAMESPACE_ID, map.get(Namespaces.DEFAULT_NAMESPACE));
+ assertNull(map.get(namespace));
+
+ c.namespaceOperations().create(namespace);
+ namespaces = c.namespaceOperations().list();
+ map = c.namespaceOperations().namespaceIdMap();
+ assertEquals(3, namespaces.size());
+ assertEquals(3, map.size());
+ assertTrue(namespaces.contains(Namespaces.ACCUMULO_NAMESPACE));
+ assertTrue(namespaces.contains(Namespaces.DEFAULT_NAMESPACE));
+ assertTrue(namespaces.contains(namespace));
+ assertEquals(Namespaces.ACCUMULO_NAMESPACE_ID, map.get(Namespaces.ACCUMULO_NAMESPACE));
+ assertEquals(Namespaces.DEFAULT_NAMESPACE_ID, map.get(Namespaces.DEFAULT_NAMESPACE));
+ assertNotNull(map.get(namespace));
+
+ c.namespaceOperations().delete(namespace);
+ namespaces = c.namespaceOperations().list();
+ map = c.namespaceOperations().namespaceIdMap();
+ assertEquals(2, namespaces.size());
+ assertEquals(2, map.size());
+ assertTrue(namespaces.contains(Namespaces.ACCUMULO_NAMESPACE));
+ assertTrue(namespaces.contains(Namespaces.DEFAULT_NAMESPACE));
+ assertFalse(namespaces.contains(namespace));
+ assertEquals(Namespaces.ACCUMULO_NAMESPACE_ID, map.get(Namespaces.ACCUMULO_NAMESPACE));
+ assertEquals(Namespaces.DEFAULT_NAMESPACE_ID, map.get(Namespaces.DEFAULT_NAMESPACE));
+ assertNull(map.get(namespace));
+ }
+
+ @Test
+ public void loadClass() throws Exception {
+ assertTrue(c.namespaceOperations().testClassLoad(Namespaces.DEFAULT_NAMESPACE, VersioningIterator.class.getName(), SortedKeyValueIterator.class.getName()));
+ assertFalse(c.namespaceOperations().testClassLoad(Namespaces.DEFAULT_NAMESPACE, "dummy", SortedKeyValueIterator.class.getName()));
+ try {
+ c.namespaceOperations().testClassLoad(namespace, "dummy", "dummy");
+ fail();
+ } catch (NamespaceNotFoundException e) {
+ // expected, ignore
+ }
+ }
+
+ @Test
+ public void testModifyingPermissions() throws Exception {
+ String tableName = namespace + ".modify";
+ c.namespaceOperations().create(namespace);
+ c.tableOperations().create(tableName);
+ assertTrue(c.securityOperations().hasTablePermission(c.whoami(), tableName, TablePermission.READ));
+ c.securityOperations().revokeTablePermission(c.whoami(), tableName, TablePermission.READ);
+ assertFalse(c.securityOperations().hasTablePermission(c.whoami(), tableName, TablePermission.READ));
+ c.securityOperations().grantTablePermission(c.whoami(), tableName, TablePermission.READ);
+ assertTrue(c.securityOperations().hasTablePermission(c.whoami(), tableName, TablePermission.READ));
+ c.tableOperations().delete(tableName);
+
+ try {
+ c.securityOperations().hasTablePermission(c.whoami(), tableName, TablePermission.READ);
+ fail();
+ } catch (Exception e) {
+ if (!(e instanceof AccumuloSecurityException) || !((AccumuloSecurityException) e).getSecurityErrorCode().equals(SecurityErrorCode.TABLE_DOESNT_EXIST))
+ throw new Exception("Has permission resulted in " + e.getClass().getName(), e);
+ }
+
+ try {
+ c.securityOperations().grantTablePermission(c.whoami(), tableName, TablePermission.READ);
+ fail();
+ } catch (Exception e) {
+ if (!(e instanceof AccumuloSecurityException) || !((AccumuloSecurityException) e).getSecurityErrorCode().equals(SecurityErrorCode.TABLE_DOESNT_EXIST))
+ throw new Exception("Has permission resulted in " + e.getClass().getName(), e);
+ }
+
+ try {
+ c.securityOperations().revokeTablePermission(c.whoami(), tableName, TablePermission.READ);
+ fail();
+ } catch (Exception e) {
+ if (!(e instanceof AccumuloSecurityException) || !((AccumuloSecurityException) e).getSecurityErrorCode().equals(SecurityErrorCode.TABLE_DOESNT_EXIST))
+ throw new Exception("Has permission resulted in " + e.getClass().getName(), e);
+ }
+
+ assertTrue(c.securityOperations().hasNamespacePermission(c.whoami(), namespace, NamespacePermission.READ));
+ c.securityOperations().revokeNamespacePermission(c.whoami(), namespace, NamespacePermission.READ);
+ assertFalse(c.securityOperations().hasNamespacePermission(c.whoami(), namespace, NamespacePermission.READ));
+ c.securityOperations().grantNamespacePermission(c.whoami(), namespace, NamespacePermission.READ);
+ assertTrue(c.securityOperations().hasNamespacePermission(c.whoami(), namespace, NamespacePermission.READ));
+
+ c.namespaceOperations().delete(namespace);
+
+ try {
+ c.securityOperations().hasTablePermission(c.whoami(), tableName, TablePermission.READ);
+ fail();
+ } catch (Exception e) {
+ if (!(e instanceof AccumuloSecurityException) || !((AccumuloSecurityException) e).getSecurityErrorCode().equals(SecurityErrorCode.TABLE_DOESNT_EXIST))
+ throw new Exception("Has permission resulted in " + e.getClass().getName(), e);
+ }
+
+ try {
+ c.securityOperations().grantTablePermission(c.whoami(), tableName, TablePermission.READ);
+ fail();
+ } catch (Exception e) {
+ if (!(e instanceof AccumuloSecurityException) || !((AccumuloSecurityException) e).getSecurityErrorCode().equals(SecurityErrorCode.TABLE_DOESNT_EXIST))
+ throw new Exception("Has permission resulted in " + e.getClass().getName(), e);
+ }
+
+ try {
+ c.securityOperations().revokeTablePermission(c.whoami(), tableName, TablePermission.READ);
+ fail();
+ } catch (Exception e) {
+ if (!(e instanceof AccumuloSecurityException) || !((AccumuloSecurityException) e).getSecurityErrorCode().equals(SecurityErrorCode.TABLE_DOESNT_EXIST))
+ throw new Exception("Has permission resulted in " + e.getClass().getName(), e);
+ }
+
+ try {
+ c.securityOperations().hasNamespacePermission(c.whoami(), namespace, NamespacePermission.READ);
+ fail();
+ } catch (Exception e) {
+ if (!(e instanceof AccumuloSecurityException) || !((AccumuloSecurityException) e).getSecurityErrorCode().equals(SecurityErrorCode.NAMESPACE_DOESNT_EXIST))
+ throw new Exception("Has permission resulted in " + e.getClass().getName(), e);
+ }
+
+ try {
+ c.securityOperations().grantNamespacePermission(c.whoami(), namespace, NamespacePermission.READ);
+ fail();
+ } catch (Exception e) {
+ if (!(e instanceof AccumuloSecurityException) || !((AccumuloSecurityException) e).getSecurityErrorCode().equals(SecurityErrorCode.NAMESPACE_DOESNT_EXIST))
+ throw new Exception("Has permission resulted in " + e.getClass().getName(), e);
+ }
+
+ try {
+ c.securityOperations().revokeNamespacePermission(c.whoami(), namespace, NamespacePermission.READ);
+ fail();
+ } catch (Exception e) {
+ if (!(e instanceof AccumuloSecurityException) || !((AccumuloSecurityException) e).getSecurityErrorCode().equals(SecurityErrorCode.NAMESPACE_DOESNT_EXIST))
+ throw new Exception("Has permission resulted in " + e.getClass().getName(), e);
+ }
+
+ }
+
+ @Test
+ public void verifyTableOperationsExceptions() throws Exception {
+ String tableName = namespace + ".1";
+ IteratorSetting setting = new IteratorSetting(200, VersioningIterator.class);
+ Text a = new Text("a");
+ Text z = new Text("z");
+ TableOperations ops = c.tableOperations();
+
+ // this one doesn't throw an exception, so don't fail; just check that it works
+ assertFalse(ops.exists(tableName));
+
+ // table operations that should throw an AccumuloException caused by NamespaceNotFoundException
+ int numRun = 0;
+ ACCUMULOEXCEPTIONS_NAMESPACENOTFOUND: for (int i = 0;; ++i)
+ try {
+ switch (i) {
+ case 0:
+ ops.create(tableName);
+ fail();
+ break;
+ case 1:
+ ops.create("a");
+ ops.clone("a", tableName, true, Collections.<String,String> emptyMap(), Collections.<String> emptySet());
+ fail();
+ break;
+ case 2:
+ ops.importTable(tableName, System.getProperty("user.dir") + "/target");
+ fail();
+ break;
+ default:
+ // break out of infinite loop
+ assertEquals(3, i); // check test integrity
+ assertEquals(3, numRun); // check test integrity
+ break ACCUMULOEXCEPTIONS_NAMESPACENOTFOUND;
+ }
+ } catch (Exception e) {
+ numRun++;
+ if (!(e instanceof AccumuloException) || !(e.getCause() instanceof NamespaceNotFoundException))
+ throw new Exception("Case " + i + " resulted in " + e.getClass().getName(), e);
+ }
+
+ // table operations that should throw an AccumuloException caused by a TableNotFoundException caused by a NamespaceNotFoundException
+ // these are here because we didn't declare TableNotFoundException in the API :(
+ numRun = 0;
+ ACCUMULOEXCEPTIONS_TABLENOTFOUND: for (int i = 0;; ++i)
+ try {
+ switch (i) {
+ case 0:
+ ops.removeConstraint(tableName, 0);
+ fail();
+ break;
+ case 1:
+ ops.removeProperty(tableName, "a");
+ fail();
+ break;
+ case 2:
+ ops.setProperty(tableName, "a", "b");
+ fail();
+ break;
+ default:
+ // break out of infinite loop
+ assertEquals(3, i); // check test integrity
+ assertEquals(3, numRun); // check test integrity
+ break ACCUMULOEXCEPTIONS_TABLENOTFOUND;
+ }
+ } catch (Exception e) {
+ numRun++;
+ if (!(e instanceof AccumuloException) || !(e.getCause() instanceof TableNotFoundException)
+ || !(e.getCause().getCause() instanceof NamespaceNotFoundException))
+ throw new Exception("Case " + i + " resulted in " + e.getClass().getName(), e);
+ }
+
+ // table operations that should throw a TableNotFoundException caused by NamespaceNotFoundException
+ numRun = 0;
+ TABLENOTFOUNDEXCEPTIONS: for (int i = 0;; ++i)
+ try {
+ switch (i) {
+ case 0:
+ ops.addConstraint(tableName, NumericValueConstraint.class.getName());
+ fail();
+ break;
+ case 1:
+ ops.addSplits(tableName, new TreeSet<Text>());
+ fail();
+ break;
+ case 2:
+ ops.attachIterator(tableName, setting);
+ fail();
+ break;
+ case 3:
+ ops.cancelCompaction(tableName);
+ fail();
+ break;
+ case 4:
+ ops.checkIteratorConflicts(tableName, setting, EnumSet.allOf(IteratorScope.class));
+ fail();
+ break;
+ case 5:
+ ops.clearLocatorCache(tableName);
+ fail();
+ break;
+ case 6:
+ ops.clone(tableName, "2", true, Collections.<String,String> emptyMap(), Collections.<String> emptySet());
+ fail();
+ break;
+ case 7:
+ ops.compact(tableName, a, z, true, true);
+ fail();
+ break;
+ case 8:
+ ops.delete(tableName);
+ fail();
+ break;
+ case 9:
+ ops.deleteRows(tableName, a, z);
+ fail();
+ break;
+ case 10:
+ ops.splitRangeByTablets(tableName, new Range(), 10);
+ fail();
+ break;
+ case 11:
+ ops.exportTable(tableName, namespace + "_dir");
+ fail();
+ break;
+ case 12:
+ ops.flush(tableName, a, z, true);
+ fail();
+ break;
+ case 13:
+ ops.getDiskUsage(Collections.singleton(tableName));
+ fail();
+ break;
+ case 14:
+ ops.getIteratorSetting(tableName, "a", IteratorScope.scan);
+ fail();
+ break;
+ case 15:
+ ops.getLocalityGroups(tableName);
+ fail();
+ break;
+ case 16:
+ ops.getMaxRow(tableName, Authorizations.EMPTY, a, true, z, true);
+ fail();
+ break;
+ case 17:
+ ops.getProperties(tableName);
+ fail();
+ break;
+ case 18:
+ ops.importDirectory(tableName, "", "", false);
+ fail();
+ break;
+ case 19:
+ ops.testClassLoad(tableName, VersioningIterator.class.getName(), SortedKeyValueIterator.class.getName());
+ fail();
+ break;
+ case 20:
+ ops.listConstraints(tableName);
+ fail();
+ break;
+ case 21:
+ ops.listIterators(tableName);
+ fail();
+ break;
+ case 22:
+ ops.listSplits(tableName);
+ fail();
+ break;
+ case 23:
+ ops.merge(tableName, a, z);
+ fail();
+ break;
+ case 24:
+ ops.offline(tableName, true);
+ fail();
+ break;
+ case 25:
+ ops.online(tableName, true);
+ fail();
+ break;
+ case 26:
+ ops.removeIterator(tableName, "a", EnumSet.of(IteratorScope.scan));
+ fail();
+ break;
+ case 27:
+ ops.rename(tableName, tableName + "2");
+ fail();
+ break;
+ case 28:
+ ops.setLocalityGroups(tableName, Collections.<String,Set<Text>> emptyMap());
+ fail();
+ break;
+ default:
+ // break out of infinite loop
+ assertEquals(29, i); // check test integrity
+ assertEquals(29, numRun); // check test integrity
+ break TABLENOTFOUNDEXCEPTIONS;
+ }
+ } catch (Exception e) {
+ numRun++;
+ if (!(e instanceof TableNotFoundException) || !(e.getCause() instanceof NamespaceNotFoundException))
+ throw new Exception("Case " + i + " resulted in " + e.getClass().getName(), e);
+ }
+ }
+
+ @Test
+ public void verifyNamespaceOperationsExceptions() throws Exception {
+ IteratorSetting setting = new IteratorSetting(200, VersioningIterator.class);
+ NamespaceOperations ops = c.namespaceOperations();
+
+ // this one doesn't throw an exception, so don't fail; just check that it works
+ assertFalse(ops.exists(namespace));
+
+ // namespace operations that should throw a NamespaceNotFoundException
+ int numRun = 0;
+ NAMESPACENOTFOUND: for (int i = 0;; ++i)
+ try {
+ switch (i) {
+ case 0:
+ ops.addConstraint(namespace, NumericValueConstraint.class.getName());
+ fail();
+ break;
+ case 1:
+ ops.attachIterator(namespace, setting);
+ fail();
+ break;
+ case 2:
+ ops.checkIteratorConflicts(namespace, setting, EnumSet.of(IteratorScope.scan));
+ fail();
+ break;
+ case 3:
+ ops.delete(namespace);
+ fail();
+ break;
+ case 4:
+ ops.getIteratorSetting(namespace, "thing", IteratorScope.scan);
+ fail();
+ break;
+ case 5:
+ ops.getProperties(namespace);
+ fail();
+ break;
+ case 6:
+ ops.listConstraints(namespace);
+ fail();
+ break;
+ case 7:
+ ops.listIterators(namespace);
+ fail();
+ break;
+ case 8:
+ ops.removeConstraint(namespace, 1);
+ fail();
+ break;
+ case 9:
+ ops.removeIterator(namespace, "thing", EnumSet.allOf(IteratorScope.class));
+ fail();
+ break;
+ case 10:
+ ops.removeProperty(namespace, "a");
+ fail();
+ break;
+ case 11:
+ ops.rename(namespace, namespace + "2");
+ fail();
+ break;
+ case 12:
+ ops.setProperty(namespace, "k", "v");
+ fail();
+ break;
+ case 13:
+ ops.testClassLoad(namespace, VersioningIterator.class.getName(), SortedKeyValueIterator.class.getName());
+ fail();
+ break;
+ default:
+ // break out of infinite loop
+ assertEquals(14, i); // check test integrity
+ assertEquals(14, numRun); // check test integrity
+ break NAMESPACENOTFOUND;
+ }
+ } catch (Exception e) {
+ numRun++;
+ if (!(e instanceof NamespaceNotFoundException))
+ throw new Exception("Case " + i + " resulted in " + e.getClass().getName(), e);
+ }
+
+ // namespace operations that should throw a NamespaceExistsException
+ numRun = 0;
+ NAMESPACEEXISTS: for (int i = 0;; ++i)
+ try {
+ switch (i) {
+ case 0:
+ ops.create(namespace + "0");
+ ops.create(namespace + "0"); // should fail here
+ fail();
+ break;
+ case 1:
+ ops.create(namespace + i + "_1");
+ ops.create(namespace + i + "_2");
+ ops.rename(namespace + i + "_1", namespace + i + "_2"); // should fail here
+ fail();
+ break;
+ case 2:
+ ops.create(Namespaces.DEFAULT_NAMESPACE);
+ fail();
+ break;
+ case 3:
+ ops.create(Namespaces.ACCUMULO_NAMESPACE);
+ fail();
+ break;
+ case 4:
+ ops.create(namespace + i + "_1");
+ ops.rename(namespace + i + "_1", Namespaces.DEFAULT_NAMESPACE); // should fail here
+ fail();
+ break;
+ case 5:
+ ops.create(namespace + i + "_1");
+ ops.rename(namespace + i + "_1", Namespaces.ACCUMULO_NAMESPACE); // should fail here
+ fail();
+ break;
+ default:
+ // break out of infinite loop
+ assertEquals(6, i); // check test integrity
+ assertEquals(6, numRun); // check test integrity
+ break NAMESPACEEXISTS;
+ }
+ } catch (Exception e) {
+ numRun++;
+ if (!(e instanceof NamespaceExistsException))
+ throw new Exception("Case " + i + " resulted in " + e.getClass().getName(), e);
+ }
+ }
+
+ private boolean checkTableHasProp(String t, String propKey, String propVal) {
+ return checkHasProperty(t, propKey, propVal, true);
+ }
+
+ private boolean checkNamespaceHasProp(String n, String propKey, String propVal) {
+ return checkHasProperty(n, propKey, propVal, false);
+ }
+
+ private boolean checkHasProperty(String name, String propKey, String propVal, boolean nameIsTable) {
+ try {
+ Iterable<Entry<String,String>> iterable = nameIsTable ? c.tableOperations().getProperties(name) : c.namespaceOperations().getProperties(name);
+ for (Entry<String,String> e : iterable)
+ if (propKey.equals(e.getKey()))
+ return propVal.equals(e.getValue());
+ return false;
+ } catch (Exception e) {
+ fail();
+ return false;
+ }
+ }
+
+ public static class SimpleFilter extends Filter {
+ @Override
+ public boolean accept(Key k, Value v) {
+ return !k.getColumnFamily().toString().equals("a");
+ }
+ }
+
+ private void expectPermissionDenied(AccumuloSecurityException sec) {
+ assertEquals(SecurityErrorCode.class, sec.getSecurityErrorCode().getClass());
+ switch (sec.getSecurityErrorCode()) {
+ case PERMISSION_DENIED:
+ break;
+ default:
+ fail();
+ }
+ }
+
+}
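
The IT above drives namespace creation, property inheritance, iterators, constraints, and permissions through the cluster harness. As a minimal standalone sketch of the property-inheritance behavior it verifies, the following uses a placeholder instance name, ZooKeeper quorum, and credentials, none of which come from this commit:

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.ZooKeeperInstance;
    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
    import org.apache.accumulo.core.conf.Property;

    public class NamespaceInheritanceSketch {
      public static void main(String[] args) throws Exception {
        // placeholder connection details; substitute a real instance, quorum, and credentials
        Connector c = new ZooKeeperInstance("instance", "localhost:2181")
            .getConnector("root", new PasswordToken("secret"));

        // a table created inside a namespace inherits the namespace's properties
        c.namespaceOperations().create("ns");
        c.tableOperations().create("ns.t1");
        c.namespaceOperations().setProperty("ns", Property.TABLE_FILE_MAX.getKey(), "42");

        // a value set directly on the table overrides the inherited one
        c.tableOperations().setProperty("ns.t1", Property.TABLE_FILE_MAX.getKey(), "13");
      }
    }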
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/RecoveryCompactionsAreFlushesIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/RecoveryCompactionsAreFlushesIT.java b/test/src/main/java/org/apache/accumulo/test/RecoveryCompactionsAreFlushesIT.java
new file mode 100644
index 0000000..60b3cf7
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/RecoveryCompactionsAreFlushesIT.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.Map.Entry;
+
+import org.apache.accumulo.cluster.ClusterControl;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+// Verifies ACCUMULO-3010: compactions performed during log recovery should be flushes, not merging minor compactions.
+public class RecoveryCompactionsAreFlushesIT extends AccumuloClusterHarness {
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setNumTservers(1);
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
+ // use the raw local file system, which supports the sync semantics log recovery needs
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ @Test
+ public void test() throws Exception {
+ // create a table
+ String tableName = getUniqueNames(1)[0];
+ Connector c = getConnector();
+ c.tableOperations().create(tableName);
+ c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "100");
+ c.tableOperations().setProperty(tableName, Property.TABLE_FILE_MAX.getKey(), "3");
+ // create 3 flush files
+ BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+ Mutation m = new Mutation("a");
+ m.put("b", "c", new Value("v".getBytes()));
+ for (int i = 0; i < 3; i++) {
+ bw.addMutation(m);
+ bw.flush();
+ c.tableOperations().flush(tableName, null, null, true);
+ }
+ // create an unsaved mutation
+ bw.addMutation(m);
+ bw.close();
+
+ ClusterControl control = cluster.getClusterControl();
+
+ // kill the tablet servers
+ control.stopAllServers(ServerType.TABLET_SERVER);
+
+ // recover
+ control.startAllServers(ServerType.TABLET_SERVER);
+
+ // ensure the table is readable
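+ // (Iterators.size exhausts the scanner, so every entry must be successfully read)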
+ Iterators.size(c.createScanner(tableName, Authorizations.EMPTY).iterator());
+
+ // ensure that the recovery was not a merging minor compaction
+ Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+ for (Entry<Key,Value> entry : s) {
+ String filename = entry.getKey().getColumnQualifier().toString();
+ String[] parts = filename.split("/");
+ Assert.assertFalse(parts[parts.length - 1].startsWith("M"));
+ }
+ }
+
+}
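
The final assertion leans on Accumulo's data-file naming convention: files written by merging minor compactions carry an "M" prefix, while ordinary flushes produce "F"-prefixed files. As a minimal standalone sketch of the same metadata inspection, assuming an already-connected Connector:

    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.MetadataTable;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema;
    import org.apache.accumulo.core.security.Authorizations;

    public class DataFilePrefixSketch {
      // print the bare name of every data file recorded in the metadata table;
      // a leading "M" marks a file produced by a merging minor compaction
      static void printDataFileNames(Connector c) throws Exception {
        Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
        s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
        for (Entry<Key,Value> e : s) {
          String path = e.getKey().getColumnQualifier().toString();
          System.out.println(path.substring(path.lastIndexOf('/') + 1));
        }
      }
    }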
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/AccumuloOutputFormatIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/AccumuloOutputFormatIT.java b/test/src/test/java/org/apache/accumulo/test/AccumuloOutputFormatIT.java
deleted file mode 100644
index a2f522e..0000000
--- a/test/src/test/java/org/apache/accumulo/test/AccumuloOutputFormatIT.java
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static com.google.common.base.Charsets.UTF_8;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.mapred.AccumuloOutputFormat;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.minicluster.MiniAccumuloCluster;
-import org.apache.accumulo.minicluster.MiniAccumuloConfig;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.RecordWriter;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.TemporaryFolder;
-
-import com.google.common.collect.Maps;
-
-/**
- * Prevent regression of ACCUMULO-3709. Exists as a mini test because mock instance doesn't produce this error when dynamically changing the table permissions.
- */
-public class AccumuloOutputFormatIT {
-
- private static final String TABLE = "abc";
- private MiniAccumuloCluster accumulo;
- private String secret = "secret";
-
- @Rule
- public TemporaryFolder folder = new TemporaryFolder(new File(System.getProperty("user.dir") + "/target"));
-
- @Rule
- public ExpectedException exception = ExpectedException.none();
-
- @Before
- public void setUp() throws Exception {
- folder.create();
- MiniAccumuloConfig config = new MiniAccumuloConfig(folder.getRoot(), secret);
- Map<String,String> configMap = Maps.newHashMap();
- configMap.put(Property.TSERV_SESSION_MAXIDLE.toString(), "1");
- config.setSiteConfig(configMap);
- config.setNumTservers(1);
- accumulo = new MiniAccumuloCluster(config);
- accumulo.start();
- }
-
- @After
- public void tearDown() throws Exception {
- accumulo.stop();
- folder.delete();
- }
-
- @Test
- public void testMapred() throws Exception {
- ClientConfiguration clientConfig = accumulo.getClientConfig();
- ZooKeeperInstance instance = new ZooKeeperInstance(clientConfig);
- Connector connector = instance.getConnector("root", new PasswordToken(secret));
- // create a table and put some data in it
- connector.tableOperations().create(TABLE);
-
- JobConf job = new JobConf();
- BatchWriterConfig batchConfig = new BatchWriterConfig();
- // no flushes!!!!!
- batchConfig.setMaxLatency(0, TimeUnit.MILLISECONDS);
- // use a single thread to ensure our update session times out
- batchConfig.setMaxWriteThreads(1);
- // set the max memory so that we ensure we don't flush on the write.
- batchConfig.setMaxMemory(Long.MAX_VALUE);
- AccumuloOutputFormat outputFormat = new AccumuloOutputFormat();
- AccumuloOutputFormat.setBatchWriterOptions(job, batchConfig);
- AccumuloOutputFormat.setZooKeeperInstance(job, clientConfig);
- AccumuloOutputFormat.setConnectorInfo(job, "root", new PasswordToken(secret));
- RecordWriter<Text,Mutation> writer = outputFormat.getRecordWriter(null, job, "Test", null);
-
- try {
- for (int i = 0; i < 3; i++) {
- Mutation m = new Mutation(new Text(String.format("%08d", i)));
- for (int j = 0; j < 3; j++) {
- m.put(new Text("cf1"), new Text("cq" + j), new Value((i + "_" + j).getBytes(UTF_8)));
- writer.write(new Text(TABLE), m);
- }
- }
-
- } catch (Exception e) {
- e.printStackTrace();
- // we don't want the exception to come from write
- }
-
- connector.securityOperations().revokeTablePermission("root", TABLE, TablePermission.WRITE);
-
- exception.expect(IOException.class);
- exception.expectMessage("PERMISSION_DENIED");
- writer.close(null);
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/ArbitraryTablePropertiesIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/ArbitraryTablePropertiesIT.java b/test/src/test/java/org/apache/accumulo/test/ArbitraryTablePropertiesIT.java
deleted file mode 100644
index 213ab59..0000000
--- a/test/src/test/java/org/apache/accumulo/test/ArbitraryTablePropertiesIT.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import java.util.Map.Entry;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.harness.SharedMiniClusterBase;
-import org.junit.Assert;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ArbitraryTablePropertiesIT extends SharedMiniClusterBase {
- private static final Logger log = LoggerFactory.getLogger(ArbitraryTablePropertiesIT.class);
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 30;
- }
-
- // Tests set, get, and remove of arbitrary table properties using the root account
- @Test
- public void setGetRemoveTablePropertyRoot() throws Exception {
- log.debug("Starting setGetRemoveTablePropertyRoot test ------------------------");
-
- // make a table
- final String tableName = getUniqueNames(1)[0];
- final Connector conn = getConnector();
- conn.tableOperations().create(tableName);
-
- // Set variables for the property name to use and the initial value
- String propertyName = "table.custom.description";
- String description1 = "Description";
-
- // Make sure the property name is valid
- Assert.assertTrue(Property.isValidPropertyKey(propertyName));
- // Set the property to the desired value
- conn.tableOperations().setProperty(tableName, propertyName, description1);
-
- // Loop through properties to make sure the new property is added to the list
- int count = 0;
- for (Entry<String,String> property : conn.tableOperations().getProperties(tableName)) {
- if (property.getKey().equals(propertyName) && property.getValue().equals(description1))
- count++;
- }
- Assert.assertEquals(1, count);
-
- // Set the property as something different
- String description2 = "set second";
- conn.tableOperations().setProperty(tableName, propertyName, description2);
-
- // Loop through properties to make sure the updated property value is in the list
- count = 0;
- for (Entry<String,String> property : conn.tableOperations().getProperties(tableName)) {
- if (property.getKey().equals(propertyName) && property.getValue().equals(description2))
- count++;
- }
- Assert.assertEquals(1, count);
-
- // Remove the property and make sure there is no longer a value associated with it
- conn.tableOperations().removeProperty(tableName, propertyName);
-
- // Loop through properties to make sure the property is no longer in the list
- count = 0;
- for (Entry<String,String> property : conn.tableOperations().getProperties(tableName)) {
- if (property.getKey().equals(propertyName))
- count++;
- }
- Assert.assertEquals(0, count);
- }
-
- // Tests set, get, and remove of user-added arbitrary properties using a non-root account with permission to alter tables
- @Test
- public void userSetGetRemoveTablePropertyWithPermission() throws Exception {
- log.debug("Starting userSetGetRemoveTablePropertyWithPermission test ------------------------");
-
- // Make a test username and password
- ClusterUser user = getUser(0);
- String testUser = user.getPrincipal();
- AuthenticationToken testToken = user.getToken();
-
- // As root, create the table, then create a test user
- // and grant that user permission to alter the table
- final String tableName = getUniqueNames(1)[0];
- final Connector c = getConnector();
- c.securityOperations().createLocalUser(testUser, (testToken instanceof PasswordToken ? (PasswordToken) testToken : null));
- c.tableOperations().create(tableName);
- c.securityOperations().grantTablePermission(testUser, tableName, TablePermission.ALTER_TABLE);
-
- // Set variables for the property name to use and the initial value
- String propertyName = "table.custom.description";
- String description1 = "Description";
-
- // Make sure the property name is valid
- Assert.assertTrue(Property.isValidPropertyKey(propertyName));
-
- // Getting a fresh token will ensure we're logged in as this user (if necessary)
- Connector testConn = c.getInstance().getConnector(testUser, user.getToken());
- // Set the property to the desired value
- testConn.tableOperations().setProperty(tableName, propertyName, description1);
-
- // Loop through properties to make sure the new property is added to the list
- int count = 0;
- for (Entry<String,String> property : testConn.tableOperations().getProperties(tableName)) {
- if (property.getKey().equals(propertyName) && property.getValue().equals(description1))
- count++;
- }
- Assert.assertEquals(1, count);
-
- // Set the property as something different
- String description2 = "set second";
- testConn.tableOperations().setProperty(tableName, propertyName, description2);
-
- // Loop through properties to make sure the updated property value is in the list
- count = 0;
- for (Entry<String,String> property : testConn.tableOperations().getProperties(tableName)) {
- if (property.getKey().equals(propertyName) && property.getValue().equals(description2))
- count++;
- }
- Assert.assertEquals(1, count);
-
- // Remove the property and make sure there is no longer a value associated with it
- testConn.tableOperations().removeProperty(tableName, propertyName);
-
- // Loop through properties to make sure the property is no longer in the list
- count = 0;
- for (Entry<String,String> property : testConn.tableOperations().getProperties(tableName)) {
- if (property.getKey().equals(propertyName))
- count++;
- }
- Assert.assertEquals(0, count);
-
- }
-
- // Tests set and get of user-added arbitrary properties using a non-root account without permission to alter tables
- @Test
- public void userSetGetTablePropertyWithoutPermission() throws Exception {
- log.debug("Starting userSetGetTablePropertyWithoutPermission test ------------------------");
-
- // Make a test username and password
- ClusterUser user = getUser(1);
- String testUser = user.getPrincipal();
- AuthenticationToken testToken = user.getToken();
-
- // As root, create the table and a test user, deliberately
- // granting that user no permissions on the table
- final String tableName = getUniqueNames(1)[0];
- final Connector c = getConnector();
- c.securityOperations().createLocalUser(testUser, (testToken instanceof PasswordToken ? (PasswordToken) testToken : null));
- c.tableOperations().create(tableName);
-
- // Set variables for the property name to use and the initial value
- String propertyName = "table.custom.description";
- String description1 = "Description";
-
- // Make sure the property name is valid
- Assert.assertTrue(Property.isValidPropertyKey(propertyName));
-
- // Getting a fresh token will ensure we're logged in as this user (if necessary)
- Connector testConn = c.getInstance().getConnector(testUser, user.getToken());
-
- // Try to set the property to the desired value.
- // If able to set it, the test fails, since permission was never granted
- try {
- testConn.tableOperations().setProperty(tableName, propertyName, description1);
- Assert.fail("Was able to set property without permissions");
- } catch (AccumuloSecurityException e) {}
-
- // Loop through properties to make sure the new property is not added to the list
- int count = 0;
- for (Entry<String,String> property : testConn.tableOperations().getProperties(tableName)) {
- if (property.getKey().equals(propertyName))
- count++;
- }
- Assert.assertEquals(0, count);
- }
-}
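
All three tests exercise the same property round trip; condensed, the pattern looks like this (table and property names are illustrative, standard client imports assumed):

    TableOperations ops = conn.tableOperations();
    ops.create("example");
    // arbitrary per-table properties must use the "table.custom." prefix
    // to pass Property.isValidPropertyKey()
    ops.setProperty("example", "table.custom.description", "first value");
    for (Entry<String,String> e : ops.getProperties("example")) {
      if (e.getKey().equals("table.custom.description"))
        System.out.println(e.getValue()); // prints "first value"
    }
    ops.removeProperty("example", "table.custom.description");
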
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/AssignmentThreadsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/AssignmentThreadsIT.java b/test/src/test/java/org/apache/accumulo/test/AssignmentThreadsIT.java
deleted file mode 100644
index c9a83a6..0000000
--- a/test/src/test/java/org/apache/accumulo/test/AssignmentThreadsIT.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertTrue;
-
-import java.util.Random;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-// ACCUMULO-1177
-public class AssignmentThreadsIT extends ConfigurableMacBase {
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setNumTservers(1);
- cfg.setProperty(Property.TSERV_ASSIGNMENT_MAXCONCURRENT, "1");
- }
-
- // [0-9a-f]
- private final static byte[] HEXCHARS = {0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66};
- private final static Random random = new Random();
-
- public static byte[] randomHex(int n) {
- byte[] binary = new byte[n];
- byte[] hex = new byte[n * 2];
- random.nextBytes(binary);
- int count = 0;
- for (byte x : binary) {
- hex[count++] = HEXCHARS[(x >> 4) & 0xf];
- hex[count++] = HEXCHARS[x & 0xf];
- }
- return hex;
- }
-
- @Test(timeout = 5 * 60 * 1000)
- public void testConcurrentAssignmentPerformance() throws Exception {
- // make a table with a lot of splits
- String tableName = getUniqueNames(1)[0];
- Connector c = getConnector();
- log.info("Creating table");
- c.tableOperations().create(tableName);
- SortedSet<Text> splits = new TreeSet<Text>();
- for (int i = 0; i < 1000; i++) {
- splits.add(new Text(randomHex(8)));
- }
- log.info("Adding splits");
- c.tableOperations().addSplits(tableName, splits);
- log.info("Taking table offline");
- c.tableOperations().offline(tableName, true);
- // time how long it takes to load
- log.info("Bringing the table online");
- long now = System.currentTimeMillis();
- c.tableOperations().online(tableName, true);
- long diff = System.currentTimeMillis() - now;
- log.info("Loaded " + splits.size() + " tablets in " + diff + " ms");
- c.instanceOperations().setProperty(Property.TSERV_ASSIGNMENT_MAXCONCURRENT.getKey(), "20");
- now = System.currentTimeMillis();
- log.info("Taking table offline, again");
- c.tableOperations().offline(tableName, true);
- // wait >10 seconds for thread pool to update
- UtilWaitThread.sleep(Math.max(0, now + 11 * 1000 - System.currentTimeMillis()));
- now = System.currentTimeMillis();
- log.info("Bringing table back online");
- c.tableOperations().online(tableName, true);
- long diff2 = System.currentTimeMillis() - now;
- log.debug("Loaded " + splits.size() + " tablets in " + diff2 + " ms");
- assertTrue(diff2 < diff);
- }
-
-}
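
The timing comparison works because TSERV_ASSIGNMENT_MAXCONCURRENT can be changed at runtime through instance operations; tablet servers observe the new value without a restart, and the test waits about ten seconds for the tserver's assignment thread pool to resize before re-timing. The relevant calls, distilled from the test above (c and tableName as in the test):

    // raise assignment concurrency on a live system; no tserver restart needed
    c.instanceOperations().setProperty(Property.TSERV_ASSIGNMENT_MAXCONCURRENT.getKey(), "20");

    long start = System.currentTimeMillis();
    c.tableOperations().online(tableName, true); // wait=true blocks until all tablets are hosted
    long elapsedMs = System.currentTimeMillis() - start;
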
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/AuditMessageIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/AuditMessageIT.java b/test/src/test/java/org/apache/accumulo/test/AuditMessageIT.java
deleted file mode 100644
index 1eb2373..0000000
--- a/test/src/test/java/org/apache/accumulo/test/AuditMessageIT.java
+++ /dev/null
@@ -1,506 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.SystemPermission;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.security.AuditedSecurityOperation;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.LineIterator;
-import org.apache.hadoop.io.Text;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * Tests that Accumulo outputs audit messages as expected. Since this uses MiniAccumuloCluster, testing each operation in isolation would take too long, so we
- * test blocks of related operations, run the whole test in one MiniAccumulo instance, and try to clean up objects between tests. MiniAccumuloCluster sets up
- * log4j differently from an installed instance, piping everything through stdout and writing to a known location, so we have to find the logs and grep out the
- * bits we need.
- */
-public class AuditMessageIT extends ConfigurableMacBase {
-
- private static final String AUDIT_USER_1 = "AuditUser1";
- private static final String AUDIT_USER_2 = "AuditUser2";
- private static final String PASSWORD = "password";
- private static final String OLD_TEST_TABLE_NAME = "apples";
- private static final String NEW_TEST_TABLE_NAME = "oranges";
- private static final String THIRD_TEST_TABLE_NAME = "pears";
- private static final Authorizations auths = new Authorizations("private", "public");
-
- @Override
- public int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Override
- public void beforeClusterStart(MiniAccumuloConfigImpl cfg) throws Exception {
- File f = new File(cfg.getConfDir(), "auditLog.xml");
- if (f.delete()) {
- log.debug("Deleted " + f);
- }
- }
-
- // Must be static to survive JUnit re-instantiating the test class for every test method.
- private static String lastAuditTimestamp;
- private Connector auditConnector;
- private Connector conn;
-
- private static ArrayList<String> findAuditMessage(ArrayList<String> input, String pattern) {
- ArrayList<String> result = new ArrayList<String>();
- for (String s : input) {
- if (s.matches(".*" + pattern + ".*"))
- result.add(s);
- }
- return result;
- }
-
- /**
- * Returns a List of Audit messages that have been grep'd out of the MiniAccumuloCluster output.
- *
- * @param stepName
- * A unique name for the test being executed, to identify the System.out messages.
- * @return A List of the Audit messages, sorted (so in chronological order).
- */
- private ArrayList<String> getAuditMessages(String stepName) throws IOException {
- // ACCUMULO-3144 Make sure we give the processes enough time to flush the write buffer
- try {
- Thread.sleep(2000);
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- throw new IOException("Interrupted waiting for data to be flushed to output streams");
- }
-
- for (MiniAccumuloClusterImpl.LogWriter lw : getCluster().getLogWriters()) {
- lw.flush();
- }
-
- // Grab the audit messages
- System.out.println("Start of captured audit messages for step " + stepName);
-
- ArrayList<String> result = new ArrayList<String>();
- File[] files = getCluster().getConfig().getLogDir().listFiles();
- assertNotNull(files);
- for (File file : files) {
- // We want to grab the files called .out
- if (file.getName().contains(".out") && file.isFile() && file.canRead()) {
- LineIterator it = FileUtils.lineIterator(file, UTF_8.name());
- try {
- while (it.hasNext()) {
- String line = it.nextLine();
- if (line.matches(".* \\[" + AuditedSecurityOperation.AUDITLOG + "\\s*\\].*")) {
- // Only include the message if lastAuditTimestamp is null or the message occurred after that timestamp
- if ((lastAuditTimestamp == null) || (line.substring(0, 23).compareTo(lastAuditTimestamp) > 0))
- result.add(line);
- }
- }
- } finally {
- LineIterator.closeQuietly(it);
- }
- }
- }
- Collections.sort(result);
-
- for (String s : result) {
- System.out.println(s);
- }
- System.out.println("End of captured audit messages for step " + stepName);
- if (result.size() > 0)
- lastAuditTimestamp = (result.get(result.size() - 1)).substring(0, 23);
-
- return result;
- }
-
- private void grantEverySystemPriv(Connector conn, String user) throws AccumuloSecurityException, AccumuloException {
- SystemPermission[] arrayOfP = new SystemPermission[] {SystemPermission.SYSTEM, SystemPermission.ALTER_TABLE, SystemPermission.ALTER_USER,
- SystemPermission.CREATE_TABLE, SystemPermission.CREATE_USER, SystemPermission.DROP_TABLE, SystemPermission.DROP_USER};
- for (SystemPermission p : arrayOfP) {
- conn.securityOperations().grantSystemPermission(user, p);
- }
- }
-
- @Before
- public void resetInstance() throws Exception {
- conn = getConnector();
-
- removeUsersAndTables();
-
- // This will set the lastAuditTimestamp for the first test
- getAuditMessages("setup");
- }
-
- @After
- public void removeUsersAndTables() throws Exception {
- for (String user : Arrays.asList(AUDIT_USER_1, AUDIT_USER_2)) {
- if (conn.securityOperations().listLocalUsers().contains(user)) {
- conn.securityOperations().dropLocalUser(user);
- }
- }
-
- TableOperations tops = conn.tableOperations();
- for (String table : Arrays.asList(THIRD_TEST_TABLE_NAME, NEW_TEST_TABLE_NAME, OLD_TEST_TABLE_NAME)) {
- if (tops.exists(table)) {
- tops.delete(table);
- }
- }
- }
-
- @Test
- public void testTableOperationsAudits() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, IOException,
- InterruptedException {
-
- conn.securityOperations().createLocalUser(AUDIT_USER_1, new PasswordToken(PASSWORD));
- conn.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.SYSTEM);
- conn.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.CREATE_TABLE);
-
- // Connect as Audit User and do a bunch of stuff.
- // Testing activity begins here
- auditConnector = getCluster().getConnector(AUDIT_USER_1, new PasswordToken(PASSWORD));
- auditConnector.tableOperations().create(OLD_TEST_TABLE_NAME);
- auditConnector.tableOperations().rename(OLD_TEST_TABLE_NAME, NEW_TEST_TABLE_NAME);
- Map<String,String> emptyMap = Collections.emptyMap();
- Set<String> emptySet = Collections.emptySet();
- auditConnector.tableOperations().clone(NEW_TEST_TABLE_NAME, OLD_TEST_TABLE_NAME, true, emptyMap, emptySet);
- auditConnector.tableOperations().delete(OLD_TEST_TABLE_NAME);
- auditConnector.tableOperations().offline(NEW_TEST_TABLE_NAME);
- auditConnector.tableOperations().delete(NEW_TEST_TABLE_NAME);
- // Testing activity ends here
-
- ArrayList<String> auditMessages = getAuditMessages("testTableOperationsAudits");
-
- assertEquals(1, findAuditMessage(auditMessages, "action: createTable; targetTable: " + OLD_TEST_TABLE_NAME).size());
- assertEquals(1, findAuditMessage(auditMessages, "action: renameTable; targetTable: " + OLD_TEST_TABLE_NAME).size());
- assertEquals(1, findAuditMessage(auditMessages, "action: cloneTable; targetTable: " + NEW_TEST_TABLE_NAME).size());
- assertEquals(1, findAuditMessage(auditMessages, "action: deleteTable; targetTable: " + OLD_TEST_TABLE_NAME).size());
- assertEquals(1, findAuditMessage(auditMessages, "action: offlineTable; targetTable: " + NEW_TEST_TABLE_NAME).size());
- assertEquals(1, findAuditMessage(auditMessages, "action: deleteTable; targetTable: " + NEW_TEST_TABLE_NAME).size());
-
- }
-
- @Test
- public void testUserOperationsAudits() throws AccumuloSecurityException, AccumuloException, TableExistsException, InterruptedException, IOException {
-
- conn.securityOperations().createLocalUser(AUDIT_USER_1, new PasswordToken(PASSWORD));
- conn.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.SYSTEM);
- conn.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.CREATE_USER);
- grantEverySystemPriv(conn, AUDIT_USER_1);
-
- // Connect as Audit User and do a bunch of stuff.
- // Start testing activities here
- auditConnector = getCluster().getConnector(AUDIT_USER_1, new PasswordToken(PASSWORD));
- auditConnector.securityOperations().createLocalUser(AUDIT_USER_2, new PasswordToken(PASSWORD));
-
- // It seems only root can grant stuff.
- conn.securityOperations().grantSystemPermission(AUDIT_USER_2, SystemPermission.ALTER_TABLE);
- conn.securityOperations().revokeSystemPermission(AUDIT_USER_2, SystemPermission.ALTER_TABLE);
- auditConnector.tableOperations().create(NEW_TEST_TABLE_NAME);
- conn.securityOperations().grantTablePermission(AUDIT_USER_2, NEW_TEST_TABLE_NAME, TablePermission.READ);
- conn.securityOperations().revokeTablePermission(AUDIT_USER_2, NEW_TEST_TABLE_NAME, TablePermission.READ);
- auditConnector.securityOperations().changeLocalUserPassword(AUDIT_USER_2, new PasswordToken("anything"));
- auditConnector.securityOperations().changeUserAuthorizations(AUDIT_USER_2, auths);
- auditConnector.securityOperations().dropLocalUser(AUDIT_USER_2);
- // Stop testing activities here
-
- ArrayList<String> auditMessages = getAuditMessages("testUserOperationsAudits");
-
- assertEquals(1, findAuditMessage(auditMessages, "action: createUser; targetUser: " + AUDIT_USER_2).size());
- assertEquals(
- 1,
- findAuditMessage(auditMessages,
- "action: grantSystemPermission; permission: " + SystemPermission.ALTER_TABLE.toString() + "; targetUser: " + AUDIT_USER_2).size());
- assertEquals(
- 1,
- findAuditMessage(auditMessages,
- "action: revokeSystemPermission; permission: " + SystemPermission.ALTER_TABLE.toString() + "; targetUser: " + AUDIT_USER_2).size());
- assertEquals(
- 1,
- findAuditMessage(auditMessages,
- "action: grantTablePermission; permission: " + TablePermission.READ.toString() + "; targetTable: " + NEW_TEST_TABLE_NAME).size());
- assertEquals(
- 1,
- findAuditMessage(auditMessages,
- "action: revokeTablePermission; permission: " + TablePermission.READ.toString() + "; targetTable: " + NEW_TEST_TABLE_NAME).size());
- assertEquals(1, findAuditMessage(auditMessages, "action: changePassword; targetUser: " + AUDIT_USER_2 + "").size());
- assertEquals(1, findAuditMessage(auditMessages, "action: changeAuthorizations; targetUser: " + AUDIT_USER_2 + "; authorizations: " + auths.toString())
- .size());
- assertEquals(1, findAuditMessage(auditMessages, "action: dropUser; targetUser: " + AUDIT_USER_2).size());
- }
-
- @Test
- public void testImportExportOperationsAudits() throws AccumuloSecurityException, AccumuloException, TableExistsException, TableNotFoundException,
- IOException, InterruptedException {
-
- conn.securityOperations().createLocalUser(AUDIT_USER_1, new PasswordToken(PASSWORD));
- conn.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.SYSTEM);
- conn.securityOperations().changeUserAuthorizations(AUDIT_USER_1, auths);
- grantEverySystemPriv(conn, AUDIT_USER_1);
-
- // Connect as Audit User and do a bunch of stuff.
- // Start testing activities here
- auditConnector = getCluster().getConnector(AUDIT_USER_1, new PasswordToken(PASSWORD));
- auditConnector.tableOperations().create(OLD_TEST_TABLE_NAME);
-
- // Insert some play data
- BatchWriter bw = auditConnector.createBatchWriter(OLD_TEST_TABLE_NAME, new BatchWriterConfig());
- Mutation m = new Mutation("myRow");
- m.put("cf1", "cq1", "v1");
- m.put("cf1", "cq2", "v3");
- bw.addMutation(m);
- bw.close();
-
- // Prepare to export the table
- File exportDir = new File(getCluster().getConfig().getDir().toString() + "/export");
-
- auditConnector.tableOperations().offline(OLD_TEST_TABLE_NAME);
- auditConnector.tableOperations().exportTable(OLD_TEST_TABLE_NAME, exportDir.toString());
-
- // We've exported the table metadata to the MiniAccumuloCluster root dir. Grab the .rf file path to re-import it
- File distCpTxt = new File(exportDir.toString() + "/distcp.txt");
- File importFile = null;
- LineIterator it = FileUtils.lineIterator(distCpTxt, UTF_8.name());
-
- // Just grab the first .rf file; it will do for now.
- String filePrefix = "file:";
- try {
- while (it.hasNext() && importFile == null) {
- String line = it.nextLine();
- if (line.matches(".*\\.rf")) {
- importFile = new File(line.replaceFirst(filePrefix, ""));
- }
- }
- } finally {
- LineIterator.closeQuietly(it);
- }
- FileUtils.copyFileToDirectory(importFile, exportDir);
- auditConnector.tableOperations().importTable(NEW_TEST_TABLE_NAME, exportDir.toString());
-
- // Now do a directory (bulk) import of the same data.
- auditConnector.tableOperations().create(THIRD_TEST_TABLE_NAME);
- File failDir = new File(exportDir + "/tmp");
- assertTrue(failDir.mkdirs() || failDir.isDirectory());
- auditConnector.tableOperations().importDirectory(THIRD_TEST_TABLE_NAME, exportDir.toString(), failDir.toString(), false);
- auditConnector.tableOperations().online(OLD_TEST_TABLE_NAME);
-
- // Stop testing activities here
-
- ArrayList<String> auditMessages = getAuditMessages("testImportExportOperationsAudits");
-
- assertEquals(1, findAuditMessage(auditMessages, String.format(AuditedSecurityOperation.CAN_CREATE_TABLE_AUDIT_TEMPLATE, OLD_TEST_TABLE_NAME)).size());
- assertEquals(1,
- findAuditMessage(auditMessages, String.format(AuditedSecurityOperation.CAN_ONLINE_OFFLINE_TABLE_AUDIT_TEMPLATE, "offlineTable", OLD_TEST_TABLE_NAME))
- .size());
- assertEquals(1,
- findAuditMessage(auditMessages, String.format(AuditedSecurityOperation.CAN_EXPORT_AUDIT_TEMPLATE, OLD_TEST_TABLE_NAME, exportDir.toString())).size());
- assertEquals(
- 1,
- findAuditMessage(auditMessages,
- String.format(AuditedSecurityOperation.CAN_IMPORT_AUDIT_TEMPLATE, NEW_TEST_TABLE_NAME, filePrefix + exportDir.toString())).size());
- assertEquals(1, findAuditMessage(auditMessages, String.format(AuditedSecurityOperation.CAN_CREATE_TABLE_AUDIT_TEMPLATE, THIRD_TEST_TABLE_NAME)).size());
- assertEquals(
- 1,
- findAuditMessage(
- auditMessages,
- String.format(AuditedSecurityOperation.CAN_BULK_IMPORT_AUDIT_TEMPLATE, THIRD_TEST_TABLE_NAME, filePrefix + exportDir.toString(), filePrefix
- + failDir.toString())).size());
- assertEquals(1,
- findAuditMessage(auditMessages, String.format(AuditedSecurityOperation.CAN_ONLINE_OFFLINE_TABLE_AUDIT_TEMPLATE, "onlineTable", OLD_TEST_TABLE_NAME))
- .size());
-
- }
-
- @Test
- public void testDataOperationsAudits() throws AccumuloSecurityException, AccumuloException, TableExistsException, TableNotFoundException, IOException,
- InterruptedException {
-
- conn.securityOperations().createLocalUser(AUDIT_USER_1, new PasswordToken(PASSWORD));
- conn.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.SYSTEM);
- conn.securityOperations().changeUserAuthorizations(AUDIT_USER_1, auths);
- grantEverySystemPriv(conn, AUDIT_USER_1);
-
- // Connect as Audit User and do a bunch of stuff.
- // Start testing activities here
- auditConnector = getCluster().getConnector(AUDIT_USER_1, new PasswordToken(PASSWORD));
- auditConnector.tableOperations().create(OLD_TEST_TABLE_NAME);
-
- // Insert some play data
- BatchWriter bw = auditConnector.createBatchWriter(OLD_TEST_TABLE_NAME, new BatchWriterConfig());
- Mutation m = new Mutation("myRow");
- m.put("cf1", "cq1", "v1");
- m.put("cf1", "cq2", "v3");
- bw.addMutation(m);
- bw.close();
-
- // Start testing activities here
- // A regular scan
- Scanner scanner = auditConnector.createScanner(OLD_TEST_TABLE_NAME, auths);
- for (Map.Entry<Key,Value> entry : scanner) {
- System.out.println("Scanner row: " + entry.getKey() + " " + entry.getValue());
- }
- scanner.close();
-
- // A batch scan
- BatchScanner bs = auditConnector.createBatchScanner(OLD_TEST_TABLE_NAME, auths, 1);
- bs.fetchColumn(new Text("cf1"), new Text("cq1"));
- bs.setRanges(Arrays.asList(new Range("myRow", "myRow~")));
-
- for (Map.Entry<Key,Value> entry : bs) {
- System.out.println("BatchScanner row: " + entry.getKey() + " " + entry.getValue());
- }
- bs.close();
-
- // Delete some data.
- auditConnector.tableOperations().deleteRows(OLD_TEST_TABLE_NAME, new Text("myRow"), new Text("myRow~"));
-
- // End of testing activities
-
- ArrayList<String> auditMessages = getAuditMessages("testDataOperationsAudits");
- assertTrue(1 <= findAuditMessage(auditMessages, "action: scan; targetTable: " + OLD_TEST_TABLE_NAME).size()); // the Scanner
- assertTrue(1 <= findAuditMessage(auditMessages, "action: scan; targetTable: " + OLD_TEST_TABLE_NAME).size()); // the BatchScanner audits the same way
- assertEquals(1,
- findAuditMessage(auditMessages, String.format(AuditedSecurityOperation.CAN_DELETE_RANGE_AUDIT_TEMPLATE, OLD_TEST_TABLE_NAME, "myRow", "myRow~")).size());
-
- }
-
- @Test
- public void testDeniedAudits() throws AccumuloSecurityException, AccumuloException, TableExistsException, TableNotFoundException, IOException,
- InterruptedException {
-
- // Create our user with no privs
- conn.securityOperations().createLocalUser(AUDIT_USER_1, new PasswordToken(PASSWORD));
- conn.tableOperations().create(OLD_TEST_TABLE_NAME);
- auditConnector = getCluster().getConnector(AUDIT_USER_1, new PasswordToken(PASSWORD));
-
- // Start testing activities
- // We should get denied and/or failed audit messages here.
- // We don't want the thrown exceptions to stop our tests, and we are not testing that the Exceptions are thrown.
-
- try {
- auditConnector.tableOperations().create(NEW_TEST_TABLE_NAME);
- } catch (AccumuloSecurityException ex) {}
- try {
- auditConnector.tableOperations().rename(OLD_TEST_TABLE_NAME, NEW_TEST_TABLE_NAME);
- } catch (AccumuloSecurityException ex) {}
- try {
- auditConnector.tableOperations().clone(OLD_TEST_TABLE_NAME, NEW_TEST_TABLE_NAME, true, Collections.<String,String> emptyMap(),
- Collections.<String> emptySet());
- } catch (AccumuloSecurityException ex) {}
- try {
- auditConnector.tableOperations().delete(OLD_TEST_TABLE_NAME);
- } catch (AccumuloSecurityException ex) {}
- try {
- auditConnector.tableOperations().offline(OLD_TEST_TABLE_NAME);
- } catch (AccumuloSecurityException ex) {}
- try {
- Scanner scanner = auditConnector.createScanner(OLD_TEST_TABLE_NAME, auths);
- scanner.iterator().next().getKey();
- } catch (RuntimeException ex) {}
- try {
- auditConnector.tableOperations().deleteRows(OLD_TEST_TABLE_NAME, new Text("myRow"), new Text("myRow~"));
- } catch (AccumuloSecurityException ex) {}
-
- // ... that will do for now.
- // End of testing activities
-
- ArrayList<String> auditMessages = getAuditMessages("testDeniedAudits");
- assertEquals(1,
- findAuditMessage(auditMessages, "operation: denied;.*" + String.format(AuditedSecurityOperation.CAN_CREATE_TABLE_AUDIT_TEMPLATE, NEW_TEST_TABLE_NAME))
- .size());
- assertEquals(
- 1,
- findAuditMessage(auditMessages,
- "operation: denied;.*" + String.format(AuditedSecurityOperation.CAN_RENAME_TABLE_AUDIT_TEMPLATE, OLD_TEST_TABLE_NAME, NEW_TEST_TABLE_NAME)).size());
- assertEquals(
- 1,
- findAuditMessage(auditMessages,
- "operation: denied;.*" + String.format(AuditedSecurityOperation.CAN_CLONE_TABLE_AUDIT_TEMPLATE, OLD_TEST_TABLE_NAME, NEW_TEST_TABLE_NAME)).size());
- assertEquals(1,
- findAuditMessage(auditMessages, "operation: denied;.*" + String.format(AuditedSecurityOperation.CAN_DELETE_TABLE_AUDIT_TEMPLATE, OLD_TEST_TABLE_NAME))
- .size());
- assertEquals(
- 1,
- findAuditMessage(auditMessages,
- "operation: denied;.*" + String.format(AuditedSecurityOperation.CAN_ONLINE_OFFLINE_TABLE_AUDIT_TEMPLATE, "offlineTable", OLD_TEST_TABLE_NAME))
- .size());
- assertEquals(1, findAuditMessage(auditMessages, "operation: denied;.*" + "action: scan; targetTable: " + OLD_TEST_TABLE_NAME).size());
- assertEquals(
- 1,
- findAuditMessage(auditMessages,
- "operation: denied;.*" + String.format(AuditedSecurityOperation.CAN_DELETE_RANGE_AUDIT_TEMPLATE, OLD_TEST_TABLE_NAME, "myRow", "myRow~")).size());
- }
-
- @Test
- public void testFailedAudits() throws AccumuloSecurityException, AccumuloException, TableExistsException, TableNotFoundException, IOException,
- InterruptedException {
-
- // Start testing activities
- // Test that a few "failed" audit messages come through when we tell it to do dumb stuff
- // We don't want the thrown exceptions to stop our tests, and we are not testing that the Exceptions are thrown.
- try {
- conn.securityOperations().dropLocalUser(AUDIT_USER_2);
- } catch (AccumuloSecurityException ex) {}
- try {
- conn.securityOperations().revokeSystemPermission(AUDIT_USER_2, SystemPermission.ALTER_TABLE);
- } catch (AccumuloSecurityException ex) {}
- try {
- conn.securityOperations().createLocalUser("root", new PasswordToken("super secret"));
- } catch (AccumuloSecurityException ex) {}
- ArrayList<String> auditMessages = getAuditMessages("testFailedAudits");
- // ... that will do for now.
- // End of testing activities
-
- assertEquals(1, findAuditMessage(auditMessages, String.format(AuditedSecurityOperation.DROP_USER_AUDIT_TEMPLATE, AUDIT_USER_2)).size());
- assertEquals(
- 1,
- findAuditMessage(auditMessages,
- String.format(AuditedSecurityOperation.REVOKE_SYSTEM_PERMISSION_AUDIT_TEMPLATE, SystemPermission.ALTER_TABLE, AUDIT_USER_2)).size());
- assertEquals(1, findAuditMessage(auditMessages, String.format(AuditedSecurityOperation.CREATE_USER_AUDIT_TEMPLATE, "root", "")).size());
-
- }
-
-}
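
A detail worth noting: getAuditMessages() deduplicates messages across test methods by comparing the first 23 characters of each line against a saved watermark. That works because the log4j ISO8601 pattern starts every line with a fixed-width timestamp, and such timestamps sort chronologically as plain strings. A sketch of the check (the sample line is illustrative):

    String line = "2015-06-04 20:52:42,123 [audit] INFO : some audit message";
    String ts = line.substring(0, 23); // "2015-06-04 20:52:42,123"
    // fixed-width ISO8601 timestamps compare chronologically as strings
    boolean isNew = (lastAuditTimestamp == null) || ts.compareTo(lastAuditTimestamp) > 0;
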
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java b/test/src/test/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java
deleted file mode 100644
index 5b0b84d..0000000
--- a/test/src/test/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.InstanceOperations;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.ZooCache;
-import org.apache.accumulo.fate.zookeeper.ZooLock;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-// ACCUMULO-3047
-public class BadDeleteMarkersCreatedIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(BadDeleteMarkersCreatedIT.class);
-
- @Override
- public int defaultTimeoutSeconds() {
- return 120;
- }
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setNumTservers(1);
- cfg.setProperty(Property.GC_CYCLE_DELAY, "1s");
- cfg.setProperty(Property.GC_CYCLE_START, "0s");
- }
-
- private int timeoutFactor = 1;
-
- @Before
- public void getTimeoutFactor() {
- try {
- timeoutFactor = Integer.parseInt(System.getProperty("timeout.factor"));
- } catch (NumberFormatException e) {
- log.warn("Could not parse integer from timeout.factor");
- }
-
- Assert.assertTrue("timeout.factor must be greater than or equal to 1", timeoutFactor >= 1);
- }
-
- private String gcCycleDelay, gcCycleStart;
-
- @Before
- public void alterConfig() throws Exception {
- InstanceOperations iops = getConnector().instanceOperations();
- Map<String,String> config = iops.getSystemConfiguration();
- gcCycleDelay = config.get(Property.GC_CYCLE_DELAY.getKey());
- gcCycleStart = config.get(Property.GC_CYCLE_START.getKey());
- iops.setProperty(Property.GC_CYCLE_DELAY.getKey(), "1s");
- iops.setProperty(Property.GC_CYCLE_START.getKey(), "0s");
- log.info("Restarting garbage collector");
-
- getCluster().getClusterControl().stopAllServers(ServerType.GARBAGE_COLLECTOR);
-
- Instance instance = getConnector().getInstance();
- ZooCache zcache = new ZooCache(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut());
- zcache.clear();
- String path = ZooUtil.getRoot(instance) + Constants.ZGC_LOCK;
- byte[] gcLockData;
- do {
- gcLockData = ZooLock.getLockData(zcache, path, null);
- if (null != gcLockData) {
- log.info("Waiting for GC ZooKeeper lock to expire");
- Thread.sleep(2000);
- }
- } while (null != gcLockData);
-
- log.info("GC lock was lost");
-
- getCluster().getClusterControl().startAllServers(ServerType.GARBAGE_COLLECTOR);
- log.info("Garbage collector was restarted");
-
- gcLockData = null;
- do {
- gcLockData = ZooLock.getLockData(zcache, path, null);
- if (null == gcLockData) {
- log.info("Waiting for GC ZooKeeper lock to be acquired");
- Thread.sleep(2000);
- }
- } while (null == gcLockData);
-
- log.info("GC lock was acquired");
- }
-
- @After
- public void restoreConfig() throws Exception {
- InstanceOperations iops = getConnector().instanceOperations();
- if (null != gcCycleDelay) {
- iops.setProperty(Property.GC_CYCLE_DELAY.getKey(), gcCycleDelay);
- }
- if (null != gcCycleStart) {
- iops.setProperty(Property.GC_CYCLE_START.getKey(), gcCycleStart);
- }
- log.info("Restarting garbage collector");
- getCluster().getClusterControl().stopAllServers(ServerType.GARBAGE_COLLECTOR);
- getCluster().getClusterControl().startAllServers(ServerType.GARBAGE_COLLECTOR);
- log.info("Garbage collector was restarted");
- }
-
- @Test
- public void test() throws Exception {
- // make a table
- String tableName = getUniqueNames(1)[0];
- Connector c = getConnector();
- log.info("Creating table to be deleted");
- c.tableOperations().create(tableName);
- final String tableId = c.tableOperations().tableIdMap().get(tableName);
- Assert.assertNotNull("Expected to find a tableId", tableId);
-
- // add some splits
- SortedSet<Text> splits = new TreeSet<Text>();
- for (int i = 0; i < 10; i++) {
- splits.add(new Text("" + i));
- }
- c.tableOperations().addSplits(tableName, splits);
- // get rid of all the splits
- c.tableOperations().deleteRows(tableName, null, null);
- // get rid of the table
- c.tableOperations().delete(tableName);
- log.info("Sleeping to let garbage collector run");
- // let gc run
- UtilWaitThread.sleep(timeoutFactor * 15 * 1000);
- log.info("Verifying that delete markers were deleted");
- // look for delete markers
- Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- scanner.setRange(MetadataSchema.DeletesSection.getRange());
- for (Entry<Key,Value> entry : scanner) {
- String row = entry.getKey().getRow().toString();
- if (!row.contains("/" + tableId + "/")) {
- log.info("Ignoring delete entry for a table other than the one we deleted");
- continue;
- }
- Assert.fail("Delete entry should have been deleted by the garbage collector: " + entry.getKey().getRow().toString());
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/BalanceFasterIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/BalanceFasterIT.java b/test/src/test/java/org/apache/accumulo/test/BalanceFasterIT.java
deleted file mode 100644
index bf9f5f0..0000000
--- a/test/src/test/java/org/apache/accumulo/test/BalanceFasterIT.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertTrue;
-
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-// ACCUMULO-2952
-public class BalanceFasterIT extends ConfigurableMacBase {
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setNumTservers(3);
- }
-
- @Test(timeout = 90 * 1000)
- public void test() throws Exception {
- // create a table, add a bunch of splits
- String tableName = getUniqueNames(1)[0];
- Connector conn = getConnector();
- conn.tableOperations().create(tableName);
- SortedSet<Text> splits = new TreeSet<Text>();
- for (int i = 0; i < 1000; i++) {
- splits.add(new Text("" + i));
- }
- conn.tableOperations().addSplits(tableName, splits);
- // give a short wait for balancing
- UtilWaitThread.sleep(10 * 1000);
- // find out where the tablets are
- Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
- s.setRange(MetadataSchema.TabletsSection.getRange());
- Map<String,Integer> counts = new HashMap<String,Integer>();
- while (true) {
- int total = 0;
- counts.clear();
- for (Entry<Key,Value> kv : s) {
- String host = kv.getValue().toString();
- if (!counts.containsKey(host))
- counts.put(host, 0);
- counts.put(host, counts.get(host) + 1);
- total++;
- }
- // are enough tablets online?
- if (total > 1000)
- break;
- }
- // should be on all three servers
- assertTrue(counts.size() == 3);
- // and distributed evenly
- Iterator<Integer> i = counts.values().iterator();
- int a = i.next();
- int b = i.next();
- int c = i.next();
- assertTrue(Math.abs(a - b) < 3);
- assertTrue(Math.abs(a - c) < 3);
- assertTrue(a > 330);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/BalanceIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/BalanceIT.java b/test/src/test/java/org/apache/accumulo/test/BalanceIT.java
deleted file mode 100644
index 605ac94..0000000
--- a/test/src/test/java/org/apache/accumulo/test/BalanceIT.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class BalanceIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(BalanceIT.class);
-
- @Override
- public int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Test
- public void testBalance() throws Exception {
- String tableName = getUniqueNames(1)[0];
- Connector c = getConnector();
- log.info("Creating table");
- c.tableOperations().create(tableName);
- SortedSet<Text> splits = new TreeSet<Text>();
- for (int i = 0; i < 10; i++) {
- splits.add(new Text("" + i));
- }
- log.info("Adding splits");
- c.tableOperations().addSplits(tableName, splits);
- log.info("Waiting for balance");
- c.instanceOperations().waitForBalance();
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/BalanceWithOfflineTableIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/BalanceWithOfflineTableIT.java b/test/src/test/java/org/apache/accumulo/test/BalanceWithOfflineTableIT.java
deleted file mode 100644
index 9acefc4..0000000
--- a/test/src/test/java/org/apache/accumulo/test/BalanceWithOfflineTableIT.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import java.util.SortedSet;
-import java.util.TreeSet;
-import java.util.concurrent.Callable;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.util.SimpleThreadPool;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-// ACCUMULO-3692
-public class BalanceWithOfflineTableIT extends ConfigurableMacBase {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 30;
- }
-
- @Override
- protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {}
-
- @Test
- public void test() throws Exception {
- final String[] tableNames = getUniqueNames(2);
- final String tableName = tableNames[0];
- // create a table with a bunch of splits
-
- final Connector c = getConnector();
- log.info("Creating table " + tableName);
- c.tableOperations().create(tableName);
- final SortedSet<Text> splits = new TreeSet<>();
- for (String split : "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",")) {
- splits.add(new Text(split));
- }
- log.info("Splitting table " + tableName);
- c.tableOperations().addSplits(tableName, splits);
- log.info("Balancing");
- c.instanceOperations().waitForBalance();
- log.info("Balanced");
-
- // create a new table which will unbalance the cluster
- final String table2 = tableNames[1];
- log.info("Creating table " + table2);
- c.tableOperations().create(table2);
- log.info("Creating splits " + table2);
- c.tableOperations().addSplits(table2, splits);
-
- // offline the table, hopefully while there are some migrations going on
- log.info("Offlining " + table2);
- c.tableOperations().offline(table2, true);
- log.info("Offlined " + table2);
-
- log.info("Waiting for balance");
-
- SimpleThreadPool pool = new SimpleThreadPool(1, "waitForBalance");
- Future<Boolean> wait = pool.submit(new Callable<Boolean>() {
- @Override
- public Boolean call() throws Exception {
- c.instanceOperations().waitForBalance();
- return true;
- }
- });
- wait.get(20, TimeUnit.SECONDS);
- log.info("Balance succeeded with an offline table");
- }
-
-}
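
The thread pool exists only to put an upper bound on waitForBalance(), which would otherwise block indefinitely if the offline table wedged the balancer; that hang is exactly the regression being tested. SimpleThreadPool is an Accumulo convenience wrapper around ThreadPoolExecutor, so the same bound can be sketched with plain java.util.concurrent (c as in the test):

    ExecutorService pool = Executors.newSingleThreadExecutor();
    Future<Boolean> wait = pool.submit(new Callable<Boolean>() {
      @Override
      public Boolean call() throws Exception {
        c.instanceOperations().waitForBalance();
        return true;
      }
    });
    wait.get(20, TimeUnit.SECONDS); // throws TimeoutException if balancing never settles
    pool.shutdown();
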
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/BatchWriterIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/BatchWriterIT.java b/test/src/test/java/org/apache/accumulo/test/BatchWriterIT.java
deleted file mode 100644
index 11fc595..0000000
--- a/test/src/test/java/org/apache/accumulo/test/BatchWriterIT.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.junit.Test;
-
-public class BatchWriterIT extends AccumuloClusterHarness {
-
- @Override
- public int defaultTimeoutSeconds() {
- return 30;
- }
-
- @Test
- public void test() throws Exception {
- // call the batchwriter with buffer of size zero
- String table = getUniqueNames(1)[0];
- Connector c = getConnector();
- c.tableOperations().create(table);
- BatchWriterConfig config = new BatchWriterConfig();
- config.setMaxMemory(0);
- BatchWriter writer = c.createBatchWriter(table, config);
- Mutation m = new Mutation("row");
- m.put("cf", "cq", new Value("value".getBytes()));
- writer.addMutation(m);
- writer.close();
- }
-
-}
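
The zero is the point of the test: with setMaxMemory(0) the BatchWriter has no buffer at all, so each addMutation() is effectively sent right away instead of being batched, exercising the degenerate flush path. In normal client code the buffer would be sized generously; for example (values are illustrative):

    BatchWriterConfig config = new BatchWriterConfig();
    config.setMaxMemory(10 * 1024 * 1024);     // buffer up to ~10 MB of mutations
    config.setMaxLatency(2, TimeUnit.SECONDS); // flush at least every 2 seconds
    config.setMaxWriteThreads(4);              // send to tservers in parallel
    BatchWriter writer = c.createBatchWriter(table, config);
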
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/BulkImportVolumeIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/BulkImportVolumeIT.java b/test/src/test/java/org/apache/accumulo/test/BulkImportVolumeIT.java
deleted file mode 100644
index ce60893..0000000
--- a/test/src/test/java/org/apache/accumulo/test/BulkImportVolumeIT.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-// ACCUMULO-118/ACCUMULO-2504
-public class BulkImportVolumeIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(BulkImportVolumeIT.class);
-
- File volDirBase = null;
- Path v1, v2;
-
- @Override
- public int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- File baseDir = cfg.getDir();
- volDirBase = new File(baseDir, "volumes");
- File v1f = new File(volDirBase, "v1");
- File v2f = new File(volDirBase, "v2");
- v1 = new Path("file://" + v1f.getAbsolutePath());
- v2 = new Path("file://" + v2f.getAbsolutePath());
-
- // Run MAC on two locations in the local file system
- cfg.setProperty(Property.INSTANCE_VOLUMES, v1.toString() + "," + v2.toString());
-
- // use raw local file system so walogs sync and flush will work
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- @Test
- public void testBulkImportFailure() throws Exception {
- String tableName = getUniqueNames(1)[0];
- TableOperations to = getConnector().tableOperations();
- to.create(tableName);
- FileSystem fs = getFileSystem();
- Path rootPath = new Path(cluster.getTemporaryPath(), getClass().getName());
- Path bulk = new Path(rootPath, "bulk");
- log.info("bulk: {}", bulk);
- if (fs.exists(bulk)) {
- fs.delete(bulk, true);
- }
- assertTrue(fs.mkdirs(bulk));
- Path err = new Path(rootPath, "err");
- log.info("err: {}", err);
- if (fs.exists(err)) {
- fs.delete(err, true);
- }
- assertTrue(fs.mkdirs(err));
- Path bogus = new Path(bulk, "bogus.rf");
- fs.create(bogus).close();
- log.info("bogus: {}", bogus);
- assertTrue(fs.exists(bogus));
- FsShell fsShell = new FsShell(fs.getConf());
- assertEquals("Failed to chmod " + rootPath, 0, fsShell.run(new String[] {"-chmod", "-R", "777", rootPath.toString()}));
- log.info("Importing {} into {} with failures directory {}", bulk, tableName, err);
- to.importDirectory(tableName, bulk.toString(), err.toString(), false);
- assertEquals(1, fs.listStatus(err).length);
- }
-
-}
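For context on the deleted test above: it stages an unreadable "bogus.rf" file and verifies that TableOperations.importDirectory() moves files it cannot load into the supplied failures directory instead of failing the whole import. A minimal sketch of the same call pattern follows; the conn and fs handles and the paths are illustrative assumptions, not part of the commit:

    import org.apache.accumulo.core.client.Connector;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class BulkImportSketch {
      // "conn", "fs", and "myTable" are assumed to exist; illustration only.
      static void bulkLoad(Connector conn, FileSystem fs, String myTable) throws Exception {
        Path bulkDir = new Path("/tmp/bulk");     // directory of RFiles to load (hypothetical)
        Path failDir = new Path("/tmp/bulkFail"); // must exist and be empty before the call
        fs.mkdirs(failDir);
        // setTime=false keeps the timestamps already written in the files
        conn.tableOperations().importDirectory(myTable, bulkDir.toString(), failDir.toString(), false);
        // files the tablet servers could not load end up in failDir
        if (fs.listStatus(failDir).length > 0)
          System.err.println("some files were not imported; inspect " + failDir);
      }
    }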
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/CleanWalIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/CleanWalIT.java b/test/src/test/java/org/apache/accumulo/test/CleanWalIT.java
deleted file mode 100644
index 2474b3e..0000000
--- a/test/src/test/java/org/apache/accumulo/test/CleanWalIT.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterators;
-
-public class CleanWalIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(CleanWalIT.class);
-
- @Override
- public int defaultTimeoutSeconds() {
- return 4 * 60;
- }
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "3s");
- cfg.setNumTservers(1);
- // use raw local file system so walogs sync and flush will work
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- @Before
- public void offlineTraceTable() throws Exception {
- Connector conn = getConnector();
- String traceTable = conn.instanceOperations().getSystemConfiguration().get(Property.TRACE_TABLE.getKey());
- if (conn.tableOperations().exists(traceTable)) {
- conn.tableOperations().offline(traceTable, true);
- }
- }
-
- @After
- public void onlineTraceTable() throws Exception {
- if (null != cluster) {
- Connector conn = getConnector();
- String traceTable = conn.instanceOperations().getSystemConfiguration().get(Property.TRACE_TABLE.getKey());
- if (conn.tableOperations().exists(traceTable)) {
- conn.tableOperations().online(traceTable, true);
- }
- }
- }
-
- // test for ACCUMULO-1830
- @Test
- public void test() throws Exception {
- Connector conn = getConnector();
- String tableName = getUniqueNames(1)[0];
- conn.tableOperations().create(tableName);
- BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
- Mutation m = new Mutation("row");
- m.put("cf", "cq", "value");
- bw.addMutation(m);
- bw.close();
- getCluster().getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
- // all 3 tables should do recovery, but with the bug the log file references are not actually removed
-
- getCluster().getClusterControl().startAllServers(ServerType.TABLET_SERVER);
-
- for (String table : new String[] {MetadataTable.NAME, RootTable.NAME})
- conn.tableOperations().flush(table, null, null, true);
- log.debug("Checking entries for " + tableName);
- assertEquals(1, count(tableName, conn));
- for (String table : new String[] {MetadataTable.NAME, RootTable.NAME}) {
- log.debug("Checking logs for " + table);
- assertEquals("Found logs for " + table, 0, countLogs(table, conn));
- }
-
- bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
- m = new Mutation("row");
- m.putDelete("cf", "cq");
- bw.addMutation(m);
- bw.close();
- assertEquals(0, count(tableName, conn));
- conn.tableOperations().flush(tableName, null, null, true);
- conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
- conn.tableOperations().flush(RootTable.NAME, null, null, true);
- try {
- getCluster().getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
- UtilWaitThread.sleep(3 * 1000);
- } finally {
- getCluster().getClusterControl().startAllServers(ServerType.TABLET_SERVER);
- }
- assertEquals(0, count(tableName, conn));
- }
-
- private int countLogs(String tableName, Connector conn) throws TableNotFoundException {
- Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- scanner.fetchColumnFamily(MetadataSchema.TabletsSection.LogColumnFamily.NAME);
- scanner.setRange(MetadataSchema.TabletsSection.getRange());
- int count = 0;
- for (Entry<Key,Value> entry : scanner) {
- log.debug("Saw " + entry.getKey() + "=" + entry.getValue());
- count++;
- }
- return count;
- }
-
- int count(String tableName, Connector conn) throws Exception {
- Scanner s = conn.createScanner(tableName, Authorizations.EMPTY);
- return Iterators.size(s.iterator());
- }
-
-}
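For context on the deleted test above: its countLogs() helper shows the standard pattern for detecting leftover write-ahead-log references after recovery, namely scanning the metadata table's tablets section while fetching only the log column family. The same pattern, pulled out as a standalone sketch (the Connector conn is an assumed, pre-built handle):

    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.MetadataTable;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema;
    import org.apache.accumulo.core.security.Authorizations;

    public class WalReferenceSketch {
      // "conn" is assumed to exist; illustration only.
      static int countWalReferences(Connector conn) throws Exception {
        Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
        scanner.fetchColumnFamily(MetadataSchema.TabletsSection.LogColumnFamily.NAME);
        scanner.setRange(MetadataSchema.TabletsSection.getRange());
        int count = 0;
        for (Entry<Key,Value> entry : scanner)
          count++; // each entry is one WAL still referenced by a tablet
        return count;
      }
    }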
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java b/test/src/test/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java
deleted file mode 100644
index 9797d7b..0000000
--- a/test/src/test/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java
+++ /dev/null
@@ -1,2273 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.proxy;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.InputStreamReader;
-import java.net.InetAddress;
-import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.UUID;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.DefaultConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.file.FileOperations;
-import org.apache.accumulo.core.file.FileSKVWriter;
-import org.apache.accumulo.core.iterators.DevNull;
-import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
-import org.apache.accumulo.core.iterators.user.SummingCombiner;
-import org.apache.accumulo.core.iterators.user.VersioningIterator;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.ByteBufferUtil;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.examples.simple.constraints.NumericValueConstraint;
-import org.apache.accumulo.harness.MiniClusterHarness;
-import org.apache.accumulo.harness.SharedMiniClusterBase;
-import org.apache.accumulo.harness.TestingKdc;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.proxy.Proxy;
-import org.apache.accumulo.proxy.thrift.AccumuloProxy.Client;
-import org.apache.accumulo.proxy.thrift.AccumuloSecurityException;
-import org.apache.accumulo.proxy.thrift.ActiveCompaction;
-import org.apache.accumulo.proxy.thrift.ActiveScan;
-import org.apache.accumulo.proxy.thrift.BatchScanOptions;
-import org.apache.accumulo.proxy.thrift.Column;
-import org.apache.accumulo.proxy.thrift.ColumnUpdate;
-import org.apache.accumulo.proxy.thrift.CompactionReason;
-import org.apache.accumulo.proxy.thrift.CompactionStrategyConfig;
-import org.apache.accumulo.proxy.thrift.CompactionType;
-import org.apache.accumulo.proxy.thrift.Condition;
-import org.apache.accumulo.proxy.thrift.ConditionalStatus;
-import org.apache.accumulo.proxy.thrift.ConditionalUpdates;
-import org.apache.accumulo.proxy.thrift.ConditionalWriterOptions;
-import org.apache.accumulo.proxy.thrift.DiskUsage;
-import org.apache.accumulo.proxy.thrift.IteratorScope;
-import org.apache.accumulo.proxy.thrift.IteratorSetting;
-import org.apache.accumulo.proxy.thrift.Key;
-import org.apache.accumulo.proxy.thrift.KeyValue;
-import org.apache.accumulo.proxy.thrift.MutationsRejectedException;
-import org.apache.accumulo.proxy.thrift.PartialKey;
-import org.apache.accumulo.proxy.thrift.Range;
-import org.apache.accumulo.proxy.thrift.ScanColumn;
-import org.apache.accumulo.proxy.thrift.ScanOptions;
-import org.apache.accumulo.proxy.thrift.ScanResult;
-import org.apache.accumulo.proxy.thrift.ScanState;
-import org.apache.accumulo.proxy.thrift.ScanType;
-import org.apache.accumulo.proxy.thrift.SystemPermission;
-import org.apache.accumulo.proxy.thrift.TableExistsException;
-import org.apache.accumulo.proxy.thrift.TableNotFoundException;
-import org.apache.accumulo.proxy.thrift.TablePermission;
-import org.apache.accumulo.proxy.thrift.TimeType;
-import org.apache.accumulo.proxy.thrift.UnknownScanner;
-import org.apache.accumulo.proxy.thrift.UnknownWriter;
-import org.apache.accumulo.proxy.thrift.WriterOptions;
-import org.apache.accumulo.server.util.PortUtils;
-import org.apache.accumulo.test.functional.SlowIterator;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.thrift.TApplicationException;
-import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TProtocolFactory;
-import org.apache.thrift.server.TServer;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterators;
-import com.google.common.net.HostAndPort;
-
-/**
- * Call every method on the proxy and try to verify that it works.
- */
-public abstract class SimpleProxyBase extends SharedMiniClusterBase {
- private static final Logger log = LoggerFactory.getLogger(SimpleProxyBase.class);
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60;
- }
-
- private static final long ZOOKEEPER_PROPAGATION_TIME = 10 * 1000;
- private static TServer proxyServer;
- private static int proxyPort;
-
- private TestProxyClient proxyClient;
- private org.apache.accumulo.proxy.thrift.AccumuloProxy.Client client;
-
- private static Map<String,String> properties = new HashMap<>();
- private static ByteBuffer creds = null;
- private static String hostname, proxyPrincipal, proxyPrimary, clientPrincipal;
- private static File proxyKeytab, clientKeytab;
-
- // Implementations can set this
- static TProtocolFactory factory = null;
-
- private static void waitForAccumulo(Connector c) throws Exception {
- Iterators.size(c.createScanner(MetadataTable.NAME, Authorizations.EMPTY).iterator());
- }
-
- private static boolean isKerberosEnabled() {
- return SharedMiniClusterBase.TRUE.equals(System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION));
- }
-
- /**
- * Does the actual test setup, invoked by the concrete test class
- */
- public static void setUpProxy() throws Exception {
- assertNotNull("Implementations must initialize the TProtocolFactory", factory);
-
- Connector c = SharedMiniClusterBase.getConnector();
- Instance inst = c.getInstance();
- waitForAccumulo(c);
-
- hostname = InetAddress.getLocalHost().getCanonicalHostName();
-
- Properties props = new Properties();
- props.put("instance", inst.getInstanceName());
- props.put("zookeepers", inst.getZooKeepers());
-
- final String tokenClass;
- if (isKerberosEnabled()) {
- tokenClass = KerberosToken.class.getName();
- TestingKdc kdc = getKdc();
-
- // Create a principal+keytab for the proxy
- proxyKeytab = new File(kdc.getKeytabDir(), "proxy.keytab");
- hostname = InetAddress.getLocalHost().getCanonicalHostName();
- // Set the primary because the client needs to know it
- proxyPrimary = "proxy";
- // Qualify with an instance
- proxyPrincipal = proxyPrimary + "/" + hostname;
- kdc.createPrincipal(proxyKeytab, proxyPrincipal);
- // Tack on the realm too
- proxyPrincipal = kdc.qualifyUser(proxyPrincipal);
-
- props.setProperty("kerberosPrincipal", proxyPrincipal);
- props.setProperty("kerberosKeytab", proxyKeytab.getCanonicalPath());
- props.setProperty("thriftServerType", "sasl");
-
- // Enable kerberos auth
- Configuration conf = new Configuration(false);
- conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
- UserGroupInformation.setConfiguration(conf);
-
- // Login for the Proxy itself
- UserGroupInformation.loginUserFromKeytab(proxyPrincipal, proxyKeytab.getAbsolutePath());
-
- // User for tests
- ClusterUser user = kdc.getRootUser();
- clientPrincipal = user.getPrincipal();
- clientKeytab = user.getKeytab();
- } else {
- clientPrincipal = "root";
- tokenClass = PasswordToken.class.getName();
- properties.put("password", SharedMiniClusterBase.getRootPassword());
- hostname = "localhost";
- }
-
- props.put("tokenClass", tokenClass);
-
- ClientConfiguration clientConfig = SharedMiniClusterBase.getCluster().getClientConfig();
- String clientConfPath = new File(SharedMiniClusterBase.getCluster().getConfig().getConfDir(), "client.conf").getAbsolutePath();
- props.put("clientConfigurationFile", clientConfPath);
- properties.put("clientConfigurationFile", clientConfPath);
-
- proxyPort = PortUtils.getRandomFreePort();
- proxyServer = Proxy.createProxyServer(HostAndPort.fromParts(hostname, proxyPort), factory, props, clientConfig).server;
- while (!proxyServer.isServing())
- UtilWaitThread.sleep(100);
- }
-
- @AfterClass
- public static void tearDownProxy() throws Exception {
- if (null != proxyServer) {
- proxyServer.stop();
- }
- }
-
- final IteratorSetting setting = new IteratorSetting(100, "slow", SlowIterator.class.getName(), Collections.singletonMap("sleepTime", "200"));
- String table;
- ByteBuffer badLogin;
-
- @Before
- public void setup() throws Exception {
- // Create a new client for each test
- if (isKerberosEnabled()) {
- UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
- proxyClient = new TestProxyClient(hostname, proxyPort, factory, proxyPrimary, UserGroupInformation.getCurrentUser());
- client = proxyClient.proxy();
- creds = client.login(clientPrincipal, properties);
-
- TestingKdc kdc = getKdc();
- final ClusterUser user = kdc.getClientPrincipal(0);
- // Create another user
- client.createLocalUser(creds, user.getPrincipal(), s2bb("unused"));
- // Log in as the user we just created
- UserGroupInformation.loginUserFromKeytab(user.getPrincipal(), user.getKeytab().getAbsolutePath());
- final UserGroupInformation badUgi = UserGroupInformation.getCurrentUser();
- // Get a "Credentials" object for the proxy
- TestProxyClient badClient = new TestProxyClient(hostname, proxyPort, factory, proxyPrimary, badUgi);
- try {
- Client badProxy = badClient.proxy();
- badLogin = badProxy.login(user.getPrincipal(), properties);
- } finally {
- badClient.close();
- }
-
- // Log back in as the test user
- UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
- // Drop test user, invalidating the credentials (not to mention not having the krb credentials anymore)
- client.dropLocalUser(creds, user.getPrincipal());
- } else {
- proxyClient = new TestProxyClient(hostname, proxyPort, factory);
- client = proxyClient.proxy();
- creds = client.login("root", properties);
-
- // Create 'user'
- client.createLocalUser(creds, "user", s2bb(SharedMiniClusterBase.getRootPassword()));
- // Log in as 'user'
- badLogin = client.login("user", properties);
- // Drop 'user', invalidating the credentials
- client.dropLocalUser(creds, "user");
- }
-
- // Create a general table to be used
- table = getUniqueNames(1)[0];
- client.createTable(creds, table, true, TimeType.MILLIS);
- }
-
- @After
- public void teardown() throws Exception {
- if (null != table) {
- if (isKerberosEnabled()) {
- UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
- }
- try {
- if (client.tableExists(creds, table)) {
- client.deleteTable(creds, table);
- }
- } catch (Exception e) {
- log.warn("Failed to delete test table", e);
- }
- }
-
- // Close the transport after the test
- if (null != proxyClient) {
- proxyClient.close();
- }
- }
-
- /*
- * Set a lower timeout for tests that should fail fast
- */
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void addConstraintLoginFailure() throws Exception {
- client.addConstraint(badLogin, table, NumericValueConstraint.class.getName());
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void addSplitsLoginFailure() throws Exception {
- client.addSplits(badLogin, table, Collections.singleton(s2bb("1")));
- }
-
- @Test(expected = TApplicationException.class, timeout = 5000)
- public void clearLocatorCacheLoginFailure() throws Exception {
- client.clearLocatorCache(badLogin, table);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void compactTableLoginFailure() throws Exception {
- client.compactTable(badLogin, table, null, null, null, true, false, null);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void cancelCompactionLoginFailure() throws Exception {
- client.cancelCompaction(badLogin, table);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void createTableLoginFailure() throws Exception {
- client.createTable(badLogin, table, false, TimeType.MILLIS);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void deleteTableLoginFailure() throws Exception {
- client.deleteTable(badLogin, table);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void deleteRowsLoginFailure() throws Exception {
- client.deleteRows(badLogin, table, null, null);
- }
-
- @Test(expected = TApplicationException.class, timeout = 5000)
- public void tableExistsLoginFailure() throws Exception {
- client.tableExists(badLogin, table);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void flushTableLoginFailure() throws Exception {
- client.flushTable(badLogin, table, null, null, false);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void getLocalityGroupsLoginFailure() throws Exception {
- client.getLocalityGroups(badLogin, table);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void getMaxRowLoginFailure() throws Exception {
- client.getMaxRow(badLogin, table, Collections.<ByteBuffer> emptySet(), null, false, null, false);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void getTablePropertiesLoginFailure() throws Exception {
- client.getTableProperties(badLogin, table);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void listSplitsLoginFailure() throws Exception {
- client.listSplits(badLogin, table, 10000);
- }
-
- @Test(expected = TApplicationException.class, timeout = 5000)
- public void listTablesLoginFailure() throws Exception {
- client.listTables(badLogin);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void listConstraintsLoginFailure() throws Exception {
- client.listConstraints(badLogin, table);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void mergeTabletsLoginFailure() throws Exception {
- client.mergeTablets(badLogin, table, null, null);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void offlineTableLoginFailure() throws Exception {
- client.offlineTable(badLogin, table, false);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void onlineTableLoginFailure() throws Exception {
- client.onlineTable(badLogin, table, false);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void removeConstraintLoginFailure() throws Exception {
- client.removeConstraint(badLogin, table, 0);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void removeTablePropertyLoginFailure() throws Exception {
- client.removeTableProperty(badLogin, table, Property.TABLE_FILE_MAX.getKey());
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void renameTableLoginFailure() throws Exception {
- client.renameTable(badLogin, table, "someTableName");
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void setLocalityGroupsLoginFailure() throws Exception {
- Map<String,Set<String>> groups = new HashMap<String,Set<String>>();
- groups.put("group1", Collections.singleton("cf1"));
- groups.put("group2", Collections.singleton("cf2"));
- client.setLocalityGroups(badLogin, table, groups);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void setTablePropertyLoginFailure() throws Exception {
- client.setTableProperty(badLogin, table, Property.TABLE_FILE_MAX.getKey(), "0");
- }
-
- @Test(expected = TException.class, timeout = 5000)
- public void tableIdMapLoginFailure() throws Exception {
- client.tableIdMap(badLogin);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void getSiteConfigurationLoginFailure() throws Exception {
- client.getSiteConfiguration(badLogin);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void getSystemConfigurationLoginFailure() throws Exception {
- client.getSystemConfiguration(badLogin);
- }
-
- @Test(expected = TException.class, timeout = 5000)
- public void getTabletServersLoginFailure() throws Exception {
- client.getTabletServers(badLogin);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void getActiveScansLoginFailure() throws Exception {
- client.getActiveScans(badLogin, "fake");
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void getActiveCompactionsLoginFailure() throws Exception {
- client.getActiveCompactions(badLogin, "fakse");
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void removePropertyLoginFailure() throws Exception {
- client.removeProperty(badLogin, "table.split.threshold");
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void setPropertyLoginFailure() throws Exception {
- client.setProperty(badLogin, "table.split.threshold", "500M");
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void testClassLoadLoginFailure() throws Exception {
- client.testClassLoad(badLogin, DevNull.class.getName(), SortedKeyValueIterator.class.getName());
- }
-
- @Test(timeout = 5000)
- public void authenticateUserLoginFailure() throws Exception {
- if (!isKerberosEnabled()) {
- try {
- // Not really a relevant test for kerberos
- client.authenticateUser(badLogin, "root", s2pp(SharedMiniClusterBase.getRootPassword()));
- fail("Expected AccumuloSecurityException");
- } catch (AccumuloSecurityException e) {
- // Expected
- return;
- }
- }
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void changeUserAuthorizationsLoginFailure() throws Exception {
- HashSet<ByteBuffer> auths = new HashSet<ByteBuffer>(Arrays.asList(s2bb("A"), s2bb("B")));
- client.changeUserAuthorizations(badLogin, "stooge", auths);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void changePasswordLoginFailure() throws Exception {
- client.changeLocalUserPassword(badLogin, "stooge", s2bb(""));
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void createUserLoginFailure() throws Exception {
- client.createLocalUser(badLogin, "stooge", s2bb("password"));
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void dropUserLoginFailure() throws Exception {
- client.dropLocalUser(badLogin, "stooge");
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void getUserAuthorizationsLoginFailure() throws Exception {
- client.getUserAuthorizations(badLogin, "stooge");
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void grantSystemPermissionLoginFailure() throws Exception {
- client.grantSystemPermission(badLogin, "stooge", SystemPermission.CREATE_TABLE);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void grantTablePermissionLoginFailure() throws Exception {
- client.grantTablePermission(badLogin, "root", table, TablePermission.WRITE);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void hasSystemPermissionLoginFailure() throws Exception {
- client.hasSystemPermission(badLogin, "stooge", SystemPermission.CREATE_TABLE);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void hasTablePermissionLoginFailure() throws Exception {
- client.hasTablePermission(badLogin, "root", table, TablePermission.WRITE);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void listLocalUsersLoginFailure() throws Exception {
- client.listLocalUsers(badLogin);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void revokeSystemPermissionLoginFailure() throws Exception {
- client.revokeSystemPermission(badLogin, "stooge", SystemPermission.CREATE_TABLE);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void revokeTablePermissionLoginFailure() throws Exception {
- client.revokeTablePermission(badLogin, "root", table, TablePermission.ALTER_TABLE);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void createScannerLoginFailure() throws Exception {
- client.createScanner(badLogin, table, new ScanOptions());
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void createBatchScannerLoginFailure() throws Exception {
- client.createBatchScanner(badLogin, table, new BatchScanOptions());
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void updateAndFlushLoginFailure() throws Exception {
- client.updateAndFlush(badLogin, table, new HashMap<ByteBuffer,List<ColumnUpdate>>());
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void createWriterLoginFailure() throws Exception {
- client.createWriter(badLogin, table, new WriterOptions());
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void attachIteratorLoginFailure() throws Exception {
- client.attachIterator(badLogin, "slow", setting, EnumSet.allOf(IteratorScope.class));
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void checkIteratorLoginFailure() throws Exception {
- client.checkIteratorConflicts(badLogin, table, setting, EnumSet.allOf(IteratorScope.class));
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void cloneTableLoginFailure() throws Exception {
- client.cloneTable(badLogin, table, table + "_clone", false, null, null);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void exportTableLoginFailure() throws Exception {
- client.exportTable(badLogin, table, "/tmp");
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void importTableLoginFailure() throws Exception {
- client.importTable(badLogin, "testify", "/tmp");
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void getIteratorSettingLoginFailure() throws Exception {
- client.getIteratorSetting(badLogin, table, "foo", IteratorScope.SCAN);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void listIteratorsLoginFailure() throws Exception {
- client.listIterators(badLogin, table);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void removeIteratorLoginFailure() throws Exception {
- client.removeIterator(badLogin, table, "name", EnumSet.allOf(IteratorScope.class));
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void splitRangeByTabletsLoginFailure() throws Exception {
- client.splitRangeByTablets(badLogin, table, client.getRowRange(ByteBuffer.wrap("row".getBytes())), 10);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void importDirectoryLoginFailure() throws Exception {
- MiniAccumuloClusterImpl cluster = SharedMiniClusterBase.getCluster();
- Path base = cluster.getTemporaryPath();
- Path importDir = new Path(base, "importDir");
- Path failuresDir = new Path(base, "failuresDir");
- assertTrue(cluster.getFileSystem().mkdirs(importDir));
- assertTrue(cluster.getFileSystem().mkdirs(failuresDir));
- client.importDirectory(badLogin, table, importDir.toString(), failuresDir.toString(), true);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void pingTabletServerLoginFailure() throws Exception {
- client.pingTabletServer(badLogin, "fake");
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void loginFailure() throws Exception {
- client.login("badUser", properties);
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void testTableClassLoadLoginFailure() throws Exception {
- client.testTableClassLoad(badLogin, table, VersioningIterator.class.getName(), SortedKeyValueIterator.class.getName());
- }
-
- @Test(expected = AccumuloSecurityException.class, timeout = 5000)
- public void createConditionalWriterLoginFailure() throws Exception {
- client.createConditionalWriter(badLogin, table, new ConditionalWriterOptions());
- }
-
- @Test
- public void tableNotFound() throws Exception {
- final String doesNotExist = "doesNotExists";
- try {
- client.addConstraint(creds, doesNotExist, NumericValueConstraint.class.getName());
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.addSplits(creds, doesNotExist, Collections.<ByteBuffer> emptySet());
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- final IteratorSetting setting = new IteratorSetting(100, "slow", SlowIterator.class.getName(), Collections.singletonMap("sleepTime", "200"));
- try {
- client.attachIterator(creds, doesNotExist, setting, EnumSet.allOf(IteratorScope.class));
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.cancelCompaction(creds, doesNotExist);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.checkIteratorConflicts(creds, doesNotExist, setting, EnumSet.allOf(IteratorScope.class));
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.clearLocatorCache(creds, doesNotExist);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- final String TABLE_TEST = getUniqueNames(1)[0];
- client.cloneTable(creds, doesNotExist, TABLE_TEST, false, null, null);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.compactTable(creds, doesNotExist, null, null, null, true, false, null);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.createBatchScanner(creds, doesNotExist, new BatchScanOptions());
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.createScanner(creds, doesNotExist, new ScanOptions());
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.createWriter(creds, doesNotExist, new WriterOptions());
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.deleteRows(creds, doesNotExist, null, null);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.deleteTable(creds, doesNotExist);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.exportTable(creds, doesNotExist, "/tmp");
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.flushTable(creds, doesNotExist, null, null, false);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.getIteratorSetting(creds, doesNotExist, "foo", IteratorScope.SCAN);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.getLocalityGroups(creds, doesNotExist);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.getMaxRow(creds, doesNotExist, Collections.<ByteBuffer> emptySet(), null, false, null, false);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.getTableProperties(creds, doesNotExist);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.grantTablePermission(creds, "root", doesNotExist, TablePermission.WRITE);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.hasTablePermission(creds, "root", doesNotExist, TablePermission.WRITE);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- MiniAccumuloClusterImpl cluster = SharedMiniClusterBase.getCluster();
- Path base = cluster.getTemporaryPath();
- Path importDir = new Path(base, "importDir");
- Path failuresDir = new Path(base, "failuresDir");
- assertTrue(cluster.getFileSystem().mkdirs(importDir));
- assertTrue(cluster.getFileSystem().mkdirs(failuresDir));
- client.importDirectory(creds, doesNotExist, importDir.toString(), failuresDir.toString(), true);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.listConstraints(creds, doesNotExist);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.listSplits(creds, doesNotExist, 10000);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.mergeTablets(creds, doesNotExist, null, null);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.offlineTable(creds, doesNotExist, false);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.onlineTable(creds, doesNotExist, false);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.removeConstraint(creds, doesNotExist, 0);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.removeIterator(creds, doesNotExist, "name", EnumSet.allOf(IteratorScope.class));
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.removeTableProperty(creds, doesNotExist, Property.TABLE_FILE_MAX.getKey());
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.renameTable(creds, doesNotExist, "someTableName");
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.revokeTablePermission(creds, "root", doesNotExist, TablePermission.ALTER_TABLE);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.setTableProperty(creds, doesNotExist, Property.TABLE_FILE_MAX.getKey(), "0");
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.splitRangeByTablets(creds, doesNotExist, client.getRowRange(ByteBuffer.wrap("row".getBytes())), 10);
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.updateAndFlush(creds, doesNotExist, new HashMap<ByteBuffer,List<ColumnUpdate>>());
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.getDiskUsage(creds, Collections.singleton(doesNotExist));
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.testTableClassLoad(creds, doesNotExist, VersioningIterator.class.getName(), SortedKeyValueIterator.class.getName());
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- try {
- client.createConditionalWriter(creds, doesNotExist, new ConditionalWriterOptions());
- fail("exception not thrown");
- } catch (TableNotFoundException ex) {}
- }
-
- @Test
- public void testExists() throws Exception {
- client.createTable(creds, "ett1", false, TimeType.MILLIS);
- client.createTable(creds, "ett2", false, TimeType.MILLIS);
- try {
- client.createTable(creds, "ett1", false, TimeType.MILLIS);
- fail("exception not thrown");
- } catch (TableExistsException tee) {}
- try {
- client.renameTable(creds, "ett1", "ett2");
- fail("exception not thrown");
- } catch (TableExistsException tee) {}
- try {
- client.cloneTable(creds, "ett1", "ett2", false, new HashMap<String,String>(), new HashSet<String>());
- fail("exception not thrown");
- } catch (TableExistsException tee) {}
- }
-
- @Test
- public void testUnknownScanner() throws Exception {
- String scanner = client.createScanner(creds, table, null);
- assertFalse(client.hasNext(scanner));
- client.closeScanner(scanner);
-
- try {
- client.hasNext(scanner);
- fail("exception not thrown");
- } catch (UnknownScanner us) {}
-
- try {
- client.closeScanner(scanner);
- fail("exception not thrown");
- } catch (UnknownScanner us) {}
-
- try {
- client.nextEntry("99999999");
- fail("exception not thrown");
- } catch (UnknownScanner us) {}
- try {
- client.nextK("99999999", 6);
- fail("exception not thrown");
- } catch (UnknownScanner us) {}
- try {
- client.hasNext("99999999");
- fail("exception not thrown");
- } catch (UnknownScanner us) {}
- try {
- client.hasNext(UUID.randomUUID().toString());
- fail("exception not thrown");
- } catch (UnknownScanner us) {}
- }
-
- @Test
- public void testUnknownWriter() throws Exception {
- String writer = client.createWriter(creds, table, null);
- client.update(writer, mutation("row0", "cf", "cq", "value"));
- client.flush(writer);
- client.update(writer, mutation("row2", "cf", "cq", "value2"));
- client.closeWriter(writer);
-
- // update() is a Thrift oneway call, so it does not throw exceptions
- client.update(writer, mutation("row2", "cf", "cq", "value2"));
-
- try {
- client.flush(writer);
- fail("exception not thrown");
- } catch (UnknownWriter uw) {}
- try {
- client.flush("99999");
- fail("exception not thrown");
- } catch (UnknownWriter uw) {}
- try {
- client.flush(UUID.randomUUID().toString());
- fail("exception not thrown");
- } catch (UnknownWriter uw) {}
- try {
- client.closeWriter("99999");
- fail("exception not thrown");
- } catch (UnknownWriter uw) {}
- }
-
- @Test
- public void testDelete() throws Exception {
- client.updateAndFlush(creds, table, mutation("row0", "cf", "cq", "value"));
-
- assertScan(new String[][] {{"row0", "cf", "cq", "value"}}, table);
-
- ColumnUpdate upd = new ColumnUpdate(s2bb("cf"), s2bb("cq"));
- upd.setDeleteCell(false);
- Map<ByteBuffer,List<ColumnUpdate>> notDelete = Collections.singletonMap(s2bb("row0"), Collections.singletonList(upd));
- client.updateAndFlush(creds, table, notDelete);
- String scanner = client.createScanner(creds, table, null);
- ScanResult entries = client.nextK(scanner, 10);
- client.closeScanner(scanner);
- assertFalse(entries.more);
- assertEquals("Results: " + entries.results, 1, entries.results.size());
-
- upd = new ColumnUpdate(s2bb("cf"), s2bb("cq"));
- upd.setDeleteCell(true);
- Map<ByteBuffer,List<ColumnUpdate>> delete = Collections.singletonMap(s2bb("row0"), Collections.singletonList(upd));
-
- client.updateAndFlush(creds, table, delete);
-
- assertScan(new String[][] {}, table);
- }
-
- @Test
- public void testSystemProperties() throws Exception {
- Map<String,String> cfg = client.getSiteConfiguration(creds);
-
- // set a property in zookeeper
- client.setProperty(creds, "table.split.threshold", "500M");
-
- // check that we can read it
- for (int i = 0; i < 5; i++) {
- cfg = client.getSystemConfiguration(creds);
- if ("500M".equals(cfg.get("table.split.threshold")))
- break;
- UtilWaitThread.sleep(200);
- }
- assertEquals("500M", cfg.get("table.split.threshold"));
-
- // unset the setting, check that it's not what it was
- client.removeProperty(creds, "table.split.threshold");
- for (int i = 0; i < 5; i++) {
- cfg = client.getSystemConfiguration(creds);
- if (!"500M".equals(cfg.get("table.split.threshold")))
- break;
- UtilWaitThread.sleep(200);
- }
- assertNotEquals("500M", cfg.get("table.split.threshold"));
- }
-
- @Test
- public void pingTabletServers() throws Exception {
- int tservers = 0;
- for (String tserver : client.getTabletServers(creds)) {
- client.pingTabletServer(creds, tserver);
- tservers++;
- }
- assertTrue(tservers > 0);
- }
-
- @Test
- public void testSiteConfiguration() throws Exception {
- // get something we know is in the site config
- MiniAccumuloClusterImpl cluster = SharedMiniClusterBase.getCluster();
- Map<String,String> cfg = client.getSiteConfiguration(creds);
- assertTrue(cfg.get("instance.dfs.dir").startsWith(cluster.getConfig().getAccumuloDir().getAbsolutePath()));
- }
-
- @Test
- public void testClassLoad() throws Exception {
- // try to load some classes via the proxy
- assertTrue(client.testClassLoad(creds, DevNull.class.getName(), SortedKeyValueIterator.class.getName()));
- assertFalse(client.testClassLoad(creds, "foo.bar", SortedKeyValueIterator.class.getName()));
- }
-
- @Test
- public void attachIteratorsWithScans() throws Exception {
- if (client.tableExists(creds, "slow")) {
- client.deleteTable(creds, "slow");
- }
-
- // create a table that's very slow, so we can look for scans
- client.createTable(creds, "slow", true, TimeType.MILLIS);
- IteratorSetting setting = new IteratorSetting(100, "slow", SlowIterator.class.getName(), Collections.singletonMap("sleepTime", "250"));
- client.attachIterator(creds, "slow", setting, EnumSet.allOf(IteratorScope.class));
-
- // Should take 10 seconds to read every record
- for (int i = 0; i < 40; i++) {
- client.updateAndFlush(creds, "slow", mutation("row" + i, "cf", "cq", "value"));
- }
-
- // scan
- Thread t = new Thread() {
- @Override
- public void run() {
- String scanner;
- TestProxyClient proxyClient2 = null;
- try {
- if (isKerberosEnabled()) {
- UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
- proxyClient2 = new TestProxyClient(hostname, proxyPort, factory, proxyPrimary, UserGroupInformation.getCurrentUser());
- } else {
- proxyClient2 = new TestProxyClient(hostname, proxyPort, factory);
- }
-
- Client client2 = proxyClient2.proxy();
- scanner = client2.createScanner(creds, "slow", null);
- client2.nextK(scanner, 10);
- client2.closeScanner(scanner);
- } catch (Exception e) {
- throw new RuntimeException(e);
- } finally {
- if (null != proxyClient2) {
- proxyClient2.close();
- }
- }
- }
- };
- t.start();
-
- // look for the scan many times
- List<ActiveScan> scans = new ArrayList<ActiveScan>();
- for (int i = 0; i < 100 && scans.isEmpty(); i++) {
- for (String tserver : client.getTabletServers(creds)) {
- List<ActiveScan> scansForServer = client.getActiveScans(creds, tserver);
- for (ActiveScan scan : scansForServer) {
- if (clientPrincipal.equals(scan.getUser())) {
- scans.add(scan);
- }
- }
-
- if (!scans.isEmpty())
- break;
- UtilWaitThread.sleep(100);
- }
- }
- t.join();
-
- assertFalse("Expected to find scans, but found none", scans.isEmpty());
- boolean found = false;
- Map<String,String> map = null;
- for (int i = 0; i < scans.size() && !found; i++) {
- ActiveScan scan = scans.get(i);
- if (clientPrincipal.equals(scan.getUser())) {
- assertTrue(ScanState.RUNNING.equals(scan.getState()) || ScanState.QUEUED.equals(scan.getState()));
- assertEquals(ScanType.SINGLE, scan.getType());
- assertEquals("slow", scan.getTable());
-
- map = client.tableIdMap(creds);
- assertEquals(map.get("slow"), scan.getExtent().tableId);
- assertTrue(scan.getExtent().endRow == null);
- assertTrue(scan.getExtent().prevEndRow == null);
- found = true;
- }
- }
-
- assertTrue("Could not find a scan against the 'slow' table", found);
- }
-
- @Test
- public void attachIteratorWithCompactions() throws Exception {
- if (client.tableExists(creds, "slow")) {
- client.deleteTable(creds, "slow");
- }
-
- // create a table that's very slow, so we can look for compactions
- client.createTable(creds, "slow", true, TimeType.MILLIS);
- IteratorSetting setting = new IteratorSetting(100, "slow", SlowIterator.class.getName(), Collections.singletonMap("sleepTime", "250"));
- client.attachIterator(creds, "slow", setting, EnumSet.allOf(IteratorScope.class));
-
- // Should take 10 seconds to read every record
- for (int i = 0; i < 40; i++) {
- client.updateAndFlush(creds, "slow", mutation("row" + i, "cf", "cq", "value"));
- }
-
- Map<String,String> map = client.tableIdMap(creds);
-
- // start a compaction
- Thread t = new Thread() {
- @Override
- public void run() {
- TestProxyClient proxyClient2 = null;
- try {
- if (isKerberosEnabled()) {
- UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
- proxyClient2 = new TestProxyClient(hostname, proxyPort, factory, proxyPrimary, UserGroupInformation.getCurrentUser());
- } else {
- proxyClient2 = new TestProxyClient(hostname, proxyPort, factory);
- }
- Client client2 = proxyClient2.proxy();
- client2.compactTable(creds, "slow", null, null, null, true, true, null);
- } catch (Exception e) {
- throw new RuntimeException(e);
- } finally {
- if (null != proxyClient2) {
- proxyClient2.close();
- }
- }
- }
- };
- t.start();
-
- final String desiredTableId = map.get("slow");
-
- // Make sure we can find the slow table
- assertNotNull(desiredTableId);
-
- // try to catch it in the act
- List<ActiveCompaction> compactions = new ArrayList<ActiveCompaction>();
- for (int i = 0; i < 100 && compactions.isEmpty(); i++) {
- // Iterate over the tservers
- for (String tserver : client.getTabletServers(creds)) {
- // And get the compactions on each
- List<ActiveCompaction> compactionsOnServer = client.getActiveCompactions(creds, tserver);
- for (ActiveCompaction compact : compactionsOnServer) {
- // There might be other compactions occurring (e.g. on METADATA) in which
- // case we want to prune out those that aren't for our slow table
- if (desiredTableId.equals(compact.getExtent().tableId)) {
- compactions.add(compact);
- }
- }
-
- // If we found a compaction for the table we wanted, we can stop looking
- if (!compactions.isEmpty())
- break;
- }
- UtilWaitThread.sleep(10);
- }
- t.join();
-
- // verify the compaction information
- assertFalse(compactions.isEmpty());
- for (ActiveCompaction c : compactions) {
- if (desiredTableId.equals(c.getExtent().tableId)) {
- assertTrue(c.inputFiles.isEmpty());
- assertEquals(CompactionType.MINOR, c.getType());
- assertEquals(CompactionReason.USER, c.getReason());
- assertEquals("", c.localityGroup);
- assertTrue(c.outputFile.contains("default_tablet"));
-
- return;
- }
- }
-
- fail("Expection to find running compaction for table 'slow' but did not find one");
- }
-
- @Test
- public void userAuthentication() throws Exception {
- if (isKerberosEnabled()) {
- assertTrue(client.authenticateUser(creds, clientPrincipal, Collections.<String,String> emptyMap()));
- // Can't really authenticate "badly" at the application level w/ kerberos. It's going to fail to even set up an RPC
- } else {
- // check password
- assertTrue(client.authenticateUser(creds, "root", s2pp(SharedMiniClusterBase.getRootPassword())));
- assertFalse(client.authenticateUser(creds, "root", s2pp("")));
- }
- }
-
- @Test
- public void userManagement() throws Exception {
-
- String user;
- ClusterUser otherClient = null;
- ByteBuffer password = s2bb("password");
- if (isKerberosEnabled()) {
- otherClient = getKdc().getClientPrincipal(1);
- user = otherClient.getPrincipal();
- } else {
- user = getUniqueNames(1)[0];
- }
-
- // create a user
- client.createLocalUser(creds, user, password);
- // change auths
- Set<String> users = client.listLocalUsers(creds);
- Set<String> expectedUsers = new HashSet<String>(Arrays.asList(clientPrincipal, user));
- assertTrue("Did not find all expected users: " + expectedUsers, users.containsAll(expectedUsers));
- HashSet<ByteBuffer> auths = new HashSet<ByteBuffer>(Arrays.asList(s2bb("A"), s2bb("B")));
- client.changeUserAuthorizations(creds, user, auths);
- List<ByteBuffer> update = client.getUserAuthorizations(creds, user);
- assertEquals(auths, new HashSet<ByteBuffer>(update));
-
- // change password
- if (!isKerberosEnabled()) {
- password = s2bb("");
- client.changeLocalUserPassword(creds, user, password);
- assertTrue(client.authenticateUser(creds, user, s2pp(ByteBufferUtil.toString(password))));
- }
-
- if (isKerberosEnabled()) {
- UserGroupInformation.loginUserFromKeytab(otherClient.getPrincipal(), otherClient.getKeytab().getAbsolutePath());
- final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
- // Log back in and make a new connection; can't use the previous one
-
- TestProxyClient otherProxyClient = null;
- try {
- otherProxyClient = new TestProxyClient(hostname, proxyPort, factory, proxyPrimary, ugi);
- otherProxyClient.proxy().login(user, Collections.<String,String> emptyMap());
- } finally {
- if (null != otherProxyClient) {
- otherProxyClient.close();
- }
- }
- } else {
- // check login with new password
- client.login(user, s2pp(ByteBufferUtil.toString(password)));
- }
- }
-
- @Test
- public void userPermissions() throws Exception {
- String userName = getUniqueNames(1)[0];
- ClusterUser otherClient = null;
- ByteBuffer password = s2bb("password");
- ByteBuffer user;
-
- TestProxyClient origProxyClient = null;
- Client origClient = null;
- TestProxyClient userProxyClient = null;
- Client userClient = null;
-
- if (isKerberosEnabled()) {
- otherClient = getKdc().getClientPrincipal(1);
- userName = otherClient.getPrincipal();
-
- UserGroupInformation.loginUserFromKeytab(otherClient.getPrincipal(), otherClient.getKeytab().getAbsolutePath());
- final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
- // Log back in and make a new connection; can't use the previous one
-
- userProxyClient = new TestProxyClient(hostname, proxyPort, factory, proxyPrimary, ugi);
-
- origProxyClient = proxyClient;
- origClient = client;
- userClient = client = userProxyClient.proxy();
-
- user = client.login(userName, Collections.<String,String> emptyMap());
- } else {
- userName = getUniqueNames(1)[0];
- // create a user
- client.createLocalUser(creds, userName, password);
- user = client.login(userName, s2pp(ByteBufferUtil.toString(password)));
- }
-
- // check permission failure
- try {
- client.createTable(user, "fail", true, TimeType.MILLIS);
- fail("should not create the table");
- } catch (AccumuloSecurityException ex) {
- if (isKerberosEnabled()) {
- // Switch back to original client
- UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
- client = origClient;
- }
- assertFalse(client.listTables(creds).contains("fail"));
- }
- // grant permissions and test
- assertFalse(client.hasSystemPermission(creds, userName, SystemPermission.CREATE_TABLE));
- client.grantSystemPermission(creds, userName, SystemPermission.CREATE_TABLE);
- assertTrue(client.hasSystemPermission(creds, userName, SystemPermission.CREATE_TABLE));
- if (isKerberosEnabled()) {
- // Switch back to the extra user
- UserGroupInformation.loginUserFromKeytab(otherClient.getPrincipal(), otherClient.getKeytab().getAbsolutePath());
- client = userClient;
- }
- client.createTable(user, "success", true, TimeType.MILLIS);
- if (isKerberosEnabled()) {
- // Switch back to original client
- UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
- client = origClient;
- }
- client.listTables(creds).contains("succcess");
-
- // revoke permissions
- client.revokeSystemPermission(creds, userName, SystemPermission.CREATE_TABLE);
- assertFalse(client.hasSystemPermission(creds, userName, SystemPermission.CREATE_TABLE));
- try {
- if (isKerberosEnabled()) {
- // Switch back to the extra user
- UserGroupInformation.loginUserFromKeytab(otherClient.getPrincipal(), otherClient.getKeytab().getAbsolutePath());
- client = userClient;
- }
- client.createTable(user, "fail", true, TimeType.MILLIS);
- fail("should not create the table");
- } catch (AccumuloSecurityException ex) {
- if (isKerberosEnabled()) {
- // Switch back to original client
- UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
- client = origClient;
- }
- assertFalse(client.listTables(creds).contains("fail"));
- }
- // denied!
- try {
- if (isKerberosEnabled()) {
- // Switch back to the extra user
- UserGroupInformation.loginUserFromKeytab(otherClient.getPrincipal(), otherClient.getKeytab().getAbsolutePath());
- client = userClient;
- }
- String scanner = client.createScanner(user, table, null);
- client.nextK(scanner, 100);
- fail("stooge should not read table test");
- } catch (AccumuloSecurityException ex) {}
-
- if (isKerberosEnabled()) {
- // Switch back to original client
- UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
- client = origClient;
- }
-
- // grant
- assertFalse(client.hasTablePermission(creds, userName, table, TablePermission.READ));
- client.grantTablePermission(creds, userName, table, TablePermission.READ);
- assertTrue(client.hasTablePermission(creds, userName, table, TablePermission.READ));
-
- if (isKerberosEnabled()) {
- // Switch back to the extra user
- UserGroupInformation.loginUserFromKeytab(otherClient.getPrincipal(), otherClient.getKeytab().getAbsolutePath());
- client = userClient;
- }
- String scanner = client.createScanner(user, table, null);
- client.nextK(scanner, 10);
- client.closeScanner(scanner);
-
- if (isKerberosEnabled()) {
- // Switch back to original client
- UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
- client = origClient;
- }
-
- // revoke
- client.revokeTablePermission(creds, userName, table, TablePermission.READ);
- assertFalse(client.hasTablePermission(creds, userName, table, TablePermission.READ));
- try {
- if (isKerberosEnabled()) {
- // Switch back to the extra user
- UserGroupInformation.loginUserFromKeytab(otherClient.getPrincipal(), otherClient.getKeytab().getAbsolutePath());
- client = userClient;
- }
- scanner = client.createScanner(user, table, null);
- client.nextK(scanner, 100);
- fail("stooge should not read table test");
- } catch (AccumuloSecurityException ex) {}
-
- if (isKerberosEnabled()) {
- // Switch back to original client
- UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
- client = origClient;
- }
-
- // delete user
- client.dropLocalUser(creds, userName);
- Set<String> users = client.listLocalUsers(creds);
- assertFalse("Should not see user after they are deleted", users.contains(userName));
-
- if (isKerberosEnabled()) {
- userProxyClient.close();
- proxyClient = origProxyClient;
- client = origClient;
- }
- }
-
- @Test
- public void testBatchWriter() throws Exception {
- client.addConstraint(creds, table, NumericValueConstraint.class.getName());
- // zookeeper propagation time
- UtilWaitThread.sleep(ZOOKEEPER_PROPAGATION_TIME);
-
- WriterOptions writerOptions = new WriterOptions();
- writerOptions.setLatencyMs(10000);
- writerOptions.setMaxMemory(2);
- writerOptions.setThreads(1);
- writerOptions.setTimeoutMs(100000);
-
- Map<String,Integer> constraints = client.listConstraints(creds, table);
- while (!constraints.containsKey(NumericValueConstraint.class.getName())) {
- log.info("Constraints don't contain NumericValueConstraint");
- Thread.sleep(2000);
- constraints = client.listConstraints(creds, table);
- }
-
- boolean success = false;
- for (int i = 0; i < 15; i++) {
- String batchWriter = client.createWriter(creds, table, writerOptions);
- client.update(batchWriter, mutation("row1", "cf", "cq", "x"));
- client.update(batchWriter, mutation("row1", "cf", "cq", "x"));
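- // Once the constraint has propagated, flush/close will throw MutationsRejectedException; until then, wait and retry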
- try {
- client.flush(batchWriter);
- log.debug("Constraint failed to fire. Waiting and retrying");
- Thread.sleep(5000);
- continue;
- } catch (MutationsRejectedException ex) {}
- try {
- client.closeWriter(batchWriter);
- log.debug("Constraint failed to fire. Waiting and retrying");
- Thread.sleep(5000);
- continue;
- } catch (MutationsRejectedException e) {}
- success = true;
- break;
- }
-
- if (!success) {
- fail("constraint did not fire");
- }
-
- client.removeConstraint(creds, table, 2);
-
- constraints = client.listConstraints(creds, table);
- while (constraints.containsKey(NumericValueConstraint.class.getName())) {
- log.info("Constraints still contains NumericValueConstraint");
- Thread.sleep(2000);
- constraints = client.listConstraints(creds, table);
- }
-
- assertScan(new String[][] {}, table);
-
- UtilWaitThread.sleep(ZOOKEEPER_PROPAGATION_TIME);
-
- writerOptions = new WriterOptions();
- writerOptions.setLatencyMs(10000);
- writerOptions.setMaxMemory(3000);
- writerOptions.setThreads(1);
- writerOptions.setTimeoutMs(100000);
-
- success = false;
- for (int i = 0; i < 15; i++) {
- try {
- String batchWriter = client.createWriter(creds, table, writerOptions);
-
- client.update(batchWriter, mutation("row1", "cf", "cq", "x"));
- client.flush(batchWriter);
- client.closeWriter(batchWriter);
- success = true;
- break;
- } catch (MutationsRejectedException e) {
- log.info("Mutations were rejected, assuming constraint is still active", e);
- Thread.sleep(5000);
- }
- }
-
- if (!success) {
- fail("Failed to successfully write data after constraint was removed");
- }
-
- assertScan(new String[][] {{"row1", "cf", "cq", "x"}}, table);
-
- client.deleteTable(creds, table);
- }
-
- @Test
- public void testTableConstraints() throws Exception {
- log.debug("Setting NumericValueConstraint on " + table);
-
- // constraints
- client.addConstraint(creds, table, NumericValueConstraint.class.getName());
-
- // zookeeper propagation time
- Thread.sleep(ZOOKEEPER_PROPAGATION_TIME);
-
- log.debug("Attempting to verify client-side that constraints are observed");
-
- Map<String,Integer> constraints = client.listConstraints(creds, table);
- while (!constraints.containsKey(NumericValueConstraint.class.getName())) {
- log.debug("Constraints don't contain NumericValueConstraint");
- Thread.sleep(2000);
- constraints = client.listConstraints(creds, table);
- }
-
- assertEquals(2, client.listConstraints(creds, table).size());
- log.debug("Verified client-side that constraints exist");
-
- // Write data that satisfies the constraint
- client.updateAndFlush(creds, table, mutation("row1", "cf", "cq", "123"));
-
- log.debug("Successfully wrote data that satisfies the constraint");
- log.debug("Trying to write data that the constraint should reject");
-
- // Expect failure on data that fails the constraint
- while (true) {
- try {
- client.updateAndFlush(creds, table, mutation("row1", "cf", "cq", "x"));
- log.debug("Expected mutation to be rejected, but was not. Waiting and retrying");
- Thread.sleep(5000);
- } catch (MutationsRejectedException ex) {
- break;
- }
- }
-
- log.debug("Saw expected failure on data which fails the constraint");
-
- log.debug("Removing constraint from table");
- client.removeConstraint(creds, table, 2);
-
- UtilWaitThread.sleep(ZOOKEEPER_PROPAGATION_TIME);
-
- constraints = client.listConstraints(creds, table);
- while (constraints.containsKey(NumericValueConstraint.class.getName())) {
- log.debug("Constraints contains NumericValueConstraint");
- Thread.sleep(2000);
- constraints = client.listConstraints(creds, table);
- }
-
- assertEquals(1, client.listConstraints(creds, table).size());
- log.debug("Verified client-side that the constraint was removed");
-
- log.debug("Attempting to write mutation that should succeed after constraints was removed");
- // Make sure we can write the data after we removed the constraint
- while (true) {
- try {
- client.updateAndFlush(creds, table, mutation("row1", "cf", "cq", "x"));
- break;
- } catch (MutationsRejectedException ex) {
- log.debug("Expected mutation accepted, but was not. Waiting and retrying");
- Thread.sleep(5000);
- }
- }
-
- log.debug("Verifying that record can be read from the table");
- assertScan(new String[][] {{"row1", "cf", "cq", "x"}}, table);
- }
-
- @Test
- public void tableMergesAndSplits() throws Exception {
- // add some splits
- client.addSplits(creds, table, new HashSet<ByteBuffer>(Arrays.asList(s2bb("a"), s2bb("m"), s2bb("z"))));
- List<ByteBuffer> splits = client.listSplits(creds, table, 1);
- assertEquals(Arrays.asList(s2bb("m")), splits);
-
- // Merge some of the splits away
- client.mergeTablets(creds, table, null, s2bb("m"));
- splits = client.listSplits(creds, table, 10);
- assertEquals(Arrays.asList(s2bb("m"), s2bb("z")), splits);
-
- // Merge the entire table
- client.mergeTablets(creds, table, null, null);
- splits = client.listSplits(creds, table, 10);
- List<ByteBuffer> empty = Collections.emptyList();
-
- // No splits after merge on whole table
- assertEquals(empty, splits);
- }
-
- @Test
- public void iteratorFunctionality() throws Exception {
- // iterators
- HashMap<String,String> options = new HashMap<String,String>();
- options.put("type", "STRING");
- options.put("columns", "cf");
- IteratorSetting setting = new IteratorSetting(10, table, SummingCombiner.class.getName(), options);
- client.attachIterator(creds, table, setting, EnumSet.allOf(IteratorScope.class));
- for (int i = 0; i < 10; i++) {
- client.updateAndFlush(creds, table, mutation("row1", "cf", "cq", "1"));
- }
- // 10 updates of "1" in the value w/ SummingCombiner should return value of "10"
- assertScan(new String[][] {{"row1", "cf", "cq", "10"}}, table);
-
- try {
- client.checkIteratorConflicts(creds, table, setting, EnumSet.allOf(IteratorScope.class));
- fail("checkIteratorConflicts did not throw an exception");
- } catch (Exception ex) {
- // Expected
- }
- client.deleteRows(creds, table, null, null);
- client.removeIterator(creds, table, "test", EnumSet.allOf(IteratorScope.class));
- String expected[][] = new String[10][];
- for (int i = 0; i < 10; i++) {
- client.updateAndFlush(creds, table, mutation("row" + i, "cf", "cq", "" + i));
- expected[i] = new String[] {"row" + i, "cf", "cq", "" + i};
- client.flushTable(creds, table, null, null, true);
- }
- assertScan(expected, table);
- }
-
- @Test
- public void cloneTable() throws Exception {
- String TABLE_TEST2 = getUniqueNames(2)[1];
-
- String expected[][] = new String[10][];
- for (int i = 0; i < 10; i++) {
- client.updateAndFlush(creds, table, mutation("row" + i, "cf", "cq", "" + i));
- expected[i] = new String[] {"row" + i, "cf", "cq", "" + i};
- client.flushTable(creds, table, null, null, true);
- }
- assertScan(expected, table);
-
- // clone
- client.cloneTable(creds, table, TABLE_TEST2, true, null, null);
- assertScan(expected, TABLE_TEST2);
- client.deleteTable(creds, TABLE_TEST2);
- }
-
- @Test
- public void clearLocatorCache() throws Exception {
- // there is no direct way to verify the cache was cleared; just make sure the call completes without error
- client.clearLocatorCache(creds, table);
- }
-
- @Test
- public void compactTable() throws Exception {
- String expected[][] = new String[10][];
- for (int i = 0; i < 10; i++) {
- client.updateAndFlush(creds, table, mutation("row" + i, "cf", "cq", "" + i));
- expected[i] = new String[] {"row" + i, "cf", "cq", "" + i};
- client.flushTable(creds, table, null, null, true);
- }
- assertScan(expected, table);
-
- // compact
- client.compactTable(creds, table, null, null, null, true, true, null);
- assertEquals(1, countFiles(table));
- assertScan(expected, table);
- }
-
- @Test
- public void diskUsage() throws Exception {
- String TABLE_TEST2 = getUniqueNames(2)[1];
-
- // Write some data
- String expected[][] = new String[10][];
- for (int i = 0; i < 10; i++) {
- client.updateAndFlush(creds, table, mutation("row" + i, "cf", "cq", "" + i));
- expected[i] = new String[] {"row" + i, "cf", "cq", "" + i};
- client.flushTable(creds, table, null, null, true);
- }
- assertScan(expected, table);
-
- // compact
- client.compactTable(creds, table, null, null, null, true, true, null);
- assertEquals(1, countFiles(table));
- assertScan(expected, table);
-
- // Clone the table
- client.cloneTable(creds, table, TABLE_TEST2, true, null, null);
- Set<String> tablesToScan = new HashSet<String>();
- tablesToScan.add(table);
- tablesToScan.add(TABLE_TEST2);
- tablesToScan.add("foo");
-
- client.createTable(creds, "foo", true, TimeType.MILLIS);
-
- // get disk usage
- List<DiskUsage> diskUsage = client.getDiskUsage(creds, tablesToScan);
- assertEquals(2, diskUsage.size());
- // The original table and the clone are lumped together (they share the same files)
- assertEquals(2, diskUsage.get(0).getTables().size());
- // The empty table we created
- assertEquals(1, diskUsage.get(1).getTables().size());
-
- // Compact the clone so it writes its own files instead of referring to the original
- client.compactTable(creds, TABLE_TEST2, null, null, null, true, true, null);
-
- diskUsage = client.getDiskUsage(creds, tablesToScan);
- assertEquals(3, diskUsage.size());
- // The original
- assertEquals(1, diskUsage.get(0).getTables().size());
- // The clone w/ its own files now
- assertEquals(1, diskUsage.get(1).getTables().size());
- // The empty table
- assertEquals(1, diskUsage.get(2).getTables().size());
- client.deleteTable(creds, "foo");
- client.deleteTable(creds, TABLE_TEST2);
- }
-
- @Test
- public void importExportTable() throws Exception {
- // Write some data
- String expected[][] = new String[10][];
- for (int i = 0; i < 10; i++) {
- client.updateAndFlush(creds, table, mutation("row" + i, "cf", "cq", "" + i));
- expected[i] = new String[] {"row" + i, "cf", "cq", "" + i};
- client.flushTable(creds, table, null, null, true);
- }
- assertScan(expected, table);
-
- // export/import
- MiniAccumuloClusterImpl cluster = SharedMiniClusterBase.getCluster();
- FileSystem fs = cluster.getFileSystem();
- Path base = cluster.getTemporaryPath();
- Path dir = new Path(base, "test");
- assertTrue(fs.mkdirs(dir));
- Path destDir = new Path(base, "test_dest");
- assertTrue(fs.mkdirs(destDir));
- client.offlineTable(creds, table, false);
- client.exportTable(creds, table, dir.toString());
- // exportTable writes a distcp.txt manifest listing the table's files; copy each listed file to the destination
- FSDataInputStream is = fs.open(new Path(dir, "distcp.txt"));
- try (BufferedReader r = new BufferedReader(new InputStreamReader(is))) {
- while (true) {
- String line = r.readLine();
- if (line == null)
- break;
- Path srcPath = new Path(line);
- FileUtil.copy(fs, srcPath, fs, destDir, false, fs.getConf());
- }
- }
- client.deleteTable(creds, table);
- client.importTable(creds, "testify", destDir.toString());
- assertScan(expected, "testify");
- client.deleteTable(creds, "testify");
-
- try {
- // ACCUMULO-1558: a second import from the same dir should fail because the first import moved the files
- client.importTable(creds, "testify2", destDir.toString());
- fail();
- } catch (Exception e) {}
-
- assertFalse(client.listTables(creds).contains("testify2"));
- }
-
- @Test
- public void localityGroups() throws Exception {
- Map<String,Set<String>> groups = new HashMap<String,Set<String>>();
- groups.put("group1", Collections.singleton("cf1"));
- groups.put("group2", Collections.singleton("cf2"));
- client.setLocalityGroups(creds, table, groups);
- assertEquals(groups, client.getLocalityGroups(creds, table));
- }
-
- @Test
- public void tableProperties() throws Exception {
- Map<String,String> systemProps = client.getSystemConfiguration(creds);
- String systemTableSplitThreshold = systemProps.get("table.split.threshold");
-
- Map<String,String> orig = client.getTableProperties(creds, table);
- client.setTableProperty(creds, table, "table.split.threshold", "500M");
-
- // Get the new table property value
- Map<String,String> update = client.getTableProperties(creds, table);
- assertEquals(update.get("table.split.threshold"), "500M");
-
- // Table level properties shouldn't affect system level values
- assertEquals(systemTableSplitThreshold, client.getSystemConfiguration(creds).get("table.split.threshold"));
-
- client.removeTableProperty(creds, table, "table.split.threshold");
- update = client.getTableProperties(creds, table);
- assertEquals(orig, update);
- }
-
- @Test
- public void tableRenames() throws Exception {
- // rename table
- Map<String,String> tables = client.tableIdMap(creds);
- client.renameTable(creds, table, "bar");
- Map<String,String> tables2 = client.tableIdMap(creds);
- assertEquals(tables.get(table), tables2.get("bar"));
- // table exists
- assertTrue(client.tableExists(creds, "bar"));
- assertFalse(client.tableExists(creds, table));
- client.renameTable(creds, "bar", table);
- }
-
- @Test
- public void bulkImport() throws Exception {
- MiniAccumuloClusterImpl cluster = SharedMiniClusterBase.getCluster();
- FileSystem fs = cluster.getFileSystem();
- Path base = cluster.getTemporaryPath();
- Path dir = new Path(base, "test");
- assertTrue(fs.mkdirs(dir));
-
- // Write an RFile
- String filename = dir + "/bulk/import/rfile.rf";
- FileSKVWriter writer = FileOperations.getInstance().openWriter(filename, fs, fs.getConf(), DefaultConfiguration.getInstance());
- writer.startDefaultLocalityGroup();
- writer.append(new org.apache.accumulo.core.data.Key(new Text("a"), new Text("b"), new Text("c")), new Value("value".getBytes()));
- writer.close();
-
- // Create failures directory
- fs.mkdirs(new Path(dir + "/bulk/fail"));
-
- // Run the bulk import
- client.importDirectory(creds, table, dir + "/bulk/import", dir + "/bulk/fail", true);
-
- // Make sure we find the data
- String scanner = client.createScanner(creds, table, null);
- ScanResult more = client.nextK(scanner, 100);
- client.closeScanner(scanner);
- assertEquals(1, more.results.size());
- ByteBuffer maxRow = client.getMaxRow(creds, table, null, null, false, null, false);
- assertEquals(s2bb("a"), maxRow);
- }
-
- @Test
- public void testTableClassLoad() throws Exception {
- assertFalse(client.testTableClassLoad(creds, table, "abc123", SortedKeyValueIterator.class.getName()));
- assertTrue(client.testTableClassLoad(creds, table, VersioningIterator.class.getName(), SortedKeyValueIterator.class.getName()));
- }
-
- private Condition newCondition(String cf, String cq) {
- return new Condition(new Column(s2bb(cf), s2bb(cq), s2bb("")));
- }
-
- private Condition newCondition(String cf, String cq, String val) {
- return newCondition(cf, cq).setValue(s2bb(val));
- }
-
- private Condition newCondition(String cf, String cq, long ts, String val) {
- return newCondition(cf, cq).setValue(s2bb(val)).setTimestamp(ts);
- }
-
- private ColumnUpdate newColUpdate(String cf, String cq, String val) {
- return new ColumnUpdate(s2bb(cf), s2bb(cq)).setValue(s2bb(val));
- }
-
- private ColumnUpdate newColUpdate(String cf, String cq, long ts, String val) {
- return new ColumnUpdate(s2bb(cf), s2bb(cq)).setTimestamp(ts).setValue(s2bb(val));
- }
-
- private void assertScan(String[][] expected, String table) throws Exception {
- String scid = client.createScanner(creds, table, new ScanOptions());
- ScanResult keyValues = client.nextK(scid, expected.length + 1);
-
- assertEquals("Saw " + keyValues.results, expected.length, keyValues.results.size());
- assertFalse(keyValues.more);
-
- for (int i = 0; i < keyValues.results.size(); i++) {
- checkKey(expected[i][0], expected[i][1], expected[i][2], expected[i][3], keyValues.results.get(i));
- }
-
- client.closeScanner(scid);
- }
-
- @Test
- public void testConditionalWriter() throws Exception {
- log.debug("Adding constraint {} to {}", table, NumericValueConstraint.class.getName());
- client.addConstraint(creds, table, NumericValueConstraint.class.getName());
- UtilWaitThread.sleep(ZOOKEEPER_PROPAGATION_TIME);
-
- while (!client.listConstraints(creds, table).containsKey(NumericValueConstraint.class.getName())) {
- log.info("Failed to see constraint");
- Thread.sleep(1000);
- }
-
- String cwid = client.createConditionalWriter(creds, table, new ConditionalWriterOptions());
-
- Map<ByteBuffer,ConditionalUpdates> updates = new HashMap<ByteBuffer,ConditionalUpdates>();
-
- updates.put(
- s2bb("00345"),
- new ConditionalUpdates(Arrays.asList(newCondition("meta", "seq")), Arrays.asList(newColUpdate("meta", "seq", 10, "1"),
- newColUpdate("data", "img", "73435435"))));
-
- Map<ByteBuffer,ConditionalStatus> results = client.updateRowsConditionally(cwid, updates);
-
- assertEquals(1, results.size());
- assertEquals(ConditionalStatus.ACCEPTED, results.get(s2bb("00345")));
-
- assertScan(new String[][] { {"00345", "data", "img", "73435435"}, {"00345", "meta", "seq", "1"}}, table);
-
- // test not setting values on conditions
- updates.clear();
-
- updates.put(s2bb("00345"), new ConditionalUpdates(Arrays.asList(newCondition("meta", "seq")), Arrays.asList(newColUpdate("meta", "seq", "2"))));
- updates.put(s2bb("00346"), new ConditionalUpdates(Arrays.asList(newCondition("meta", "seq")), Arrays.asList(newColUpdate("meta", "seq", "1"))));
-
- results = client.updateRowsConditionally(cwid, updates);
-
- assertEquals(2, results.size());
- assertEquals(ConditionalStatus.REJECTED, results.get(s2bb("00345")));
- assertEquals(ConditionalStatus.ACCEPTED, results.get(s2bb("00346")));
-
- assertScan(new String[][] { {"00345", "data", "img", "73435435"}, {"00345", "meta", "seq", "1"}, {"00346", "meta", "seq", "1"}}, table);
-
- // test setting values on conditions
- updates.clear();
-
- updates.put(
- s2bb("00345"),
- new ConditionalUpdates(Arrays.asList(newCondition("meta", "seq", "1")), Arrays.asList(newColUpdate("meta", "seq", 20, "2"),
- newColUpdate("data", "img", "567890"))));
-
- updates.put(s2bb("00346"), new ConditionalUpdates(Arrays.asList(newCondition("meta", "seq", "2")), Arrays.asList(newColUpdate("meta", "seq", "3"))));
-
- results = client.updateRowsConditionally(cwid, updates);
-
- assertEquals(2, results.size());
- assertEquals(ConditionalStatus.ACCEPTED, results.get(s2bb("00345")));
- assertEquals(ConditionalStatus.REJECTED, results.get(s2bb("00346")));
-
- assertScan(new String[][] { {"00345", "data", "img", "567890"}, {"00345", "meta", "seq", "2"}, {"00346", "meta", "seq", "1"}}, table);
-
- // test setting timestamp on condition to a non-existent version
- updates.clear();
-
- updates.put(
- s2bb("00345"),
- new ConditionalUpdates(Arrays.asList(newCondition("meta", "seq", 10, "2")), Arrays.asList(newColUpdate("meta", "seq", 30, "3"),
- newColUpdate("data", "img", "1234567890"))));
-
- results = client.updateRowsConditionally(cwid, updates);
-
- assertEquals(1, results.size());
- assertEquals(ConditionalStatus.REJECTED, results.get(s2bb("00345")));
-
- assertScan(new String[][] { {"00345", "data", "img", "567890"}, {"00345", "meta", "seq", "2"}, {"00346", "meta", "seq", "1"}}, table);
-
- // test setting timestamp to an existing version
-
- updates.clear();
-
- updates.put(
- s2bb("00345"),
- new ConditionalUpdates(Arrays.asList(newCondition("meta", "seq", 20, "2")), Arrays.asList(newColUpdate("meta", "seq", 30, "3"),
- newColUpdate("data", "img", "1234567890"))));
-
- results = client.updateRowsConditionally(cwid, updates);
-
- assertEquals(1, results.size());
- assertEquals(ConditionalStatus.ACCEPTED, results.get(s2bb("00345")));
-
- assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"}}, table);
-
- // run test w/ condition that has iterators
- // following should fail w/o iterator
- client.updateAndFlush(creds, table, Collections.singletonMap(s2bb("00347"), Arrays.asList(newColUpdate("data", "count", "1"))));
- client.updateAndFlush(creds, table, Collections.singletonMap(s2bb("00347"), Arrays.asList(newColUpdate("data", "count", "1"))));
- client.updateAndFlush(creds, table, Collections.singletonMap(s2bb("00347"), Arrays.asList(newColUpdate("data", "count", "1"))));
-
- updates.clear();
- updates.put(s2bb("00347"),
- new ConditionalUpdates(Arrays.asList(newCondition("data", "count", "3")), Arrays.asList(newColUpdate("data", "img", "1234567890"))));
-
- results = client.updateRowsConditionally(cwid, updates);
-
- assertEquals(1, results.size());
- assertEquals(ConditionalStatus.REJECTED, results.get(s2bb("00347")));
-
- assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"},
- {"00347", "data", "count", "1"}}, table);
-
- // following test w/ iterator setup should succeed
- Condition iterCond = newCondition("data", "count", "3");
- Map<String,String> props = new HashMap<String,String>();
- props.put("type", "STRING");
- props.put("columns", "data:count");
- IteratorSetting is = new IteratorSetting(1, "sumc", SummingCombiner.class.getName(), props);
- iterCond.setIterators(Arrays.asList(is));
-
- updates.clear();
- updates.put(s2bb("00347"), new ConditionalUpdates(Arrays.asList(iterCond), Arrays.asList(newColUpdate("data", "img", "1234567890"))));
-
- results = client.updateRowsConditionally(cwid, updates);
-
- assertEquals(1, results.size());
- assertEquals(ConditionalStatus.ACCEPTED, results.get(s2bb("00347")));
-
- assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"},
- {"00347", "data", "count", "1"}, {"00347", "data", "img", "1234567890"}}, table);
-
- ConditionalStatus status = null;
- for (int i = 0; i < 30; i++) {
- // test a mutation that violated a constraint
- updates.clear();
- updates.put(s2bb("00347"),
- new ConditionalUpdates(Arrays.asList(newCondition("data", "img", "1234567890")), Arrays.asList(newColUpdate("data", "count", "A"))));
-
- results = client.updateRowsConditionally(cwid, updates);
-
- assertEquals(1, results.size());
- status = results.get(s2bb("00347"));
- if (ConditionalStatus.VIOLATED != status) {
- log.info("ConditionalUpdate was not rejected by server due to table constraint. Sleeping and retrying");
- Thread.sleep(5000);
- continue;
- }
-
- assertEquals(ConditionalStatus.VIOLATED, status);
- break;
- }
-
- // Final check to make sure we succeeded and didn't exceed the retries
- assertEquals(ConditionalStatus.VIOLATED, status);
-
- assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"},
- {"00347", "data", "count", "1"}, {"00347", "data", "img", "1234567890"}}, table);
-
- // run test with two conditions
- // both conditions should fail
- updates.clear();
- updates.put(
- s2bb("00347"),
- new ConditionalUpdates(Arrays.asList(newCondition("data", "img", "565"), newCondition("data", "count", "2")), Arrays.asList(
- newColUpdate("data", "count", "3"), newColUpdate("data", "img", "0987654321"))));
-
- results = client.updateRowsConditionally(cwid, updates);
-
- assertEquals(1, results.size());
- assertEquals(ConditionalStatus.REJECTED, results.get(s2bb("00347")));
-
- assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"},
- {"00347", "data", "count", "1"}, {"00347", "data", "img", "1234567890"}}, table);
-
- // one condition should fail
- updates.clear();
- updates.put(
- s2bb("00347"),
- new ConditionalUpdates(Arrays.asList(newCondition("data", "img", "1234567890"), newCondition("data", "count", "2")), Arrays.asList(
- newColUpdate("data", "count", "3"), newColUpdate("data", "img", "0987654321"))));
-
- results = client.updateRowsConditionally(cwid, updates);
-
- assertEquals(1, results.size());
- assertEquals(ConditionalStatus.REJECTED, results.get(s2bb("00347")));
-
- assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"},
- {"00347", "data", "count", "1"}, {"00347", "data", "img", "1234567890"}}, table);
-
- // one condition should fail
- updates.clear();
- updates.put(
- s2bb("00347"),
- new ConditionalUpdates(Arrays.asList(newCondition("data", "img", "565"), newCondition("data", "count", "1")), Arrays.asList(
- newColUpdate("data", "count", "3"), newColUpdate("data", "img", "0987654321"))));
-
- results = client.updateRowsConditionally(cwid, updates);
-
- assertEquals(1, results.size());
- assertEquals(ConditionalStatus.REJECTED, results.get(s2bb("00347")));
-
- assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"},
- {"00347", "data", "count", "1"}, {"00347", "data", "img", "1234567890"}}, table);
-
- // both conditions should succeed
-
- ConditionalStatus result = client.updateRowConditionally(
- creds,
- table,
- s2bb("00347"),
- new ConditionalUpdates(Arrays.asList(newCondition("data", "img", "1234567890"), newCondition("data", "count", "1")), Arrays.asList(
- newColUpdate("data", "count", "3"), newColUpdate("data", "img", "0987654321"))));
-
- assertEquals(ConditionalStatus.ACCEPTED, result);
-
- assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"},
- {"00347", "data", "count", "3"}, {"00347", "data", "img", "0987654321"}}, table);
-
- client.closeConditionalWriter(cwid);
- try {
- client.updateRowsConditionally(cwid, updates);
- fail("conditional writer not closed");
- } catch (UnknownWriter uk) {}
-
- String principal;
- ClusterUser cwuser = null;
- if (isKerberosEnabled()) {
- cwuser = getKdc().getClientPrincipal(1);
- principal = cwuser.getPrincipal();
- client.createLocalUser(creds, principal, s2bb("unused"));
- } else {
- principal = "cwuser";
- // run test with colvis
- client.createLocalUser(creds, principal, s2bb("bestpasswordever"));
- }
-
- client.changeUserAuthorizations(creds, principal, Collections.singleton(s2bb("A")));
- client.grantTablePermission(creds, principal, table, TablePermission.WRITE);
- client.grantTablePermission(creds, principal, table, TablePermission.READ);
-
- TestProxyClient cwuserProxyClient = null;
- Client origClient = null;
- Map<String,String> cwProperties;
- if (isKerberosEnabled()) {
- UserGroupInformation.loginUserFromKeytab(cwuser.getPrincipal(), cwuser.getKeytab().getAbsolutePath());
- final UserGroupInformation cwuserUgi = UserGroupInformation.getCurrentUser();
- // Log in again and make a new connection; the previous one cannot be reused
- cwuserProxyClient = new TestProxyClient(hostname, proxyPort, factory, proxyPrimary, cwuserUgi);
- origClient = client;
- client = cwuserProxyClient.proxy();
- cwProperties = Collections.emptyMap();
- } else {
- cwProperties = Collections.singletonMap("password", "bestpasswordever");
- }
-
- try {
- ByteBuffer cwCreds = client.login(principal, cwProperties);
-
- cwid = client.createConditionalWriter(cwCreds, table, new ConditionalWriterOptions().setAuthorizations(Collections.singleton(s2bb("A"))));
-
- updates.clear();
- updates.put(
- s2bb("00348"),
- new ConditionalUpdates(Arrays.asList(new Condition(new Column(s2bb("data"), s2bb("c"), s2bb("A")))), Arrays.asList(newColUpdate("data", "seq", "1"),
- newColUpdate("data", "c", "1").setColVisibility(s2bb("A")))));
- updates.put(s2bb("00349"),
- new ConditionalUpdates(Arrays.asList(new Condition(new Column(s2bb("data"), s2bb("c"), s2bb("B")))),
- Arrays.asList(newColUpdate("data", "seq", "1"))));
-
- results = client.updateRowsConditionally(cwid, updates);
-
- assertEquals(2, results.size());
- assertEquals(ConditionalStatus.ACCEPTED, results.get(s2bb("00348")));
- assertEquals(ConditionalStatus.INVISIBLE_VISIBILITY, results.get(s2bb("00349")));
-
- if (isKerberosEnabled()) {
- UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
- client = origClient;
- }
- // Verify that the original user can't see the updates with visibilities set
- assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"},
- {"00347", "data", "count", "3"}, {"00347", "data", "img", "0987654321"}, {"00348", "data", "seq", "1"}}, table);
-
- if (isKerberosEnabled()) {
- UserGroupInformation.loginUserFromKeytab(cwuser.getPrincipal(), cwuser.getKeytab().getAbsolutePath());
- client = cwuserProxyClient.proxy();
- }
-
- updates.clear();
- updates.put(s2bb("00348"), new ConditionalUpdates(Arrays.asList(new Condition(new Column(s2bb("data"), s2bb("c"), s2bb("A"))).setValue(s2bb("0"))),
- Arrays.asList(newColUpdate("data", "seq", "2"), newColUpdate("data", "c", "2").setColVisibility(s2bb("A")))));
-
- results = client.updateRowsConditionally(cwid, updates);
-
- assertEquals(1, results.size());
- assertEquals(ConditionalStatus.REJECTED, results.get(s2bb("00348")));
-
- if (isKerberosEnabled()) {
- UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
- client = origClient;
- }
-
- // Same results as the original user
- assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"},
- {"00347", "data", "count", "3"}, {"00347", "data", "img", "0987654321"}, {"00348", "data", "seq", "1"}}, table);
-
- if (isKerberosEnabled()) {
- UserGroupInformation.loginUserFromKeytab(cwuser.getPrincipal(), cwuser.getKeytab().getAbsolutePath());
- client = cwuserProxyClient.proxy();
- }
-
- updates.clear();
- updates.put(s2bb("00348"), new ConditionalUpdates(Arrays.asList(new Condition(new Column(s2bb("data"), s2bb("c"), s2bb("A"))).setValue(s2bb("1"))),
- Arrays.asList(newColUpdate("data", "seq", "2"), newColUpdate("data", "c", "2").setColVisibility(s2bb("A")))));
-
- results = client.updateRowsConditionally(cwid, updates);
-
- assertEquals(1, results.size());
- assertEquals(ConditionalStatus.ACCEPTED, results.get(s2bb("00348")));
-
- if (isKerberosEnabled()) {
- UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
- client = origClient;
- }
-
- assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"},
- {"00347", "data", "count", "3"}, {"00347", "data", "img", "0987654321"}, {"00348", "data", "seq", "2"}}, table);
-
- if (isKerberosEnabled()) {
- UserGroupInformation.loginUserFromKeytab(cwuser.getPrincipal(), cwuser.getKeytab().getAbsolutePath());
- client = cwuserProxyClient.proxy();
- }
-
- client.closeConditionalWriter(cwid);
- try {
- client.updateRowsConditionally(cwid, updates);
- fail("conditional writer not closed");
- } catch (UnknownWriter uk) {}
- } finally {
- if (isKerberosEnabled()) {
- // Clos
<TRUNCATED>
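The conditional-writer flow above is easy to lose amid the Kerberos re-login plumbing. As a minimal sketch of the core round trip, assuming the same proxy client, creds, and table fixtures and the s2bb/newCondition/newColUpdate helpers defined in the test (the row id 00350 is illustrative):

    String cwid = client.createConditionalWriter(creds, table, new ConditionalWriterOptions());
    Map<ByteBuffer,ConditionalUpdates> updates = new HashMap<ByteBuffer,ConditionalUpdates>();
    // The update applies only if meta:seq currently has no value for this row
    updates.put(s2bb("00350"),
        new ConditionalUpdates(Arrays.asList(newCondition("meta", "seq")),
            Arrays.asList(newColUpdate("meta", "seq", "1"))));
    Map<ByteBuffer,ConditionalStatus> results = client.updateRowsConditionally(cwid, updates);
    assertEquals(ConditionalStatus.ACCEPTED, results.get(s2bb("00350")));
    client.closeConditionalWriter(cwid); // subsequent updates fail with UnknownWriter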
[30/43] accumulo git commit: ACCUMULO-3871 move ITs into distro jar,
stop building test jar
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java
new file mode 100644
index 0000000..4ef2958
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java
@@ -0,0 +1,456 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.accumulo.cluster.ClusterControl;
+import org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster;
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.KerberosToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.file.rfile.PrintInfo;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.MonitorUtil;
+import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.fate.zookeeper.ZooCache;
+import org.apache.accumulo.fate.zookeeper.ZooLock;
+import org.apache.accumulo.fate.zookeeper.ZooReader;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.TestMultiTableIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Charsets;
+import com.google.common.collect.Iterators;
+
+public class ReadWriteIT extends AccumuloClusterHarness {
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
+ }
+
+ private static final Logger log = LoggerFactory.getLogger(ReadWriteIT.class);
+
+ static final int ROWS = 200000;
+ static final int COLS = 1;
+ static final String COLF = "colf";
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 6 * 60;
+ }
+
+ @Test(expected = RuntimeException.class)
+ public void invalidInstanceName() throws Exception {
+ final Connector conn = getConnector();
+ new ZooKeeperInstance("fake_instance_name", conn.getInstance().getZooKeepers());
+ }
+
+ @Test
+ public void sunnyDay() throws Exception {
+ // Start accumulo, create a table, insert some data, verify we can read it out.
+ // Shutdown cleanly.
+ log.debug("Starting Monitor");
+ cluster.getClusterControl().startAllServers(ServerType.MONITOR);
+ Connector connector = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS, COLS, 50, 0, tableName);
+ verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS, COLS, 50, 0, tableName);
+ String monitorLocation = null;
+ while (null == monitorLocation) {
+ monitorLocation = MonitorUtil.getLocation(getConnector().getInstance());
+ if (null == monitorLocation) {
+ log.debug("Could not fetch monitor HTTP address from zookeeper");
+ Thread.sleep(2000);
+ }
+ }
+ URL url = new URL("http://" + monitorLocation);
+ log.debug("Fetching web page " + url);
+ String result = FunctionalTestUtils.readAll(url.openStream());
+ assertTrue(result.length() > 100);
+ log.debug("Stopping accumulo cluster");
+ ClusterControl control = cluster.getClusterControl();
+ control.adminStopAll();
+ ZooReader zreader = new ZooReader(connector.getInstance().getZooKeepers(), connector.getInstance().getZooKeepersSessionTimeOut());
+ ZooCache zcache = new ZooCache(zreader, null);
+ byte[] masterLockData;
+ do {
+ masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(connector.getInstance()) + Constants.ZMASTER_LOCK, null);
+ if (null != masterLockData) {
+ log.info("Master lock is still held");
+ Thread.sleep(1000);
+ }
+ } while (null != masterLockData);
+
+ control.stopAllServers(ServerType.GARBAGE_COLLECTOR);
+ control.stopAllServers(ServerType.MONITOR);
+ control.stopAllServers(ServerType.TRACER);
+ log.debug("success!");
+ // Restarting everything
+ cluster.start();
+ }
+
+ public static void ingest(Connector connector, ClientConfiguration clientConfig, String principal, int rows, int cols, int width, int offset, String tableName)
+ throws Exception {
+ ingest(connector, clientConfig, principal, rows, cols, width, offset, COLF, tableName);
+ }
+
+ public static void ingest(Connector connector, ClientConfiguration clientConfig, String principal, int rows, int cols, int width, int offset, String colf,
+ String tableName) throws Exception {
+ TestIngest.Opts opts = new TestIngest.Opts();
+ opts.rows = rows;
+ opts.cols = cols;
+ opts.dataSize = width;
+ opts.startRow = offset;
+ opts.columnFamily = colf;
+ opts.createTable = true;
+ opts.setTableName(tableName);
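+ // With SASL enabled there is no password to pass; derive Kerberos credentials from the client configuration instead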
+ if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ opts.updateKerberosCredentials(clientConfig);
+ } else {
+ opts.setPrincipal(principal);
+ }
+
+ TestIngest.ingest(connector, opts, new BatchWriterOpts());
+ }
+
+ public static void verify(Connector connector, ClientConfiguration clientConfig, String principal, int rows, int cols, int width, int offset, String tableName)
+ throws Exception {
+ verify(connector, clientConfig, principal, rows, cols, width, offset, COLF, tableName);
+ }
+
+ private static void verify(Connector connector, ClientConfiguration clientConfig, String principal, int rows, int cols, int width, int offset, String colf,
+ String tableName) throws Exception {
+ ScannerOpts scannerOpts = new ScannerOpts();
+ VerifyIngest.Opts opts = new VerifyIngest.Opts();
+ opts.rows = rows;
+ opts.cols = cols;
+ opts.dataSize = width;
+ opts.startRow = offset;
+ opts.columnFamily = colf;
+ opts.setTableName(tableName);
+ if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ opts.updateKerberosCredentials(clientConfig);
+ } else {
+ opts.setPrincipal(principal);
+ }
+
+ VerifyIngest.verifyIngest(connector, opts, scannerOpts);
+ }
+
+ public static String[] args(String... args) {
+ return args;
+ }
+
+ @Test
+ public void multiTableTest() throws Exception {
+ // Write to multiple tables
+ final String instance = cluster.getInstanceName();
+ final String keepers = cluster.getZooKeepers();
+ final ClusterControl control = cluster.getClusterControl();
+ final String prefix = getClass().getSimpleName() + "_" + testName.getMethodName();
+ ExecutorService svc = Executors.newFixedThreadPool(2);
+ Future<Integer> p1 = svc.submit(new Callable<Integer>() {
+ @Override
+ public Integer call() {
+ try {
+ ClientConfiguration clientConf = cluster.getClientConfig();
+ // Invocation is different for SASL. We're only logged in via this process's memory (not via some credentials cache on disk)
+ // Need to pass along the keytab because of that.
+ if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ String principal = getAdminPrincipal();
+ AuthenticationToken token = getAdminToken();
+ assertTrue("Expected KerberosToken, but was " + token.getClass(), token instanceof KerberosToken);
+ KerberosToken kt = (KerberosToken) token;
+ assertNotNull("Expected keytab in token", kt.getKeytab());
+ return control.exec(
+ TestMultiTableIngest.class,
+ args("--count", Integer.toString(ROWS), "-i", instance, "-z", keepers, "--tablePrefix", prefix, "--keytab", kt.getKeytab().getAbsolutePath(),
+ "-u", principal));
+ }
+
+ return control.exec(
+ TestMultiTableIngest.class,
+ args("--count", Integer.toString(ROWS), "-u", getAdminPrincipal(), "-i", instance, "-z", keepers, "-p", new String(
+ ((PasswordToken) getAdminToken()).getPassword(), Charsets.UTF_8), "--tablePrefix", prefix));
+ } catch (IOException e) {
+ log.error("Error running MultiTableIngest", e);
+ return -1;
+ }
+ }
+ });
+ Future<Integer> p2 = svc.submit(new Callable<Integer>() {
+ @Override
+ public Integer call() {
+ try {
+ ClientConfiguration clientConf = cluster.getClientConfig();
+ // Invocation is different for SASL. We're only logged in via this process's memory (not via some credentials cache on disk)
+ // Need to pass along the keytab because of that.
+ if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ String principal = getAdminPrincipal();
+ AuthenticationToken token = getAdminToken();
+ assertTrue("Expected KerberosToken, but was " + token.getClass(), token instanceof KerberosToken);
+ KerberosToken kt = (KerberosToken) token;
+ assertNotNull("Expected keytab in token", kt.getKeytab());
+ return control.exec(
+ TestMultiTableIngest.class,
+ args("--count", Integer.toString(ROWS), "--readonly", "-i", instance, "-z", keepers, "--tablePrefix", prefix, "--keytab", kt.getKeytab()
+ .getAbsolutePath(), "-u", principal));
+ }
+
+ return control.exec(
+ TestMultiTableIngest.class,
+ args("--count", Integer.toString(ROWS), "--readonly", "-u", getAdminPrincipal(), "-i", instance, "-z", keepers, "-p", new String(
+ ((PasswordToken) getAdminToken()).getPassword(), Charsets.UTF_8), "--tablePrefix", prefix));
+ } catch (IOException e) {
+ log.error("Error running MultiTableIngest", e);
+ return -1;
+ }
+ }
+ });
+ svc.shutdown();
+ while (!svc.isTerminated()) {
+ svc.awaitTermination(15, TimeUnit.SECONDS);
+ }
+ assertEquals(0, p1.get().intValue());
+ assertEquals(0, p2.get().intValue());
+ }
+
+ @Test
+ public void largeTest() throws Exception {
+ // write a few large values
+ Connector connector = getConnector();
+ String table = getUniqueNames(1)[0];
+ ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2, 1, 500000, 0, table);
+ verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2, 1, 500000, 0, table);
+ }
+
+ @Test
+ public void interleaved() throws Exception {
+ // read and write concurrently
+ final Connector connector = getConnector();
+ final String tableName = getUniqueNames(1)[0];
+ interleaveTest(connector, tableName);
+ }
+
+ static void interleaveTest(final Connector connector, final String tableName) throws Exception {
+ final AtomicBoolean fail = new AtomicBoolean(false);
+ final int CHUNKSIZE = ROWS / 10;
+ ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), CHUNKSIZE, 1, 50, 0, tableName);
+ int i;
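+ // Ingest the next chunk while a background thread verifies the previously written chunk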
+ for (i = 0; i < ROWS; i += CHUNKSIZE) {
+ final int start = i;
+ Thread verify = new Thread() {
+ @Override
+ public void run() {
+ try {
+ verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), CHUNKSIZE, 1, 50, start, tableName);
+ } catch (Exception ex) {
+ fail.set(true);
+ }
+ }
+ };
+ verify.start();
+ ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), CHUNKSIZE, 1, 50, i + CHUNKSIZE, tableName);
+ verify.join();
+ assertFalse(fail.get());
+ }
+ verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), CHUNKSIZE, 1, 50, i, tableName);
+ }
+
+ public static Text t(String s) {
+ return new Text(s);
+ }
+
+ public static Mutation m(String row, String cf, String cq, String value) {
+ Mutation m = new Mutation(t(row));
+ m.put(t(cf), t(cq), new Value(value.getBytes()));
+ return m;
+ }
+
+ @Test
+ public void localityGroupPerf() throws Exception {
+ // verify that locality groups can make look-ups faster
+ final Connector connector = getConnector();
+ final String tableName = getUniqueNames(1)[0];
+ connector.tableOperations().create(tableName);
+ connector.tableOperations().setProperty(tableName, "table.group.g1", "colf");
+ connector.tableOperations().setProperty(tableName, "table.groups.enabled", "g1");
+ ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2000, 1, 50, 0, tableName);
+ connector.tableOperations().compact(tableName, null, null, true, true);
+ BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
+ bw.addMutation(m("zzzzzzzzzzz", "colf2", "cq", "value"));
+ bw.close();
+ long now = System.currentTimeMillis();
+ Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
+ scanner.fetchColumnFamily(new Text("colf"));
+ Iterators.size(scanner.iterator());
+ long diff = System.currentTimeMillis() - now;
+ now = System.currentTimeMillis();
+ scanner = connector.createScanner(tableName, Authorizations.EMPTY);
+ scanner.fetchColumnFamily(new Text("colf2"));
+ Iterators.size(scanner.iterator());
+ long diff2 = System.currentTimeMillis() - now;
+ assertTrue(diff2 < diff);
+ }
+
+ @Test
+ public void sunnyLG() throws Exception {
+ // create a locality group, write to it and ensure it exists in the RFiles that result
+ final Connector connector = getConnector();
+ final String tableName = getUniqueNames(1)[0];
+ connector.tableOperations().create(tableName);
+ Map<String,Set<Text>> groups = new TreeMap<String,Set<Text>>();
+ groups.put("g1", Collections.singleton(t("colf")));
+ connector.tableOperations().setLocalityGroups(tableName, groups);
+ ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2000, 1, 50, 0, tableName);
+ verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2000, 1, 50, 0, tableName);
+ connector.tableOperations().flush(tableName, null, null, true);
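+ // File entries for this table's tablets live in the metadata table under the row range [tableId;, tableId<)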
+ BatchScanner bscanner = connector.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1);
+ String tableId = connector.tableOperations().tableIdMap().get(tableName);
+ bscanner.setRanges(Collections.singletonList(new Range(new Text(tableId + ";"), new Text(tableId + "<"))));
+ bscanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+ boolean foundFile = false;
+ for (Entry<Key,Value> entry : bscanner) {
+ foundFile = true;
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ PrintStream newOut = new PrintStream(baos);
+ PrintStream oldOut = System.out;
+ try {
+ System.setOut(newOut);
+ List<String> args = new ArrayList<>();
+ args.add(entry.getKey().getColumnQualifier().toString());
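+ // On standalone SASL-enabled clusters, PrintInfo also needs the Hadoop configuration to read secured HDFS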
+ if (ClusterType.STANDALONE == getClusterType() && cluster.getClientConfig().getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ args.add("--config");
+ StandaloneAccumuloCluster sac = (StandaloneAccumuloCluster) cluster;
+ String hadoopConfDir = sac.getHadoopConfDir();
+ args.add(new Path(hadoopConfDir, "core-site.xml").toString());
+ args.add(new Path(hadoopConfDir, "hdfs-site.xml").toString());
+ }
+ log.info("Invoking PrintInfo with " + args);
+ PrintInfo.main(args.toArray(new String[args.size()]));
+ newOut.flush();
+ String stdout = baos.toString();
+ assertTrue(stdout.contains("Locality group : g1"));
+ assertTrue(stdout.contains("families : [colf]"));
+ } finally {
+ newOut.close();
+ System.setOut(oldOut);
+ }
+ }
+ bscanner.close();
+ assertTrue(foundFile);
+ }
+
+ @Test
+ public void localityGroupChange() throws Exception {
+ // Make changes to locality groups and ensure nothing is lost
+ final Connector connector = getConnector();
+ String table = getUniqueNames(1)[0];
+ TableOperations to = connector.tableOperations();
+ to.create(table);
+ String[] config = new String[] {"lg1:colf", null, "lg1:colf,xyz", "lg1:colf,xyz;lg2:c1,c2"};
+ int i = 0;
+ for (String cfg : config) {
+ to.setLocalityGroups(table, getGroups(cfg));
+ ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS * (i + 1), 1, 50, ROWS * i, table);
+ to.flush(table, null, null, true);
+ verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), 0, 1, 50, ROWS * (i + 1), table);
+ i++;
+ }
+ to.delete(table);
+ to.create(table);
+ config = new String[] {"lg1:colf", null, "lg1:colf,xyz", "lg1:colf;lg2:colf",};
+ i = 1;
+ for (String cfg : config) {
+ ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS * i, 1, 50, 0, table);
+ ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS * i, 1, 50, 0, "xyz", table);
+ to.setLocalityGroups(table, getGroups(cfg));
+ to.flush(table, null, null, true);
+ verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS * i, 1, 50, 0, table);
+ verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS * i, 1, 50, 0, "xyz", table);
+ i++;
+ }
+ }
+
+ private Map<String,Set<Text>> getGroups(String cfg) {
+ Map<String,Set<Text>> groups = new TreeMap<String,Set<Text>>();
+ if (cfg != null) {
+ for (String group : cfg.split(";")) {
+ String[] parts = group.split(":");
+ Set<Text> cols = new HashSet<Text>();
+ for (String col : parts[1].split(",")) {
+ cols.add(t(col));
+ }
+ groups.put(parts[0], cols);
+ }
+ }
+ return groups;
+ }
+
+}
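The locality-group config strings exercised in localityGroupChange expand into the same structures the public API takes directly. A small sketch of one such expansion, assuming the imports already present in ReadWriteIT plus java.util.Arrays (the table name is illustrative):

    // "lg1:colf,xyz;lg2:c1,c2" parses to {lg1=[colf, xyz], lg2=[c1, c2]}
    Map<String,Set<Text>> groups = new TreeMap<String,Set<Text>>();
    groups.put("lg1", new HashSet<Text>(Arrays.asList(new Text("colf"), new Text("xyz"))));
    groups.put("lg2", new HashSet<Text>(Arrays.asList(new Text("c1"), new Text("c2"))));
    connector.tableOperations().setLocalityGroups("mytable", groups);
    // Locality groups only affect newly written files; compact to rewrite existing ones
    connector.tableOperations().compact("mytable", null, null, true, true);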
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java b/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
new file mode 100644
index 0000000..0408aa0
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.file.rfile.CreateEmpty;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * XXX As a part of verifying lossy recovery via inserting an empty rfile, this test deletes the rfiles backing test table tablets. This requires write access
+ * to the backing files of the test Accumulo mini cluster.
+ *
+ * This test should read the file location from the test harness, and that file should be on the local filesystem. If you want to take a paranoid approach, just
+ * make sure the test user doesn't have write access to the HDFS files of any colocated live Accumulo instance or any important local filesystem files.
+ */
+public class RecoveryWithEmptyRFileIT extends ConfigurableMacBase {
+ private static final Logger log = LoggerFactory.getLogger(RecoveryWithEmptyRFileIT.class);
+
+ static final int ROWS = 200000;
+ static final int COLS = 1;
+ static final String COLF = "colf";
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.useMiniDFS(true);
+ }
+
+ @Test
+ public void replaceMissingRFile() throws Exception {
+ log.info("Ingest some data, verify it was stored properly, replace an underlying rfile with an empty one and verify we can scan.");
+ Connector connector = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ ReadWriteIT.ingest(connector, cluster.getClientConfig(), "root", ROWS, COLS, 50, 0, tableName);
+ ReadWriteIT.verify(connector, cluster.getClientConfig(), "root", ROWS, COLS, 50, 0, tableName);
+
+ connector.tableOperations().flush(tableName, null, null, true);
+ connector.tableOperations().offline(tableName, true);
+
+ log.debug("Replacing rfile(s) with empty");
+ Scanner meta = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ String tableId = connector.tableOperations().tableIdMap().get(tableName);
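+ // tablet metadata rows for a table span "<tableId>;<endRow>" through "<tableId><", where "<tableId><" is the default (last) tablet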
+ meta.setRange(new Range(new Text(tableId + ";"), new Text(tableId + "<")));
+ meta.fetchColumnFamily(DataFileColumnFamily.NAME);
+ boolean foundFile = false;
+ for (Entry<Key,Value> entry : meta) {
+ foundFile = true;
+ Path rfile = new Path(entry.getKey().getColumnQualifier().toString());
+ log.debug("Removing rfile '" + rfile + "'");
+ cluster.getFileSystem().delete(rfile, false);
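+ // CreateEmpty writes a structurally valid rfile containing no entries at the deleted file's path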
+ Process info = cluster.exec(CreateEmpty.class, rfile.toString());
+ assertEquals(0, info.waitFor());
+ }
+ meta.close();
+ assertTrue(foundFile);
+
+ log.trace("invalidate cached file handles by issuing a compaction");
+ connector.tableOperations().online(tableName, true);
+ connector.tableOperations().compact(tableName, null, null, false, true);
+
+ log.debug("make sure we can still scan");
+ Scanner scan = connector.createScanner(tableName, Authorizations.EMPTY);
+ scan.setRange(new Range());
+ long cells = 0L;
+ for (Entry<Key,Value> entry : scan) {
+ if (entry != null)
+ cells++;
+ }
+ scan.close();
+ assertEquals(0L, cells);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/RegexGroupBalanceIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/RegexGroupBalanceIT.java b/test/src/main/java/org/apache/accumulo/test/functional/RegexGroupBalanceIT.java
new file mode 100644
index 0000000..a8c5bca
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/RegexGroupBalanceIT.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.accumulo.test.functional;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.master.balancer.RegexGroupBalancer;
+import org.apache.accumulo.server.master.state.TServerInstance;
+import org.apache.commons.lang3.mutable.MutableInt;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+import com.google.common.collect.HashBasedTable;
+import com.google.common.collect.Table;
+
+public class RegexGroupBalanceIT extends ConfigurableMacBase {
+
+ @Override
+ public void beforeClusterStart(MiniAccumuloConfigImpl cfg) throws Exception {
+ cfg.setNumTservers(4);
+ }
+
+ @Test(timeout = 120000)
+ public void testBalancing() throws Exception {
+ Connector conn = getConnector();
+ String tablename = getUniqueNames(1)[0];
+ conn.tableOperations().create(tablename);
+
+ SortedSet<Text> splits = new TreeSet<>();
+ splits.add(new Text("01a"));
+ splits.add(new Text("01m"));
+ splits.add(new Text("01z"));
+
+ splits.add(new Text("02a"));
+ splits.add(new Text("02f"));
+ splits.add(new Text("02r"));
+ splits.add(new Text("02z"));
+
+ splits.add(new Text("03a"));
+ splits.add(new Text("03f"));
+ splits.add(new Text("03m"));
+ splits.add(new Text("03r"));
+
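+ // balance tablets by the first two digits of their end row; tablets whose end row does not match the regex fall into the default group "03"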
+ conn.tableOperations().setProperty(tablename, RegexGroupBalancer.REGEX_PROPERTY, "(\\d\\d).*");
+ conn.tableOperations().setProperty(tablename, RegexGroupBalancer.DEFAUT_GROUP_PROPERTY, "03");
+ conn.tableOperations().setProperty(tablename, RegexGroupBalancer.WAIT_TIME_PROPERTY, "50ms");
+ conn.tableOperations().setProperty(tablename, Property.TABLE_LOAD_BALANCER.getKey(), RegexGroupBalancer.class.getName());
+
+ conn.tableOperations().addSplits(tablename, splits);
+
+ while (true) {
+ Thread.sleep(250);
+
+ Table<String,String,MutableInt> groupLocationCounts = getCounts(conn, tablename);
+
+ boolean allGood = true;
+ allGood &= checkGroup(groupLocationCounts, "01", 1, 1, 3);
+ allGood &= checkGroup(groupLocationCounts, "02", 1, 1, 4);
+ allGood &= checkGroup(groupLocationCounts, "03", 1, 2, 4);
+ allGood &= checkTabletsPerTserver(groupLocationCounts, 3, 3, 4);
+
+ if (allGood) {
+ break;
+ }
+ }
+
+ splits.clear();
+ splits.add(new Text("01b"));
+ splits.add(new Text("01f"));
+ splits.add(new Text("01l"));
+ splits.add(new Text("01r"));
+ conn.tableOperations().addSplits(tablename, splits);
+
+ while (true) {
+ Thread.sleep(250);
+
+ Table<String,String,MutableInt> groupLocationCounts = getCounts(conn, tablename);
+
+ boolean allGood = true;
+ allGood &= checkGroup(groupLocationCounts, "01", 1, 2, 4);
+ allGood &= checkGroup(groupLocationCounts, "02", 1, 1, 4);
+ allGood &= checkGroup(groupLocationCounts, "03", 1, 2, 4);
+ allGood &= checkTabletsPerTserver(groupLocationCounts, 4, 4, 4);
+
+ if (allGood) {
+ break;
+ }
+ }
+
+ // merge group 01 down to one tablet
+ conn.tableOperations().merge(tablename, null, new Text("01z"));
+
+ while (true) {
+ Thread.sleep(250);
+
+ Table<String,String,MutableInt> groupLocationCounts = getCounts(conn, tablename);
+
+ boolean allGood = true;
+ allGood &= checkGroup(groupLocationCounts, "01", 1, 1, 1);
+ allGood &= checkGroup(groupLocationCounts, "02", 1, 1, 4);
+ allGood &= checkGroup(groupLocationCounts, "03", 1, 2, 4);
+ allGood &= checkTabletsPerTserver(groupLocationCounts, 2, 3, 4);
+
+ if (allGood) {
+ break;
+ }
+ }
+ }
+
+ private boolean checkTabletsPerTserver(Table<String,String,MutableInt> groupLocationCounts, int minTabletsPerTserver, int maxTabletsPerTserver,
+ int totalTservers) {
+ // check that each tserver has between min and max tablets
+ for (Map<String,MutableInt> groups : groupLocationCounts.columnMap().values()) {
+ int sum = 0;
+ for (MutableInt mi : groups.values()) {
+ sum += mi.intValue();
+ }
+
+ if (sum < minTabletsPerTserver || sum > maxTabletsPerTserver) {
+ return false;
+ }
+ }
+
+ return groupLocationCounts.columnKeySet().size() == totalTservers;
+ }
+
+ private boolean checkGroup(Table<String,String,MutableInt> groupLocationCounts, String group, int min, int max, int tservers) {
+ Collection<MutableInt> counts = groupLocationCounts.row(group).values();
+ if (counts.size() == 0) {
+ return min == 0 && max == 0 && tservers == 0;
+ }
+ return min == Collections.min(counts).intValue() && max == Collections.max(counts).intValue() && counts.size() == tservers;
+ }
+
+ private Table<String,String,MutableInt> getCounts(Connector conn, String tablename) throws TableNotFoundException {
+ Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
+ String tableId = conn.tableOperations().tableIdMap().get(tablename);
+ s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
+
+ Table<String,String,MutableInt> groupLocationCounts = HashBasedTable.create();
+
+ for (Entry<Key,Value> entry : s) {
+ String group = entry.getKey().getRow().toString();
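+ // the default tablet's metadata row is "<tableId><" and has no end row, so it is counted in the default group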
+ if (group.endsWith("<")) {
+ group = "03";
+ } else {
+ group = group.substring(tableId.length() + 1).substring(0, 2);
+ }
+ String loc = new TServerInstance(entry.getValue(), entry.getKey().getColumnQualifier()).toString();
+
+ MutableInt count = groupLocationCounts.get(group, loc);
+ if (count == null) {
+ count = new MutableInt(0);
+ groupLocationCounts.put(group, loc, count);
+ }
+
+ count.increment();
+ }
+ return groupLocationCounts;
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/RenameIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/RenameIT.java b/test/src/main/java/org/apache/accumulo/test/functional/RenameIT.java
new file mode 100644
index 0000000..0c22196
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/RenameIT.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.junit.Test;
+
+public class RenameIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Test
+ public void renameTest() throws Exception {
+ String[] tableNames = getUniqueNames(2);
+ String name1 = tableNames[0];
+ String name2 = tableNames[1];
+ BatchWriterOpts bwOpts = new BatchWriterOpts();
+ ScannerOpts scanOpts = new ScannerOpts();
+ TestIngest.Opts opts = new TestIngest.Opts();
+ opts.createTable = true;
+ opts.setTableName(name1);
+
+ final ClientConfiguration clientConfig = cluster.getClientConfig();
+ if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ opts.updateKerberosCredentials(clientConfig);
+ } else {
+ opts.setPrincipal(getAdminPrincipal());
+ }
+
+ Connector c = getConnector();
+ TestIngest.ingest(c, opts, bwOpts);
+ c.tableOperations().rename(name1, name2);
+ TestIngest.ingest(c, opts, bwOpts);
+ VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+
+ if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ vopts.updateKerberosCredentials(clientConfig);
+ } else {
+ vopts.setPrincipal(getAdminPrincipal());
+ }
+
+ vopts.setTableName(name2);
+ VerifyIngest.verifyIngest(c, vopts, scanOpts);
+ c.tableOperations().delete(name1);
+ c.tableOperations().rename(name2, name1);
+ vopts.setTableName(name1);
+ VerifyIngest.verifyIngest(c, vopts, scanOpts);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/RestartIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/RestartIT.java b/test/src/main/java/org/apache/accumulo/test/functional/RestartIT.java
new file mode 100644
index 0000000..39e9bed
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/RestartIT.java
@@ -0,0 +1,367 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.Map.Entry;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.accumulo.cluster.ClusterControl;
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.KerberosToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.fate.zookeeper.ZooCache;
+import org.apache.accumulo.fate.zookeeper.ZooLock;
+import org.apache.accumulo.fate.zookeeper.ZooReader;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Charsets;
+
+public class RestartIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(RestartIT.class);
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 10 * 60;
+ }
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
+ cfg.setProperty(Property.GC_CYCLE_DELAY, "1s");
+ cfg.setProperty(Property.GC_CYCLE_START, "1s");
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ private static final ScannerOpts SOPTS = new ScannerOpts();
+ private static final VerifyIngest.Opts VOPTS = new VerifyIngest.Opts();
+ private static final TestIngest.Opts OPTS = new TestIngest.Opts();
+ private static final BatchWriterOpts BWOPTS = new BatchWriterOpts();
+ static {
+ OPTS.rows = VOPTS.rows = 10 * 1000;
+ }
+
+ private ExecutorService svc;
+
+ @Before
+ public void setup() throws Exception {
+ svc = Executors.newFixedThreadPool(1);
+ }
+
+ @After
+ public void teardown() throws Exception {
+ if (null == svc) {
+ return;
+ }
+
+ if (!svc.isShutdown()) {
+ svc.shutdown();
+ }
+
+ while (!svc.awaitTermination(10, TimeUnit.SECONDS)) {
+ log.info("Waiting for threadpool to terminate");
+ }
+ }
+
+ @Test
+ public void restartMaster() throws Exception {
+ Connector c = getConnector();
+ final String tableName = getUniqueNames(1)[0];
+ OPTS.setTableName(tableName);
+ VOPTS.setTableName(tableName);
+ c.tableOperations().create(tableName);
+ final AuthenticationToken token = getAdminToken();
+ final ClusterControl control = getCluster().getClusterControl();
+
+ final String[] args;
+ if (token instanceof PasswordToken) {
+ byte[] password = ((PasswordToken) token).getPassword();
+ args = new String[] {"-u", getAdminPrincipal(), "-p", new String(password, Charsets.UTF_8), "-i", cluster.getInstanceName(), "-z",
+ cluster.getZooKeepers(), "--rows", "" + OPTS.rows, "--table", tableName};
+ OPTS.setPrincipal(getAdminPrincipal());
+ VOPTS.setPrincipal(getAdminPrincipal());
+ } else if (token instanceof KerberosToken) {
+ ClusterUser rootUser = getAdminUser();
+ args = new String[] {"-u", getAdminPrincipal(), "--keytab", rootUser.getKeytab().getAbsolutePath(), "-i", cluster.getInstanceName(), "-z",
+ cluster.getZooKeepers(), "--rows", "" + OPTS.rows, "--table", tableName};
+ ClientConfiguration clientConfig = cluster.getClientConfig();
+ OPTS.updateKerberosCredentials(clientConfig);
+ VOPTS.updateKerberosCredentials(clientConfig);
+ } else {
+ throw new RuntimeException("Unknown token");
+ }
+
+ Future<Integer> ret = svc.submit(new Callable<Integer>() {
+ @Override
+ public Integer call() {
+ try {
+ return control.exec(TestIngest.class, args);
+ } catch (IOException e) {
+ log.error("Error running TestIngest", e);
+ return -1;
+ }
+ }
+ });
+
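+ // bounce the master while ingest is still running; the ingest client should ride through the restart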
+ control.stopAllServers(ServerType.MASTER);
+ control.startAllServers(ServerType.MASTER);
+ assertEquals(0, ret.get().intValue());
+ VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
+ }
+
+ @Test
+ public void restartMasterRecovery() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ OPTS.setTableName(tableName);
+ VOPTS.setTableName(tableName);
+ ClientConfiguration clientConfig = cluster.getClientConfig();
+ if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ OPTS.updateKerberosCredentials(clientConfig);
+ VOPTS.updateKerberosCredentials(clientConfig);
+ } else {
+ OPTS.setPrincipal(getAdminPrincipal());
+ VOPTS.setPrincipal(getAdminPrincipal());
+ }
+ TestIngest.ingest(c, OPTS, BWOPTS);
+ ClusterControl control = getCluster().getClusterControl();
+
+ // TODO implement a kill all too?
+ // cluster.stop() would also stop ZooKeeper
+ control.stopAllServers(ServerType.MASTER);
+ control.stopAllServers(ServerType.TRACER);
+ control.stopAllServers(ServerType.TABLET_SERVER);
+ control.stopAllServers(ServerType.GARBAGE_COLLECTOR);
+ control.stopAllServers(ServerType.MONITOR);
+
+ ZooReader zreader = new ZooReader(c.getInstance().getZooKeepers(), c.getInstance().getZooKeepersSessionTimeOut());
+ ZooCache zcache = new ZooCache(zreader, null);
+ byte[] masterLockData;
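+ // wait until the dead master's ZooKeeper lock is released so a restarted master can acquire it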
+ do {
+ masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(c.getInstance()) + Constants.ZMASTER_LOCK, null);
+ if (null != masterLockData) {
+ log.info("Master lock is still held");
+ Thread.sleep(1000);
+ }
+ } while (null != masterLockData);
+
+ cluster.start();
+ UtilWaitThread.sleep(5);
+ control.stopAllServers(ServerType.MASTER);
+
+ masterLockData = new byte[0];
+ do {
+ masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(c.getInstance()) + Constants.ZMASTER_LOCK, null);
+ if (null != masterLockData) {
+ log.info("Master lock is still held");
+ Thread.sleep(1000);
+ }
+ } while (null != masterLockData);
+ cluster.start();
+ VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
+ }
+
+ @Test
+ public void restartMasterSplit() throws Exception {
+ Connector c = getConnector();
+ final String tableName = getUniqueNames(1)[0];
+ final AuthenticationToken token = getAdminToken();
+ final ClusterControl control = getCluster().getClusterControl();
+ VOPTS.setTableName(tableName);
+ c.tableOperations().create(tableName);
+ c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "5K");
+
+ final String[] args;
+ if (token instanceof PasswordToken) {
+ byte[] password = ((PasswordToken) token).getPassword();
+ args = new String[] {"-u", getAdminPrincipal(), "-p", new String(password, Charsets.UTF_8), "-i", cluster.getInstanceName(), "-z",
+ cluster.getZooKeepers(), "--rows", Integer.toString(VOPTS.rows), "--table", tableName};
+ OPTS.setPrincipal(getAdminPrincipal());
+ VOPTS.setPrincipal(getAdminPrincipal());
+ } else if (token instanceof KerberosToken) {
+ ClusterUser rootUser = getAdminUser();
+ args = new String[] {"-u", getAdminPrincipal(), "--keytab", rootUser.getKeytab().getAbsolutePath(), "-i", cluster.getInstanceName(), "-z",
+ cluster.getZooKeepers(), "--rows", Integer.toString(VOPTS.rows), "--table", tableName};
+ ClientConfiguration clientConfig = cluster.getClientConfig();
+ OPTS.updateKerberosCredentials(clientConfig);
+ VOPTS.updateKerberosCredentials(clientConfig);
+ } else {
+ throw new RuntimeException("Unknown token");
+ }
+
+ Future<Integer> ret = svc.submit(new Callable<Integer>() {
+ @Override
+ public Integer call() {
+ try {
+ return control.exec(TestIngest.class, args);
+ } catch (Exception e) {
+ log.error("Error running TestIngest", e);
+ return -1;
+ }
+ }
+ });
+
+ control.stopAllServers(ServerType.MASTER);
+
+ ZooReader zreader = new ZooReader(c.getInstance().getZooKeepers(), c.getInstance().getZooKeepersSessionTimeOut());
+ ZooCache zcache = new ZooCache(zreader, null);
+ byte[] masterLockData;
+ do {
+ masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(c.getInstance()) + Constants.ZMASTER_LOCK, null);
+ if (null != masterLockData) {
+ log.info("Master lock is still held");
+ Thread.sleep(1000);
+ }
+ } while (null != masterLockData);
+
+ cluster.start();
+ assertEquals(0, ret.get().intValue());
+ VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
+ }
+
+ @Test
+ public void killedTabletServer() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ OPTS.setTableName(tableName);
+ VOPTS.setTableName(tableName);
+ ClientConfiguration clientConfig = cluster.getClientConfig();
+ if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ OPTS.updateKerberosCredentials(clientConfig);
+ VOPTS.updateKerberosCredentials(clientConfig);
+ } else {
+ OPTS.setPrincipal(getAdminPrincipal());
+ VOPTS.setPrincipal(getAdminPrincipal());
+ }
+ TestIngest.ingest(c, OPTS, BWOPTS);
+ VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
+ cluster.getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
+ cluster.start();
+ VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
+ }
+
+ @Test
+ public void killedTabletServer2() throws Exception {
+ final Connector c = getConnector();
+ final String[] names = getUniqueNames(2);
+ final String tableName = names[0];
+ final ClusterControl control = getCluster().getClusterControl();
+ c.tableOperations().create(tableName);
+ // Original test started and then stopped a GC. Not sure why it did this. The GC was
+ // already running by default, and it would have nothing to do after only creating a table
+ control.stopAllServers(ServerType.TABLET_SERVER);
+
+ cluster.start();
+ c.tableOperations().create(names[1]);
+ }
+
+ @Test
+ public void killedTabletServerDuringShutdown() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ OPTS.setTableName(tableName);
+ ClientConfiguration clientConfig = cluster.getClientConfig();
+ if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ OPTS.updateKerberosCredentials(clientConfig);
+ } else {
+ OPTS.setPrincipal(getAdminPrincipal());
+ }
+ TestIngest.ingest(c, OPTS, BWOPTS);
+ try {
+ getCluster().getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
+ getCluster().getClusterControl().adminStopAll();
+ } finally {
+ getCluster().start();
+ }
+ }
+
+ @Test
+ public void shutdownDuringCompactingSplitting() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ VOPTS.setTableName(tableName);
+ ClientConfiguration clientConfig = cluster.getClientConfig();
+ if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ OPTS.updateKerberosCredentials(clientConfig);
+ VOPTS.updateKerberosCredentials(clientConfig);
+ } else {
+ OPTS.setPrincipal(getAdminPrincipal());
+ VOPTS.setPrincipal(getAdminPrincipal());
+ }
+ c.tableOperations().create(tableName);
+ c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
+ String splitThreshold = null;
+ for (Entry<String,String> entry : c.tableOperations().getProperties(tableName)) {
+ if (entry.getKey().equals(Property.TABLE_SPLIT_THRESHOLD.getKey())) {
+ splitThreshold = entry.getValue();
+ break;
+ }
+ }
+ Assert.assertNotNull(splitThreshold);
+ try {
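+ // temporarily lower the metadata table's split threshold so it also splits during ingest; restored below for standalone clusters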
+ c.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "20K");
+ TestIngest.Opts opts = new TestIngest.Opts();
+ opts.setTableName(tableName);
+ if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ opts.updateKerberosCredentials(clientConfig);
+ } else {
+ opts.setPrincipal(getAdminPrincipal());
+ }
+ TestIngest.ingest(c, opts, BWOPTS);
+ c.tableOperations().flush(tableName, null, null, false);
+ VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
+ getCluster().stop();
+ } finally {
+ if (getClusterType() == ClusterType.STANDALONE) {
+ getCluster().start();
+ c.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), splitThreshold);
+ }
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/RestartStressIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/RestartStressIT.java b/test/src/main/java/org/apache/accumulo/test/functional/RestartStressIT.java
new file mode 100644
index 0000000..abfd5d8
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/RestartStressIT.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.accumulo.cluster.ClusterControl;
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.KerberosToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Charsets;
+
+public class RestartStressIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(RestartStressIT.class);
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ Map<String,String> opts = cfg.getSiteConfig();
+ opts.put(Property.TSERV_MAXMEM.getKey(), "100K");
+ opts.put(Property.TSERV_MAJC_DELAY.getKey(), "100ms");
+ opts.put(Property.TSERV_WALOG_MAX_SIZE.getKey(), "1M");
+ opts.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "5s");
+ opts.put(Property.MASTER_RECOVERY_DELAY.getKey(), "1s");
+ cfg.setSiteConfig(opts);
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 10 * 60;
+ }
+
+ private ExecutorService svc;
+
+ @Before
+ public void setup() throws Exception {
+ svc = Executors.newFixedThreadPool(1);
+ }
+
+ @After
+ public void teardown() throws Exception {
+ if (null == svc) {
+ return;
+ }
+
+ if (!svc.isShutdown()) {
+ svc.shutdown();
+ }
+
+ while (!svc.awaitTermination(10, TimeUnit.SECONDS)) {
+ log.info("Waiting for threadpool to terminate");
+ }
+ }
+
+ private static final VerifyIngest.Opts VOPTS;
+ static {
+ VOPTS = new VerifyIngest.Opts();
+ VOPTS.rows = 10 * 1000;
+ }
+ private static final ScannerOpts SOPTS = new ScannerOpts();
+
+ @Test
+ public void test() throws Exception {
+ final Connector c = getConnector();
+ final String tableName = getUniqueNames(1)[0];
+ final AuthenticationToken token = getAdminToken();
+ c.tableOperations().create(tableName);
+ c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "500K");
+ final ClusterControl control = getCluster().getClusterControl();
+ final String[] args;
+ if (token instanceof PasswordToken) {
+ byte[] password = ((PasswordToken) token).getPassword();
+ args = new String[] {"-u", getAdminPrincipal(), "-p", new String(password, Charsets.UTF_8), "-i", cluster.getInstanceName(), "-z",
+ cluster.getZooKeepers(), "--rows", "" + VOPTS.rows, "--table", tableName};
+ } else if (token instanceof KerberosToken) {
+ ClusterUser rootUser = getAdminUser();
+ args = new String[] {"-u", getAdminPrincipal(), "--keytab", rootUser.getKeytab().getAbsolutePath(), "-i", cluster.getInstanceName(), "-z",
+ cluster.getZooKeepers(), "--rows", "" + VOPTS.rows, "--table", tableName};
+ } else {
+ throw new RuntimeException("Unrecognized token");
+ }
+
+ Future<Integer> retCode = svc.submit(new Callable<Integer>() {
+ @Override
+ public Integer call() {
+ try {
+ return control.exec(TestIngest.class, args);
+ } catch (Exception e) {
+ log.error("Error running TestIngest", e);
+ return -1;
+ }
+ }
+ });
+
+ for (int i = 0; i < 2; i++) {
+ UtilWaitThread.sleep(10 * 1000);
+ control.stopAllServers(ServerType.TABLET_SERVER);
+ control.startAllServers(ServerType.TABLET_SERVER);
+ }
+ assertEquals(0, retCode.get().intValue());
+ VOPTS.setTableName(tableName);
+
+ if (token instanceof PasswordToken) {
+ VOPTS.setPrincipal(getAdminPrincipal());
+ } else if (token instanceof KerberosToken) {
+ VOPTS.updateKerberosCredentials(cluster.getClientConfig());
+ } else {
+ throw new RuntimeException("Unrecognized token");
+ }
+
+ VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/RowDeleteIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/RowDeleteIT.java b/test/src/main/java/org/apache/accumulo/test/functional/RowDeleteIT.java
new file mode 100644
index 0000000..75c66bd
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/RowDeleteIT.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.apache.accumulo.test.functional.FunctionalTestUtils.checkRFiles;
+import static org.apache.accumulo.test.functional.FunctionalTestUtils.nm;
+import static org.junit.Assert.assertEquals;
+
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
+import org.apache.accumulo.core.iterators.user.RowDeletingIterator;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class RowDeleteIT extends AccumuloClusterHarness {
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ Map<String,String> siteConfig = cfg.getSiteConfig();
+ siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "50ms");
+ cfg.setSiteConfig(siteConfig);
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Test
+ public void run() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
+ groups.put("lg1", Collections.singleton(new Text("foo")));
+ groups.put("dg", Collections.<Text> emptySet());
+ c.tableOperations().setLocalityGroups(tableName, groups);
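+ // apply row deletes only at major compaction, and set a high compaction ratio so compactions run only when explicitly requested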
+ IteratorSetting setting = new IteratorSetting(30, RowDeletingIterator.class);
+ c.tableOperations().attachIterator(tableName, setting, EnumSet.of(IteratorScope.majc));
+ c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "100");
+
+ BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+
+ bw.addMutation(nm("r1", "foo", "cf1", "v1"));
+ bw.addMutation(nm("r1", "bar", "cf1", "v2"));
+
+ bw.flush();
+ c.tableOperations().flush(tableName, null, null, true);
+
+ checkRFiles(c, tableName, 1, 1, 1, 1);
+
+ Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
+ int count = Iterators.size(scanner.iterator());
+ assertEquals("count == " + count, 2, count);
+
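+ // insert the row-delete marker: an empty column whose value tells RowDeletingIterator to suppress the whole row at the next major compaction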
+ bw.addMutation(nm("r1", "", "", RowDeletingIterator.DELETE_ROW_VALUE));
+
+ bw.flush();
+ c.tableOperations().flush(tableName, null, null, true);
+
+ checkRFiles(c, tableName, 1, 1, 2, 2);
+
+ scanner = c.createScanner(tableName, Authorizations.EMPTY);
+ count = Iterators.size(scanner.iterator());
+ assertEquals("count == " + count, 3, count);
+
+ c.tableOperations().compact(tableName, null, null, false, true);
+
+ checkRFiles(c, tableName, 1, 1, 0, 0);
+
+ scanner = c.createScanner(tableName, Authorizations.EMPTY);
+ count = Iterators.size(scanner.iterator());
+ assertEquals("count == " + count, 0, count);
+ bw.close();
+
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/ScanIdIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ScanIdIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ScanIdIT.java
new file mode 100644
index 0000000..863ac78
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ScanIdIT.java
@@ -0,0 +1,385 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static com.google.common.base.Charsets.UTF_8;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.admin.ActiveScan;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.IteratorUtil;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Integration test for ACCUMULO-2641, which adds a scan id to the thrift protocol so that {@code org.apache.accumulo.core.client.admin.ActiveScan.getScanid()}
+ * returns a unique scan id.
+ * <p/>
+ * The test uses the Minicluster and the {@code org.apache.accumulo.test.functional.SlowIterator} to create multiple scan sessions. The test exercises multiple
+ * tablet servers with splits and multiple ranges to force the scans to occur across multiple tablet servers for completeness.
+ * <p/>
+ * This patch modified thrift; the TraceRepoDeserializationTest test seems to fail unless the following is added:
+ * <p/>
+ * private static final long serialVersionUID = -4659975753252858243l;
+ * <p/>
+ * back into org.apache.accumulo.trace.thrift.TInfo until that test signature is regenerated.
+ */
+public class ScanIdIT extends AccumuloClusterHarness {
+
+ private static final Logger log = LoggerFactory.getLogger(ScanIdIT.class);
+
+ private static final int NUM_SCANNERS = 8;
+
+ private static final int NUM_DATA_ROWS = 100;
+
+ private static final Random random = new Random();
+
+ private static final ExecutorService pool = Executors.newFixedThreadPool(NUM_SCANNERS);
+
+ private static final AtomicBoolean testInProgress = new AtomicBoolean(true);
+
+ private static final Map<Integer,Value> resultsByWorker = new ConcurrentHashMap<Integer,Value>();
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ /**
+ * @throws Exception
+ * any exception is a test failure.
+ */
+ @Test
+ public void testScanId() throws Exception {
+
+ final String tableName = getUniqueNames(1)[0];
+ Connector conn = getConnector();
+ conn.tableOperations().create(tableName);
+
+ addSplits(conn, tableName);
+
+ log.info("Splits added");
+
+ generateSampleData(conn, tableName);
+
+ log.info("Generated data for {}", tableName);
+
+ attachSlowIterator(conn, tableName);
+
+ CountDownLatch latch = new CountDownLatch(NUM_SCANNERS);
+
+ for (int scannerIndex = 0; scannerIndex < NUM_SCANNERS; scannerIndex++) {
+ ScannerThread st = new ScannerThread(conn, scannerIndex, tableName, latch);
+ pool.submit(st);
+ }
+
+ // wait for scanners to report a result.
+ while (testInProgress.get()) {
+
+ if (resultsByWorker.size() < NUM_SCANNERS) {
+ log.trace("Results reported {}", resultsByWorker.size());
+ UtilWaitThread.sleep(750);
+ } else {
+ // each worker has reported at least one result.
+ testInProgress.set(false);
+
+ log.debug("Final result count {}", resultsByWorker.size());
+
+ // delay to allow scanners to react to end of test and cleanly close.
+ UtilWaitThread.sleep(1000);
+ }
+
+ }
+
+ // all scanners have reported at least 1 result, so check for unique scan ids.
+ Set<Long> scanIds = new HashSet<Long>();
+
+ List<String> tservers = conn.instanceOperations().getTabletServers();
+
+ log.debug("tablet servers {}", tservers.toString());
+
+ for (String tserver : tservers) {
+
+ List<ActiveScan> activeScans = null;
+ for (int i = 0; i < 10; i++) {
+ try {
+ activeScans = conn.instanceOperations().getActiveScans(tserver);
+ break;
+ } catch (AccumuloException e) {
+ if (e.getCause() instanceof TableNotFoundException) {
+ log.debug("Got TableNotFoundException, will retry");
+ Thread.sleep(200);
+ continue;
+ }
+ throw e;
+ }
+ }
+
+ assertNotNull("Repeatedly got exception trying to active scans", activeScans);
+
+ log.debug("TServer {} has {} active scans", tserver, activeScans.size());
+
+ for (ActiveScan scan : activeScans) {
+ log.debug("Tserver {} scan id {}", tserver, scan.getScanid());
+ scanIds.add(scan.getScanid());
+ }
+ }
+
+ assertTrue("Expected at least " + NUM_SCANNERS + " scanIds, but saw " + scanIds.size(), NUM_SCANNERS <= scanIds.size());
+
+ }
+
+ /**
+ * Runs scanner in separate thread to allow multiple scanners to execute in parallel.
+ * <p/>
+ * The thread run method is terminated when the testInProgress flag is set to false.
+ */
+ private static class ScannerThread implements Runnable {
+
+ private final Connector connector;
+ private Scanner scanner = null;
+ private final int workerIndex;
+ private final String tablename;
+ private final CountDownLatch latch;
+
+ public ScannerThread(final Connector connector, final int workerIndex, final String tablename, final CountDownLatch latch) {
+ this.connector = connector;
+ this.workerIndex = workerIndex;
+ this.tablename = tablename;
+ this.latch = latch;
+ }
+
+ /**
+ * execute the scan across the sample data and put scan result into result map until testInProgress flag is set to false.
+ */
+ @Override
+ public void run() {
+
+ latch.countDown();
+ try {
+ latch.await();
+ } catch (InterruptedException e) {
+ log.error("Thread interrupted with id {}", workerIndex);
+ Thread.currentThread().interrupt();
+ return;
+ }
+
+ log.debug("Creating scanner in worker thread {}", workerIndex);
+
+ try {
+
+ scanner = connector.createScanner(tablename, new Authorizations());
+
+ // Never start readahead
+ scanner.setReadaheadThreshold(Long.MAX_VALUE);
+ scanner.setBatchSize(1);
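+ // a batch size of 1, combined with the slow iterator, keeps each scan session active on the server long enough to observe its scan id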
+
+ // create different ranges to try to hit more than one tablet.
+ scanner.setRange(new Range(new Text(Integer.toString(workerIndex)), new Text("9")));
+
+ } catch (TableNotFoundException e) {
+ throw new IllegalStateException("Initialization failure. Could not create scanner", e);
+ }
+
+ scanner.fetchColumnFamily(new Text("fam1"));
+
+ for (Map.Entry<Key,Value> entry : scanner) {
+
+ // exit when success condition is met.
+ if (!testInProgress.get()) {
+ scanner.clearScanIterators();
+ scanner.close();
+
+ return;
+ }
+
+ Text row = entry.getKey().getRow();
+
+ log.debug("worker {}, row {}", workerIndex, row.toString());
+
+ if (entry.getValue() != null) {
+
+ Value prevValue = resultsByWorker.put(workerIndex, entry.getValue());
+
+ // values should always be increasing
+ if (prevValue != null) {
+
+ log.trace("worker {} values {}", workerIndex, String.format("%1$s < %2$s", prevValue, entry.getValue()));
+
+ assertTrue(prevValue.compareTo(entry.getValue()) > 0);
+ }
+ } else {
+ log.info("Scanner returned null");
+ fail("Scanner returned unexpected null value");
+ }
+
+ }
+
+ log.debug("Scanner ran out of data. (info only, not an error) ");
+
+ }
+ }
+
+ /**
+ * Create splits on the table and force migration by taking the table offline and then bringing it back online for the test.
+ *
+ * @param conn
+ * Accumulo connector to the test cluster or MAC instance.
+ */
+ private void addSplits(final Connector conn, final String tableName) {
+
+ SortedSet<Text> splits = createSplits();
+
+ try {
+
+ conn.tableOperations().addSplits(tableName, splits);
+
+ conn.tableOperations().offline(tableName, true);
+
+ UtilWaitThread.sleep(2000);
+ conn.tableOperations().online(tableName, true);
+
+ for (Text split : conn.tableOperations().listSplits(tableName)) {
+ log.trace("Split {}", split);
+ }
+
+ } catch (AccumuloSecurityException e) {
+ throw new IllegalStateException("Initialization failed. Could not add splits to " + tableName, e);
+ } catch (TableNotFoundException e) {
+ throw new IllegalStateException("Initialization failed. Could not add splits to " + tableName, e);
+ } catch (AccumuloException e) {
+ throw new IllegalStateException("Initialization failed. Could not add splits to " + tableName, e);
+ }
+
+ }
+
+ /**
+ * Create splits to distribute data across multiple tservers.
+ *
+ * @return splits in sorted set for addSplits.
+ */
+ private SortedSet<Text> createSplits() {
+
+ SortedSet<Text> splits = new TreeSet<Text>();
+
+ for (int split = 0; split < 10; split++) {
+ splits.add(new Text(Integer.toString(split)));
+ }
+
+ return splits;
+ }
+
+ /**
+ * Generate some sample data using random row id to distribute across splits.
+ * <p/>
+ * The primary goal is to determine that each scanner is assigned a unique scan id. This test also checks that the count value for fam1 increases if a scanner
+ * reads multiple values, but that is a secondary consideration included for completeness.
+ *
+ * @param connector
+ * Accumulo connector to the test cluster or MAC instance.
+ */
+ private void generateSampleData(Connector connector, final String tablename) {
+
+ try {
+
+ BatchWriter bw = connector.createBatchWriter(tablename, new BatchWriterConfig());
+
+ ColumnVisibility vis = new ColumnVisibility("public");
+
+ for (int i = 0; i < NUM_DATA_ROWS; i++) {
+
+ Text rowId = new Text(String.format("%d", ((random.nextInt(10) * 100) + i)));
+
+ Mutation m = new Mutation(rowId);
+ m.put(new Text("fam1"), new Text("count"), new Value(Integer.toString(i).getBytes(UTF_8)));
+ m.put(new Text("fam1"), new Text("positive"), vis, new Value(Integer.toString(NUM_DATA_ROWS - i).getBytes(UTF_8)));
+ m.put(new Text("fam1"), new Text("negative"), vis, new Value(Integer.toString(i - NUM_DATA_ROWS).getBytes(UTF_8)));
+
+ log.trace("Added row {}", rowId);
+
+ bw.addMutation(m);
+ }
+
+ bw.close();
+ } catch (TableNotFoundException ex) {
+ throw new IllegalStateException("Initialization failed. Could not create test data", ex);
+ } catch (MutationsRejectedException ex) {
+ throw new IllegalStateException("Initialization failed. Could not create test data", ex);
+ }
+ }
+
+ /**
+ * Attach the test slow iterator so that we have time to read the scan id without creating a large dataset. Uses fairly long sleep and seek-sleep times because
+ * we are not concerned with how much data is read and we do not read all of the data - the test stops once each scanner reports a scan id.
+ *
+ * @param connector
+ * Accumulo connector to the test cluster or MAC instance.
+ */
+ private void attachSlowIterator(Connector connector, final String tablename) {
+ try {
+
+ IteratorSetting slowIter = new IteratorSetting(50, "slowIter", "org.apache.accumulo.test.functional.SlowIterator");
+ slowIter.addOption("sleepTime", "200");
+ slowIter.addOption("seekSleepTime", "200");
+
+ connector.tableOperations().attachIterator(tablename, slowIter, EnumSet.of(IteratorUtil.IteratorScope.scan));
+
+ } catch (AccumuloException ex) {
+ throw new IllegalStateException("Initialization failed. Could not attach slow iterator", ex);
+ } catch (TableNotFoundException ex) {
+ throw new IllegalStateException("Initialization failed. Could not attach slow iterator", ex);
+ } catch (AccumuloSecurityException ex) {
+ throw new IllegalStateException("Initialization failed. Could not attach slow iterator", ex);
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ScanIteratorIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
new file mode 100644
index 0000000..3453303
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.ScannerBase;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class ScanIteratorIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 30;
+ }
+
+ @Test
+ public void run() throws Exception {
+ String tableName = getUniqueNames(1)[0];
+ Connector c = getConnector();
+ c.tableOperations().create(tableName);
+
+ BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+
+ for (int i = 0; i < 1000; i++) {
+ Mutation m = new Mutation(new Text(String.format("%06d", i)));
+ m.put(new Text("cf1"), new Text("cq1"), new Value(Integer.toString(1000 - i).getBytes(UTF_8)));
+ m.put(new Text("cf1"), new Text("cq2"), new Value(Integer.toString(i - 1000).getBytes(UTF_8)));
+
+ bw.addMutation(m);
+ }
+
+ bw.close();
+
+ Scanner scanner = c.createScanner(tableName, new Authorizations());
+
+ setupIter(scanner);
+ verify(scanner, 1, 999);
+
+ BatchScanner bscanner = c.createBatchScanner(tableName, new Authorizations(), 3);
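+ // a range from null to null covers the entire table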
+ bscanner.setRanges(Collections.singleton(new Range((Key) null, null)));
+
+ setupIter(bscanner);
+ verify(bscanner, 1, 999);
+
+ ArrayList<Range> ranges = new ArrayList<Range>();
+ ranges.add(new Range(new Text(String.format("%06d", 1))));
+ ranges.add(new Range(new Text(String.format("%06d", 6)), new Text(String.format("%06d", 16))));
+ ranges.add(new Range(new Text(String.format("%06d", 20))));
+ ranges.add(new Range(new Text(String.format("%06d", 23))));
+ ranges.add(new Range(new Text(String.format("%06d", 56)), new Text(String.format("%06d", 61))));
+ ranges.add(new Range(new Text(String.format("%06d", 501)), new Text(String.format("%06d", 504))));
+ ranges.add(new Range(new Text(String.format("%06d", 998)), new Text(String.format("%06d", 1000))));
+
+ HashSet<Integer> got = new HashSet<Integer>();
+ HashSet<Integer> expected = new HashSet<Integer>();
+ for (int i : new int[] {1, 7, 9, 11, 13, 15, 23, 57, 59, 61, 501, 503, 999}) {
+ expected.add(i);
+ }
+
+ bscanner.setRanges(ranges);
+
+ for (Entry<Key,Value> entry : bscanner) {
+ got.add(Integer.parseInt(entry.getKey().getRow().toString()));
+ }
+
+ System.out.println("got : " + got);
+
+ if (!got.equals(expected)) {
+ throw new Exception(got + " != " + expected);
+ }
+
+ bscanner.close();
+
+ }
+
+ private void verify(Iterable<Entry<Key,Value>> scanner, int start, int finish) throws Exception {
+
+ int expected = start;
+ for (Entry<Key,Value> entry : scanner) {
+ if (Integer.parseInt(entry.getKey().getRow().toString()) != expected) {
+ throw new Exception("Saw unexpexted " + entry.getKey().getRow() + " " + expected);
+ }
+
+ if (entry.getKey().getColumnQualifier().toString().equals("cq2")) {
+ expected += 2;
+ }
+ }
+
+ if (expected != finish + 2) {
+ throw new Exception("Ended at " + expected + " not " + (finish + 2));
+ }
+ }
+
+ private void setupIter(ScannerBase scanner) throws Exception {
+ IteratorSetting dropMod = new IteratorSetting(50, "dropMod", "org.apache.accumulo.test.functional.DropModIter");
+ dropMod.addOption("mod", "2");
+ dropMod.addOption("drop", "0");
+ scanner.addScanIterator(dropMod);
+ }
+
+}
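Note: DropModIter is configured above only by class name; its source is not part of this diff. A minimal sketch of such an iterator, assuming it is implemented as a Filter that parses its "mod" and "drop" options and suppresses rows whose integer value is congruent to drop modulo mod (consistent with the odd-rows-only expectations in verify()):

    package org.apache.accumulo.test.functional;

    import java.io.IOException;
    import java.util.Map;

    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.iterators.Filter;
    import org.apache.accumulo.core.iterators.IteratorEnvironment;
    import org.apache.accumulo.core.iterators.SortedKeyValueIterator;

    // Hypothetical sketch; the real DropModIter may be implemented differently.
    public class DropModIterSketch extends Filter {
      private int mod;
      private int drop;

      @Override
      public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
        super.init(source, options, env);
        mod = Integer.parseInt(options.get("mod"));
        drop = Integer.parseInt(options.get("drop"));
      }

      @Override
      public boolean accept(Key k, Value v) {
        // With mod=2 and drop=0, even-numbered rows are dropped, leaving 1, 3, ..., 999.
        return Integer.parseInt(k.getRow().toString()) % mod != drop;
      }
    }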
[42/43] accumulo git commit: ACCUMULO-3871 move ITs into distro jar,
stop building test jar
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/harness/conf/AccumuloMiniClusterConfiguration.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/harness/conf/AccumuloMiniClusterConfiguration.java b/test/src/main/java/org/apache/accumulo/harness/conf/AccumuloMiniClusterConfiguration.java
new file mode 100644
index 0000000..4d233a5
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/harness/conf/AccumuloMiniClusterConfiguration.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.harness.conf;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.KerberosToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.harness.AccumuloClusterHarness.ClusterType;
+import org.apache.accumulo.harness.MiniClusterHarness;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Extract configuration properties for a MiniAccumuloCluster from Java properties
+ */
+public class AccumuloMiniClusterConfiguration extends AccumuloClusterPropertyConfiguration {
+ private static final Logger log = LoggerFactory.getLogger(AccumuloMiniClusterConfiguration.class);
+ private static final String TRUE = Boolean.toString(true);
+
+ public static final String ACCUMULO_MINI_PRINCIPAL_KEY = ACCUMULO_MINI_PREFIX + "principal";
+ public static final String ACCUMULO_MINI_PRINCIPAL_DEFAULT = "root";
+ public static final String ACCUMULO_MINI_PASSWORD_KEY = ACCUMULO_MINI_PREFIX + "password";
+ public static final String ACCUMULO_MINI_PASSWORD_DEFAULT = "rootPassword1";
+
+ private final Map<String,String> conf;
+ private final boolean saslEnabled;
+ private ClientConfiguration clientConf;
+
+ public AccumuloMiniClusterConfiguration() {
+ ClusterType type = getClusterType();
+ if (ClusterType.MINI != type) {
+ throw new IllegalStateException("Expected only to see mini cluster state");
+ }
+
+ this.conf = getConfiguration(type);
+ this.saslEnabled = TRUE.equals(System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION));
+ log.debug("SASL is {}enabled", (saslEnabled ? "" : "not "));
+ }
+
+ @Override
+ public String getAdminPrincipal() {
+ if (saslEnabled) {
+ return AccumuloClusterHarness.getKdc().getRootUser().getPrincipal();
+ } else {
+ String principal = conf.get(ACCUMULO_MINI_PRINCIPAL_KEY);
+ if (null == principal) {
+ principal = ACCUMULO_MINI_PRINCIPAL_DEFAULT;
+ }
+
+ return principal;
+ }
+ }
+
+ @Override
+ public AuthenticationToken getAdminToken() {
+ if (saslEnabled) {
+ // Turn on Kerberos authentication so UGI acts properly
+ final Configuration conf = new Configuration(false);
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+ UserGroupInformation.setConfiguration(conf);
+
+ ClusterUser rootUser = AccumuloClusterHarness.getKdc().getRootUser();
+ try {
+ return new KerberosToken(rootUser.getPrincipal(), rootUser.getKeytab(), true);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ } else {
+ String password = conf.get(ACCUMULO_MINI_PASSWORD_KEY);
+ if (null == password) {
+ password = ACCUMULO_MINI_PASSWORD_DEFAULT;
+ }
+
+ return new PasswordToken(password);
+ }
+ }
+
+ @Override
+ public ClusterType getClusterType() {
+ return ClusterType.MINI;
+ }
+
+ @Override
+ public ClientConfiguration getClientConf() {
+ return clientConf;
+ }
+
+ public void setClientConf(ClientConfiguration conf) {
+ Preconditions.checkNotNull(conf, "Client configuration was null");
+ this.clientConf = conf;
+ }
+}
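Note: a usage sketch for the class above, assuming (per its Javadoc) that the parent AccumuloClusterPropertyConfiguration resolves these keys from Java system properties and that the cluster type resolves to MINI; the values shown are illustrative only:

    // Override the mini cluster admin credentials before the harness builds its configuration.
    System.setProperty(AccumuloMiniClusterConfiguration.ACCUMULO_MINI_PRINCIPAL_KEY, "root");
    System.setProperty(AccumuloMiniClusterConfiguration.ACCUMULO_MINI_PASSWORD_KEY, "rootPassword1");

    AccumuloMiniClusterConfiguration config = new AccumuloMiniClusterConfiguration();
    String principal = config.getAdminPrincipal();      // "root"
    AuthenticationToken token = config.getAdminToken(); // PasswordToken("rootPassword1") when SASL is off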
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/harness/conf/StandaloneAccumuloClusterConfiguration.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/harness/conf/StandaloneAccumuloClusterConfiguration.java b/test/src/main/java/org/apache/accumulo/harness/conf/StandaloneAccumuloClusterConfiguration.java
new file mode 100644
index 0000000..ba9dcef
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/harness/conf/StandaloneAccumuloClusterConfiguration.java
@@ -0,0 +1,252 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.harness.conf;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.KerberosToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.harness.AccumuloClusterHarness.ClusterType;
+import org.apache.commons.configuration.ConfigurationException;
+import org.apache.hadoop.fs.Path;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Extract connection information to a standalone Accumulo instance from Java properties
+ */
+public class StandaloneAccumuloClusterConfiguration extends AccumuloClusterPropertyConfiguration {
+ private static final Logger log = LoggerFactory.getLogger(StandaloneAccumuloClusterConfiguration.class);
+
+ public static final String ACCUMULO_STANDALONE_ADMIN_PRINCIPAL_KEY = ACCUMULO_STANDALONE_PREFIX + "admin.principal";
+ public static final String ACCUMULO_STANDALONE_ADMIN_PRINCIPAL_DEFAULT = "root";
+ public static final String ACCUMULO_STANDALONE_PASSWORD_KEY = ACCUMULO_STANDALONE_PREFIX + "admin.password";
+ public static final String ACCUMULO_STANDALONE_PASSWORD_DEFAULT = "rootPassword1";
+ public static final String ACCUMULO_STANDALONE_ADMIN_KEYTAB_KEY = ACCUMULO_STANDALONE_PREFIX + "admin.keytab";
+ public static final String ACCUMULO_STANDALONE_ZOOKEEPERS_KEY = ACCUMULO_STANDALONE_PREFIX + "zookeepers";
+ public static final String ACCUMULO_STANDALONE_ZOOKEEPERS_DEFAULT = "localhost";
+ public static final String ACCUMULO_STANDALONE_INSTANCE_NAME_KEY = ACCUMULO_STANDALONE_PREFIX + "instance.name";
+ public static final String ACCUMULO_STANDALONE_INSTANCE_NAME_DEFAULT = "accumulo";
+ public static final String ACCUMULO_STANDALONE_TMP_DIR_KEY = ACCUMULO_STANDALONE_PREFIX + "tmpdir";
+ public static final String ACCUMULO_STANDALONE_TMP_DIR_DEFAULT = "/tmp";
+ public static final String ACCUMULO_STANDALONE_SERVER_USER = ACCUMULO_STANDALONE_PREFIX + "server.user";
+ public static final String ACCUMULO_STANDALONE_SERVER_USER_DEFAULT = "accumulo";
+
+ // A set of users we can use to connect to this instance
+ public static final String ACCUMULO_STANDALONE_USER_KEY = ACCUMULO_STANDALONE_PREFIX + "users.";
+ // Keytabs for the users
+ public static final String ACCUMULO_STANDALONE_USER_KEYTABS_KEY = ACCUMULO_STANDALONE_PREFIX + "keytabs.";
+ // Passwords for the users
+ public static final String ACCUMULO_STANDALONE_USER_PASSWORDS_KEY = ACCUMULO_STANDALONE_PREFIX + "passwords.";
+
+ public static final String ACCUMULO_STANDALONE_HOME = ACCUMULO_STANDALONE_PREFIX + "home";
+ public static final String ACCUMULO_STANDALONE_CLIENT_CONF = ACCUMULO_STANDALONE_PREFIX + "client.conf";
+ public static final String ACCUMULO_STANDALONE_SERVER_CONF = ACCUMULO_STANDALONE_PREFIX + "server.conf";
+ public static final String ACCUMULO_STANDALONE_HADOOP_CONF = ACCUMULO_STANDALONE_PREFIX + "hadoop.conf";
+
+ private Map<String,String> conf;
+ private String serverUser;
+ private File clientConfFile;
+ private ClientConfiguration clientConf;
+ private List<ClusterUser> clusterUsers;
+
+ public StandaloneAccumuloClusterConfiguration(File clientConfFile) {
+ ClusterType type = getClusterType();
+ if (ClusterType.STANDALONE != type) {
+ throw new IllegalStateException("Expected only to see standalone cluster state");
+ }
+
+ this.conf = getConfiguration(type);
+ this.clientConfFile = clientConfFile;
+ try {
+ this.clientConf = new ClientConfiguration(clientConfFile);
+ } catch (ConfigurationException e) {
+ throw new RuntimeException("Failed to load client configuration from " + clientConfFile);
+ }
+ // Update instance name if not already set
+ if (!clientConf.containsKey(ClientProperty.INSTANCE_NAME.getKey())) {
+ clientConf.withInstance(getInstanceName());
+ }
+ // Update zookeeper hosts if not already set
+ if (!clientConf.containsKey(ClientProperty.INSTANCE_ZK_HOST.getKey())) {
+ clientConf.withZkHosts(getZooKeepers());
+ }
+
+ // The user Accumulo is running as
+ serverUser = conf.get(ACCUMULO_STANDALONE_SERVER_USER);
+ if (null == serverUser) {
+ serverUser = ACCUMULO_STANDALONE_SERVER_USER_DEFAULT;
+ }
+
+ clusterUsers = new ArrayList<>();
+ for (Entry<String,String> entry : conf.entrySet()) {
+ String key = entry.getKey();
+ if (key.startsWith(ACCUMULO_STANDALONE_USER_KEY)) {
+ String suffix = key.substring(ACCUMULO_STANDALONE_USER_KEY.length());
+ String keytab = conf.get(ACCUMULO_STANDALONE_USER_KEYTABS_KEY + suffix);
+ if (null != keytab) {
+ File keytabFile = new File(keytab);
+ assertTrue("Keytab doesn't exist: " + keytabFile, keytabFile.exists() && keytabFile.isFile());
+ clusterUsers.add(new ClusterUser(entry.getValue(), keytabFile));
+ } else {
+ String password = conf.get(ACCUMULO_STANDALONE_USER_PASSWORDS_KEY + suffix);
+ if (null == password) {
+ throw new IllegalArgumentException("Missing password or keytab configuration for user with offset " + suffix);
+ }
+ clusterUsers.add(new ClusterUser(entry.getValue(), password));
+ }
+ }
+ }
+ log.info("Initialized Accumulo users with Kerberos keytabs: {}", clusterUsers);
+ }
+
+ @Override
+ public String getAdminPrincipal() {
+ String principal = conf.get(ACCUMULO_STANDALONE_ADMIN_PRINCIPAL_KEY);
+ if (null == principal) {
+ principal = ACCUMULO_STANDALONE_ADMIN_PRINCIPAL_DEFAULT;
+ }
+ return principal;
+ }
+
+ public String getPassword() {
+ String password = conf.get(ACCUMULO_STANDALONE_PASSWORD_KEY);
+ if (null == password) {
+ password = ACCUMULO_STANDALONE_PASSWORD_DEFAULT;
+ }
+ return password;
+ }
+
+ public File getAdminKeytab() {
+ String keytabPath = conf.get(ACCUMULO_STANDALONE_ADMIN_KEYTAB_KEY);
+ if (null == keytabPath) {
+ throw new RuntimeException("SASL is enabled, but " + ACCUMULO_STANDALONE_ADMIN_KEYTAB_KEY + " was not provided");
+ }
+ File keytab = new File(keytabPath);
+ if (!keytab.exists() || !keytab.isFile()) {
+ throw new RuntimeException(keytabPath + " should be a regular file");
+ }
+ return keytab;
+ }
+
+ @Override
+ public AuthenticationToken getAdminToken() {
+ if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ File keytab = getAdminKeytab();
+ try {
+ return new KerberosToken(getAdminPrincipal(), keytab, true);
+ } catch (IOException e) {
+ // The user isn't logged in
+ throw new RuntimeException("Failed to create KerberosToken", e);
+ }
+ } else {
+ return new PasswordToken(getPassword());
+ }
+ }
+
+ public String getZooKeepers() {
+ if (clientConf.containsKey(ClientProperty.INSTANCE_ZK_HOST.getKey())) {
+ return clientConf.get(ClientProperty.INSTANCE_ZK_HOST);
+ }
+
+ String zookeepers = conf.get(ACCUMULO_STANDALONE_ZOOKEEPERS_KEY);
+ if (null == zookeepers) {
+ zookeepers = ACCUMULO_STANDALONE_ZOOKEEPERS_DEFAULT;
+ }
+ return zookeepers;
+ }
+
+ public String getInstanceName() {
+ if (clientConf.containsKey(ClientProperty.INSTANCE_NAME.getKey())) {
+ return clientConf.get(ClientProperty.INSTANCE_NAME);
+ }
+
+ String instanceName = conf.get(ACCUMULO_STANDALONE_INSTANCE_NAME_KEY);
+ if (null == instanceName) {
+ instanceName = ACCUMULO_STANDALONE_INSTANCE_NAME_DEFAULT;
+ }
+ return instanceName;
+ }
+
+ public Instance getInstance() {
+ // Make sure the ZooKeeperInstance is created with the ClientConfiguration so settings like SASL are passed through to the Connector
+ return new ZooKeeperInstance(clientConf);
+ }
+
+ @Override
+ public ClusterType getClusterType() {
+ return ClusterType.STANDALONE;
+ }
+
+ public String getHadoopConfDir() {
+ return conf.get(ACCUMULO_STANDALONE_HADOOP_CONF);
+ }
+
+ public String getAccumuloHome() {
+ return conf.get(ACCUMULO_STANDALONE_HOME);
+ }
+
+ public String getClientAccumuloConfDir() {
+ return conf.get(ACCUMULO_STANDALONE_CLIENT_CONF);
+ }
+
+ public String getServerAccumuloConfDir() {
+ return conf.get(ACCUMULO_STANDALONE_SERVER_CONF);
+ }
+
+ @Override
+ public ClientConfiguration getClientConf() {
+ return clientConf;
+ }
+
+ public File getClientConfFile() {
+ return clientConfFile;
+ }
+
+ public Path getTmpDirectory() {
+ String tmpDir = conf.get(ACCUMULO_STANDALONE_TMP_DIR_KEY);
+ if (null == tmpDir) {
+ tmpDir = ACCUMULO_STANDALONE_TMP_DIR_DEFAULT;
+ }
+ return new Path(tmpDir);
+ }
+
+ public List<ClusterUser> getUsers() {
+ return Collections.unmodifiableList(clusterUsers);
+ }
+
+ /**
+ * @return The user Accumulo is running as
+ */
+ public String getAccumuloServerUser() {
+ return serverUser;
+ }
+}
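Note: the constructor above pairs each user declared under ACCUMULO_STANDALONE_USER_KEY + suffix with a keytab or password under the same suffix. A sketch of declaring two test users, assuming these keys are likewise resolved from Java system properties (names and paths are illustrative):

    // User 0 authenticates with a Kerberos keytab, user 1 with a password.
    System.setProperty(StandaloneAccumuloClusterConfiguration.ACCUMULO_STANDALONE_USER_KEY + "0", "testuser0");
    System.setProperty(StandaloneAccumuloClusterConfiguration.ACCUMULO_STANDALONE_USER_KEYTABS_KEY + "0", "/path/to/testuser0.keytab");
    System.setProperty(StandaloneAccumuloClusterConfiguration.ACCUMULO_STANDALONE_USER_KEY + "1", "testuser1");
    System.setProperty(StandaloneAccumuloClusterConfiguration.ACCUMULO_STANDALONE_USER_PASSWORDS_KEY + "1", "testuser1password");
    // A user declared with neither a keytab nor a password fails fast with IllegalArgumentException.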
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/AccumuloOutputFormatIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/AccumuloOutputFormatIT.java b/test/src/main/java/org/apache/accumulo/test/AccumuloOutputFormatIT.java
new file mode 100644
index 0000000..a2f522e
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/AccumuloOutputFormatIT.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.mapred.AccumuloOutputFormat;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.minicluster.MiniAccumuloCluster;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.RecordWriter;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.junit.rules.TemporaryFolder;
+
+import com.google.common.collect.Maps;
+
+/**
+ * Prevent regression of ACCUMULO-3709. Written as a MiniAccumuloCluster test because the mock instance does not reproduce this error when table permissions are changed dynamically.
+ */
+public class AccumuloOutputFormatIT {
+
+ private static final String TABLE = "abc";
+ private MiniAccumuloCluster accumulo;
+ private String secret = "secret";
+
+ @Rule
+ public TemporaryFolder folder = new TemporaryFolder(new File(System.getProperty("user.dir") + "/target"));
+
+ @Rule
+ public ExpectedException exception = ExpectedException.none();
+
+ @Before
+ public void setUp() throws Exception {
+ folder.create();
+ MiniAccumuloConfig config = new MiniAccumuloConfig(folder.getRoot(), secret);
+ Map<String,String> configMap = Maps.newHashMap();
+ configMap.put(Property.TSERV_SESSION_MAXIDLE.toString(), "1");
+ config.setSiteConfig(configMap);
+ config.setNumTservers(1);
+ accumulo = new MiniAccumuloCluster(config);
+ accumulo.start();
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ accumulo.stop();
+ folder.delete();
+ }
+
+ @Test
+ public void testMapred() throws Exception {
+ ClientConfiguration clientConfig = accumulo.getClientConfig();
+ ZooKeeperInstance instance = new ZooKeeperInstance(clientConfig);
+ Connector connector = instance.getConnector("root", new PasswordToken(secret));
+ // create a table and put some data in it
+ connector.tableOperations().create(TABLE);
+
+ JobConf job = new JobConf();
+ BatchWriterConfig batchConfig = new BatchWriterConfig();
+ // a max latency of 0 disables time-based flushes
+ batchConfig.setMaxLatency(0, TimeUnit.MILLISECONDS);
+ // use a single thread to ensure our update session times out
+ batchConfig.setMaxWriteThreads(1);
+ // set the max memory high enough that the writes never trigger a flush
+ batchConfig.setMaxMemory(Long.MAX_VALUE);
+ AccumuloOutputFormat outputFormat = new AccumuloOutputFormat();
+ AccumuloOutputFormat.setBatchWriterOptions(job, batchConfig);
+ AccumuloOutputFormat.setZooKeeperInstance(job, clientConfig);
+ AccumuloOutputFormat.setConnectorInfo(job, "root", new PasswordToken(secret));
+ RecordWriter<Text,Mutation> writer = outputFormat.getRecordWriter(null, job, "Test", null);
+
+ try {
+ for (int i = 0; i < 3; i++) {
+ Mutation m = new Mutation(new Text(String.format("%08d", i)));
+ for (int j = 0; j < 3; j++) {
+ m.put(new Text("cf1"), new Text("cq" + j), new Value((i + "_" + j).getBytes(UTF_8)));
+ writer.write(new Text(TABLE), m);
+ }
+ }
+
+ } catch (Exception e) {
+ e.printStackTrace();
+ // we don't want the exception to come from write
+ }
+
+ connector.securityOperations().revokeTablePermission("root", TABLE, TablePermission.WRITE);
+
+ exception.expect(IOException.class);
+ exception.expectMessage("PERMISSION_DENIED");
+ writer.close(null);
+ }
+}
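Note: the BatchWriterConfig above is deliberately pathological (no time-based flushes, a single thread, effectively unbounded memory) so the tserver update session can time out mid-write. For contrast, a typical non-test wiring of the mapred AccumuloOutputFormat, with placeholder credentials and table name, looks more like:

    JobConf job = new JobConf();
    // Default BatchWriterConfig: periodic latency-based flushes and multiple write threads.
    AccumuloOutputFormat.setBatchWriterOptions(job, new BatchWriterConfig());
    AccumuloOutputFormat.setZooKeeperInstance(job, clientConfig);
    AccumuloOutputFormat.setConnectorInfo(job, "someUser", new PasswordToken("somePassword"));
    AccumuloOutputFormat.setDefaultTableName(job, "someTable"); // used when a mutation is emitted with a null table
    AccumuloOutputFormat.setCreateTables(job, true);            // create destination tables on demand
    job.setOutputFormat(AccumuloOutputFormat.class);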
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/ArbitraryTablePropertiesIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/ArbitraryTablePropertiesIT.java b/test/src/main/java/org/apache/accumulo/test/ArbitraryTablePropertiesIT.java
new file mode 100644
index 0000000..213ab59
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/ArbitraryTablePropertiesIT.java
@@ -0,0 +1,198 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.Map.Entry;
+
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ArbitraryTablePropertiesIT extends SharedMiniClusterBase {
+ private static final Logger log = LoggerFactory.getLogger(ArbitraryTablePropertiesIT.class);
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 30;
+ }
+
+ // Test set, get, and remove arbitrary table properties on the root account
+ @Test
+ public void setGetRemoveTablePropertyRoot() throws Exception {
+ log.debug("Starting setGetRemoveTablePropertyRoot test ------------------------");
+
+ // make a table
+ final String tableName = getUniqueNames(1)[0];
+ final Connector conn = getConnector();
+ conn.tableOperations().create(tableName);
+
+ // Set variables for the property name to use and the initial value
+ String propertyName = "table.custom.description";
+ String description1 = "Description";
+
+ // Make sure the property name is valid
+ Assert.assertTrue(Property.isValidPropertyKey(propertyName));
+ // Set the property to the desired value
+ conn.tableOperations().setProperty(tableName, propertyName, description1);
+
+ // Loop through properties to make sure the new property is added to the list
+ int count = 0;
+ for (Entry<String,String> property : conn.tableOperations().getProperties(tableName)) {
+ if (property.getKey().equals(propertyName) && property.getValue().equals(description1))
+ count++;
+ }
+ Assert.assertEquals(1, count);
+
+ // Set the property to something different
+ String description2 = "set second";
+ conn.tableOperations().setProperty(tableName, propertyName, description2);
+
+ // Loop through properties to make sure the updated value is present in the list
+ count = 0;
+ for (Entry<String,String> property : conn.tableOperations().getProperties(tableName)) {
+ if (property.getKey().equals(propertyName) && property.getValue().equals(description2))
+ count++;
+ }
+ Assert.assertEquals(1, count);
+
+ // Remove the property and make sure there is no longer a value associated with it
+ conn.tableOperations().removeProperty(tableName, propertyName);
+
+ // Loop through properties to make sure the property is no longer in the list
+ count = 0;
+ for (Entry<String,String> property : conn.tableOperations().getProperties(tableName)) {
+ if (property.getKey().equals(propertyName))
+ count++;
+ }
+ Assert.assertEquals(0, count);
+ }
+
+ // Tests set, get, and remove of user added arbitrary properties using a non-root account with permissions to alter tables
+ @Test
+ public void userSetGetRemoveTablePropertyWithPermission() throws Exception {
+ log.debug("Starting userSetGetRemoveTablePropertyWithPermission test ------------------------");
+
+ // Make a test username and password
+ ClusterUser user = getUser(0);
+ String testUser = user.getPrincipal();
+ AuthenticationToken testToken = user.getToken();
+
+ // Create a root user and create the table
+ // Create a test user and grant that user permission to alter the table
+ final String tableName = getUniqueNames(1)[0];
+ final Connector c = getConnector();
+ c.securityOperations().createLocalUser(testUser, (testToken instanceof PasswordToken ? (PasswordToken) testToken : null));
+ c.tableOperations().create(tableName);
+ c.securityOperations().grantTablePermission(testUser, tableName, TablePermission.ALTER_TABLE);
+
+ // Set variables for the property name to use and the initial value
+ String propertyName = "table.custom.description";
+ String description1 = "Description";
+
+ // Make sure the property name is valid
+ Assert.assertTrue(Property.isValidPropertyKey(propertyName));
+
+ // Getting a fresh token will ensure we're logged in as this user (if necessary)
+ Connector testConn = c.getInstance().getConnector(testUser, user.getToken());
+ // Set the property to the desired value
+ testConn.tableOperations().setProperty(tableName, propertyName, description1);
+
+ // Loop through properties to make sure the new property is added to the list
+ int count = 0;
+ for (Entry<String,String> property : testConn.tableOperations().getProperties(tableName)) {
+ if (property.getKey().equals(propertyName) && property.getValue().equals(description1))
+ count++;
+ }
+ Assert.assertEquals(1, count);
+
+ // Set the property to something different
+ String description2 = "set second";
+ testConn.tableOperations().setProperty(tableName, propertyName, description2);
+
+ // Loop through properties to make sure the updated value is present in the list
+ count = 0;
+ for (Entry<String,String> property : testConn.tableOperations().getProperties(tableName)) {
+ if (property.getKey().equals(propertyName) && property.getValue().equals(description2))
+ count++;
+ }
+ Assert.assertEquals(1, count);
+
+ // Remove the property and make sure there is no longer a value associated with it
+ testConn.tableOperations().removeProperty(tableName, propertyName);
+
+ // Loop through properties to make sure the property is no longer in the list
+ count = 0;
+ for (Entry<String,String> property : testConn.tableOperations().getProperties(tableName)) {
+ if (property.getKey().equals(propertyName))
+ count++;
+ }
+ Assert.assertEquals(0, count);
+
+ }
+
+ // Tests set and get of user added arbitrary properties using a non-root account without permissions to alter tables
+ @Test
+ public void userSetGetTablePropertyWithoutPermission() throws Exception {
+ log.debug("Starting userSetGetTablePropertyWithoutPermission test ------------------------");
+
+ // Make a test username and password
+ ClusterUser user = getUser(1);
+ String testUser = user.getPrincipal();
+ AuthenticationToken testToken = user.getToken();
+
+ // Create a root user and create the table
+ // Create a test user and grant that user permission to alter the table
+ final String tableName = getUniqueNames(1)[0];
+ final Connector c = getConnector();
+ c.securityOperations().createLocalUser(testUser, (testToken instanceof PasswordToken ? (PasswordToken) testToken : null));
+ c.tableOperations().create(tableName);
+
+ // Set variables for the property name to use and the initial value
+ String propertyName = "table.custom.description";
+ String description1 = "Description";
+
+ // Make sure the property name is valid
+ Assert.assertTrue(Property.isValidPropertyKey(propertyName));
+
+ // Getting a fresh token will ensure we're logged in as this user (if necessary)
+ Connector testConn = c.getInstance().getConnector(testUser, user.getToken());
+
+ // Try to set the property to the desired value.
+ // If able to set it, the test fails, since permission was never granted
+ try {
+ testConn.tableOperations().setProperty(tableName, propertyName, description1);
+ Assert.fail("Was able to set property without permissions");
+ } catch (AccumuloSecurityException e) {}
+
+ // Loop through properties to make sure the new property is not added to the list
+ int count = 0;
+ for (Entry<String,String> property : testConn.tableOperations().getProperties(tableName)) {
+ if (property.getKey().equals(propertyName))
+ count++;
+ }
+ Assert.assertEquals(0, count);
+ }
+}
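Note: the three tests above repeat the same count-matching-properties loop; a small helper along these lines (not part of the original test, shown only as a possible refactoring) expresses each check in one line:

    // Counts properties whose key matches, optionally also requiring an exact value match.
    private static int countProperty(Iterable<Entry<String,String>> props, String key, String expectedValue) {
      int count = 0;
      for (Entry<String,String> property : props) {
        if (property.getKey().equals(key) && (expectedValue == null || expectedValue.equals(property.getValue())))
          count++;
      }
      return count;
    }

    // e.g. Assert.assertEquals(1, countProperty(conn.tableOperations().getProperties(tableName), propertyName, description1));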
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/AssignmentThreadsIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/AssignmentThreadsIT.java b/test/src/main/java/org/apache/accumulo/test/AssignmentThreadsIT.java
new file mode 100644
index 0000000..c9a83a6
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/AssignmentThreadsIT.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.Random;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+// ACCUMULO-1177
+public class AssignmentThreadsIT extends ConfigurableMacBase {
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setNumTservers(1);
+ cfg.setProperty(Property.TSERV_ASSIGNMENT_MAXCONCURRENT, "1");
+ }
+
+ // [0-9a-f]
+ private final static byte[] HEXCHARS = {0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66};
+ private final static Random random = new Random();
+
+ public static byte[] randomHex(int n) {
+ byte[] binary = new byte[n];
+ byte[] hex = new byte[n * 2];
+ random.nextBytes(binary);
+ int count = 0;
+ for (byte x : binary) {
+ hex[count++] = HEXCHARS[(x >> 4) & 0xf];
+ hex[count++] = HEXCHARS[x & 0xf];
+ }
+ return hex;
+ }
+
+ @Test(timeout = 5 * 60 * 1000)
+ public void testConcurrentAssignmentPerformance() throws Exception {
+ // make a table with a lot of splits
+ String tableName = getUniqueNames(1)[0];
+ Connector c = getConnector();
+ log.info("Creating table");
+ c.tableOperations().create(tableName);
+ SortedSet<Text> splits = new TreeSet<Text>();
+ for (int i = 0; i < 1000; i++) {
+ splits.add(new Text(randomHex(8)));
+ }
+ log.info("Adding splits");
+ c.tableOperations().addSplits(tableName, splits);
+ log.info("Taking table offline");
+ c.tableOperations().offline(tableName, true);
+ // time how long it takes to load
+ log.info("Bringing the table online");
+ long now = System.currentTimeMillis();
+ c.tableOperations().online(tableName, true);
+ long diff = System.currentTimeMillis() - now;
+ log.info("Loaded " + splits.size() + " tablets in " + diff + " ms");
+ c.instanceOperations().setProperty(Property.TSERV_ASSIGNMENT_MAXCONCURRENT.getKey(), "20");
+ now = System.currentTimeMillis();
+ log.info("Taking table offline, again");
+ c.tableOperations().offline(tableName, true);
+ // wait >10 seconds for thread pool to update
+ UtilWaitThread.sleep(Math.max(0, now + 11 * 1000 - System.currentTimeMillis()));
+ now = System.currentTimeMillis();
+ log.info("Bringing table back online");
+ c.tableOperations().online(tableName, true);
+ long diff2 = System.currentTimeMillis() - now;
+ log.debug("Loaded " + splits.size() + " tablets in " + diff2 + " ms");
+ assertTrue(diff2 < diff);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/AuditMessageIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/AuditMessageIT.java b/test/src/main/java/org/apache/accumulo/test/AuditMessageIT.java
new file mode 100644
index 0000000..1eb2373
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/AuditMessageIT.java
@@ -0,0 +1,506 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.SystemPermission;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.security.AuditedSecurityOperation;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.LineIterator;
+import org.apache.hadoop.io.Text;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests that Accumulo is outputting audit messages as expected. Since this uses MiniAccumuloCluster, testing every operation in isolation would take a
+ * while, so we test blocks of related operations against a single MiniAccumulo instance, cleaning up objects between each test. MiniAccumuloCluster sets
+ * up log4j differently from an installed instance, piping everything through stdout and writing to a known location, so we have to find the logs and grep
+ * out the bits we need.
+ */
+public class AuditMessageIT extends ConfigurableMacBase {
+
+ private static final String AUDIT_USER_1 = "AuditUser1";
+ private static final String AUDIT_USER_2 = "AuditUser2";
+ private static final String PASSWORD = "password";
+ private static final String OLD_TEST_TABLE_NAME = "apples";
+ private static final String NEW_TEST_TABLE_NAME = "oranges";
+ private static final String THIRD_TEST_TABLE_NAME = "pears";
+ private static final Authorizations auths = new Authorizations("private", "public");
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Override
+ public void beforeClusterStart(MiniAccumuloConfigImpl cfg) throws Exception {
+ File f = new File(cfg.getConfDir(), "auditLog.xml");
+ if (f.delete()) {
+ log.debug("Deleted " + f);
+ }
+ }
+
+ // Must be static to survive JUnit re-initialising the class for each test.
+ private static String lastAuditTimestamp;
+ private Connector auditConnector;
+ private Connector conn;
+
+ private static ArrayList<String> findAuditMessage(ArrayList<String> input, String pattern) {
+ ArrayList<String> result = new ArrayList<String>();
+ for (String s : input) {
+ if (s.matches(".*" + pattern + ".*"))
+ result.add(s);
+ }
+ return result;
+ }
+
+ /**
+ * Returns a List of Audit messages that have been grep'd out of the MiniAccumuloCluster output.
+ *
+ * @param stepName
+ * A unique name for the test being executed, to identify the System.out messages.
+ * @return A List of the Audit messages, sorted (so in chronological order).
+ */
+ private ArrayList<String> getAuditMessages(String stepName) throws IOException {
+ // ACCUMULO-3144 Make sure we give the processes enough time to flush the write buffer
+ try {
+ Thread.sleep(2000);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ throw new IOException("Interrupted waiting for data to be flushed to output streams");
+ }
+
+ for (MiniAccumuloClusterImpl.LogWriter lw : getCluster().getLogWriters()) {
+ lw.flush();
+ }
+
+ // Grab the audit messages
+ System.out.println("Start of captured audit messages for step " + stepName);
+
+ ArrayList<String> result = new ArrayList<String>();
+ File[] files = getCluster().getConfig().getLogDir().listFiles();
+ assertNotNull(files);
+ for (File file : files) {
+ // We want to grab the files called .out
+ if (file.getName().contains(".out") && file.isFile() && file.canRead()) {
+ LineIterator it = FileUtils.lineIterator(file, UTF_8.name());
+ try {
+ while (it.hasNext()) {
+ String line = it.nextLine();
+ if (line.matches(".* \\[" + AuditedSecurityOperation.AUDITLOG + "\\s*\\].*")) {
+ // Only include the message if lastAuditTimestamp is null, or the message occurred after lastAuditTimestamp
+ if ((lastAuditTimestamp == null) || (line.substring(0, 23).compareTo(lastAuditTimestamp) > 0))
+ result.add(line);
+ }
+ }
+ } finally {
+ LineIterator.closeQuietly(it);
+ }
+ }
+ }
+ Collections.sort(result);
+
+ for (String s : result) {
+ System.out.println(s);
+ }
+ System.out.println("End of captured audit messages for step " + stepName);
+ if (result.size() > 0)
+ lastAuditTimestamp = (result.get(result.size() - 1)).substring(0, 23);
+
+ return result;
+ }
+
+ private void grantEverySystemPriv(Connector conn, String user) throws AccumuloSecurityException, AccumuloException {
+ SystemPermission[] arrayOfP = new SystemPermission[] {SystemPermission.SYSTEM, SystemPermission.ALTER_TABLE, SystemPermission.ALTER_USER,
+ SystemPermission.CREATE_TABLE, SystemPermission.CREATE_USER, SystemPermission.DROP_TABLE, SystemPermission.DROP_USER};
+ for (SystemPermission p : arrayOfP) {
+ conn.securityOperations().grantSystemPermission(user, p);
+ }
+ }
+
+ @Before
+ public void resetInstance() throws Exception {
+ conn = getConnector();
+
+ removeUsersAndTables();
+
+ // This will set the lastAuditTimestamp for the first test
+ getAuditMessages("setup");
+ }
+
+ @After
+ public void removeUsersAndTables() throws Exception {
+ for (String user : Arrays.asList(AUDIT_USER_1, AUDIT_USER_2)) {
+ if (conn.securityOperations().listLocalUsers().contains(user)) {
+ conn.securityOperations().dropLocalUser(user);
+ }
+ }
+
+ TableOperations tops = conn.tableOperations();
+ for (String table : Arrays.asList(THIRD_TEST_TABLE_NAME, NEW_TEST_TABLE_NAME, OLD_TEST_TABLE_NAME)) {
+ if (tops.exists(table)) {
+ tops.delete(table);
+ }
+ }
+ }
+
+ @Test
+ public void testTableOperationsAudits() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, IOException,
+ InterruptedException {
+
+ conn.securityOperations().createLocalUser(AUDIT_USER_1, new PasswordToken(PASSWORD));
+ conn.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.SYSTEM);
+ conn.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.CREATE_TABLE);
+
+ // Connect as Audit User and do a bunch of stuff.
+ // Testing activity begins here
+ auditConnector = getCluster().getConnector(AUDIT_USER_1, new PasswordToken(PASSWORD));
+ auditConnector.tableOperations().create(OLD_TEST_TABLE_NAME);
+ auditConnector.tableOperations().rename(OLD_TEST_TABLE_NAME, NEW_TEST_TABLE_NAME);
+ Map<String,String> emptyMap = Collections.emptyMap();
+ Set<String> emptySet = Collections.emptySet();
+ auditConnector.tableOperations().clone(NEW_TEST_TABLE_NAME, OLD_TEST_TABLE_NAME, true, emptyMap, emptySet);
+ auditConnector.tableOperations().delete(OLD_TEST_TABLE_NAME);
+ auditConnector.tableOperations().offline(NEW_TEST_TABLE_NAME);
+ auditConnector.tableOperations().delete(NEW_TEST_TABLE_NAME);
+ // Testing activity ends here
+
+ ArrayList<String> auditMessages = getAuditMessages("testTableOperationsAudits");
+
+ assertEquals(1, findAuditMessage(auditMessages, "action: createTable; targetTable: " + OLD_TEST_TABLE_NAME).size());
+ assertEquals(1, findAuditMessage(auditMessages, "action: renameTable; targetTable: " + OLD_TEST_TABLE_NAME).size());
+ assertEquals(1, findAuditMessage(auditMessages, "action: cloneTable; targetTable: " + NEW_TEST_TABLE_NAME).size());
+ assertEquals(1, findAuditMessage(auditMessages, "action: deleteTable; targetTable: " + OLD_TEST_TABLE_NAME).size());
+ assertEquals(1, findAuditMessage(auditMessages, "action: offlineTable; targetTable: " + NEW_TEST_TABLE_NAME).size());
+ assertEquals(1, findAuditMessage(auditMessages, "action: deleteTable; targetTable: " + NEW_TEST_TABLE_NAME).size());
+
+ }
+
+ @Test
+ public void testUserOperationsAudits() throws AccumuloSecurityException, AccumuloException, TableExistsException, InterruptedException, IOException {
+
+ conn.securityOperations().createLocalUser(AUDIT_USER_1, new PasswordToken(PASSWORD));
+ conn.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.SYSTEM);
+ conn.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.CREATE_USER);
+ grantEverySystemPriv(conn, AUDIT_USER_1);
+
+ // Connect as Audit User and do a bunch of stuff.
+ // Start testing activities here
+ auditConnector = getCluster().getConnector(AUDIT_USER_1, new PasswordToken(PASSWORD));
+ auditConnector.securityOperations().createLocalUser(AUDIT_USER_2, new PasswordToken(PASSWORD));
+
+ // It seems only root can grant stuff.
+ conn.securityOperations().grantSystemPermission(AUDIT_USER_2, SystemPermission.ALTER_TABLE);
+ conn.securityOperations().revokeSystemPermission(AUDIT_USER_2, SystemPermission.ALTER_TABLE);
+ auditConnector.tableOperations().create(NEW_TEST_TABLE_NAME);
+ conn.securityOperations().grantTablePermission(AUDIT_USER_2, NEW_TEST_TABLE_NAME, TablePermission.READ);
+ conn.securityOperations().revokeTablePermission(AUDIT_USER_2, NEW_TEST_TABLE_NAME, TablePermission.READ);
+ auditConnector.securityOperations().changeLocalUserPassword(AUDIT_USER_2, new PasswordToken("anything"));
+ auditConnector.securityOperations().changeUserAuthorizations(AUDIT_USER_2, auths);
+ auditConnector.securityOperations().dropLocalUser(AUDIT_USER_2);
+ // Stop testing activities here
+
+ ArrayList<String> auditMessages = getAuditMessages("testUserOperationsAudits");
+
+ assertEquals(1, findAuditMessage(auditMessages, "action: createUser; targetUser: " + AUDIT_USER_2).size());
+ assertEquals(
+ 1,
+ findAuditMessage(auditMessages,
+ "action: grantSystemPermission; permission: " + SystemPermission.ALTER_TABLE.toString() + "; targetUser: " + AUDIT_USER_2).size());
+ assertEquals(
+ 1,
+ findAuditMessage(auditMessages,
+ "action: revokeSystemPermission; permission: " + SystemPermission.ALTER_TABLE.toString() + "; targetUser: " + AUDIT_USER_2).size());
+ assertEquals(
+ 1,
+ findAuditMessage(auditMessages,
+ "action: grantTablePermission; permission: " + TablePermission.READ.toString() + "; targetTable: " + NEW_TEST_TABLE_NAME).size());
+ assertEquals(
+ 1,
+ findAuditMessage(auditMessages,
+ "action: revokeTablePermission; permission: " + TablePermission.READ.toString() + "; targetTable: " + NEW_TEST_TABLE_NAME).size());
+ assertEquals(1, findAuditMessage(auditMessages, "action: changePassword; targetUser: " + AUDIT_USER_2 + "").size());
+ assertEquals(1, findAuditMessage(auditMessages, "action: changeAuthorizations; targetUser: " + AUDIT_USER_2 + "; authorizations: " + auths.toString())
+ .size());
+ assertEquals(1, findAuditMessage(auditMessages, "action: dropUser; targetUser: " + AUDIT_USER_2).size());
+ }
+
+ @Test
+ public void testImportExportOperationsAudits() throws AccumuloSecurityException, AccumuloException, TableExistsException, TableNotFoundException,
+ IOException, InterruptedException {
+
+ conn.securityOperations().createLocalUser(AUDIT_USER_1, new PasswordToken(PASSWORD));
+ conn.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.SYSTEM);
+ conn.securityOperations().changeUserAuthorizations(AUDIT_USER_1, auths);
+ grantEverySystemPriv(conn, AUDIT_USER_1);
+
+ // Connect as Audit User and do a bunch of stuff.
+ // Start testing activities here
+ auditConnector = getCluster().getConnector(AUDIT_USER_1, new PasswordToken(PASSWORD));
+ auditConnector.tableOperations().create(OLD_TEST_TABLE_NAME);
+
+ // Insert some play data
+ BatchWriter bw = auditConnector.createBatchWriter(OLD_TEST_TABLE_NAME, new BatchWriterConfig());
+ Mutation m = new Mutation("myRow");
+ m.put("cf1", "cq1", "v1");
+ m.put("cf1", "cq2", "v3");
+ bw.addMutation(m);
+ bw.close();
+
+ // Prepare to export the table
+ File exportDir = new File(getCluster().getConfig().getDir().toString() + "/export");
+
+ auditConnector.tableOperations().offline(OLD_TEST_TABLE_NAME);
+ auditConnector.tableOperations().exportTable(OLD_TEST_TABLE_NAME, exportDir.toString());
+
+ // We've exported the table metadata to the MiniAccumuloCluster root dir. Grab the .rf file path to re-import it
+ File distCpTxt = new File(exportDir.toString() + "/distcp.txt");
+ File importFile = null;
+ LineIterator it = FileUtils.lineIterator(distCpTxt, UTF_8.name());
+
+ // Just grab the first .rf file; it will do for now.
+ String filePrefix = "file:";
+ try {
+ while (it.hasNext() && importFile == null) {
+ String line = it.nextLine();
+ if (line.matches(".*\\.rf")) {
+ importFile = new File(line.replaceFirst(filePrefix, ""));
+ }
+ }
+ } finally {
+ LineIterator.closeQuietly(it);
+ }
+ FileUtils.copyFileToDirectory(importFile, exportDir);
+ auditConnector.tableOperations().importTable(NEW_TEST_TABLE_NAME, exportDir.toString());
+
+ // Now do a Directory (bulk) import of the same data.
+ auditConnector.tableOperations().create(THIRD_TEST_TABLE_NAME);
+ File failDir = new File(exportDir + "/tmp");
+ assertTrue(failDir.mkdirs() || failDir.isDirectory());
+ auditConnector.tableOperations().importDirectory(THIRD_TEST_TABLE_NAME, exportDir.toString(), failDir.toString(), false);
+ auditConnector.tableOperations().online(OLD_TEST_TABLE_NAME);
+
+ // Stop testing activities here
+
+ ArrayList<String> auditMessages = getAuditMessages("testImportExportOperationsAudits");
+
+ assertEquals(1, findAuditMessage(auditMessages, String.format(AuditedSecurityOperation.CAN_CREATE_TABLE_AUDIT_TEMPLATE, OLD_TEST_TABLE_NAME)).size());
+ assertEquals(1,
+ findAuditMessage(auditMessages, String.format(AuditedSecurityOperation.CAN_ONLINE_OFFLINE_TABLE_AUDIT_TEMPLATE, "offlineTable", OLD_TEST_TABLE_NAME))
+ .size());
+ assertEquals(1,
+ findAuditMessage(auditMessages, String.format(AuditedSecurityOperation.CAN_EXPORT_AUDIT_TEMPLATE, OLD_TEST_TABLE_NAME, exportDir.toString())).size());
+ assertEquals(
+ 1,
+ findAuditMessage(auditMessages,
+ String.format(AuditedSecurityOperation.CAN_IMPORT_AUDIT_TEMPLATE, NEW_TEST_TABLE_NAME, filePrefix + exportDir.toString())).size());
+ assertEquals(1, findAuditMessage(auditMessages, String.format(AuditedSecurityOperation.CAN_CREATE_TABLE_AUDIT_TEMPLATE, THIRD_TEST_TABLE_NAME)).size());
+ assertEquals(
+ 1,
+ findAuditMessage(
+ auditMessages,
+ String.format(AuditedSecurityOperation.CAN_BULK_IMPORT_AUDIT_TEMPLATE, THIRD_TEST_TABLE_NAME, filePrefix + exportDir.toString(), filePrefix
+ + failDir.toString())).size());
+ assertEquals(1,
+ findAuditMessage(auditMessages, String.format(AuditedSecurityOperation.CAN_ONLINE_OFFLINE_TABLE_AUDIT_TEMPLATE, "onlineTable", OLD_TEST_TABLE_NAME))
+ .size());
+
+ }
+
+ @Test
+ public void testDataOperationsAudits() throws AccumuloSecurityException, AccumuloException, TableExistsException, TableNotFoundException, IOException,
+ InterruptedException {
+
+ conn.securityOperations().createLocalUser(AUDIT_USER_1, new PasswordToken(PASSWORD));
+ conn.securityOperations().grantSystemPermission(AUDIT_USER_1, SystemPermission.SYSTEM);
+ conn.securityOperations().changeUserAuthorizations(AUDIT_USER_1, auths);
+ grantEverySystemPriv(conn, AUDIT_USER_1);
+
+ // Connect as Audit User and do a bunch of stuff.
+ // Start testing activities here
+ auditConnector = getCluster().getConnector(AUDIT_USER_1, new PasswordToken(PASSWORD));
+ auditConnector.tableOperations().create(OLD_TEST_TABLE_NAME);
+
+ // Insert some play data
+ BatchWriter bw = auditConnector.createBatchWriter(OLD_TEST_TABLE_NAME, new BatchWriterConfig());
+ Mutation m = new Mutation("myRow");
+ m.put("cf1", "cq1", "v1");
+ m.put("cf1", "cq2", "v3");
+ bw.addMutation(m);
+ bw.close();
+
+ // Start testing activities here
+ // A regular scan
+ Scanner scanner = auditConnector.createScanner(OLD_TEST_TABLE_NAME, auths);
+ for (Map.Entry<Key,Value> entry : scanner) {
+ System.out.println("Scanner row: " + entry.getKey() + " " + entry.getValue());
+ }
+ scanner.close();
+
+ // A batch scan
+ BatchScanner bs = auditConnector.createBatchScanner(OLD_TEST_TABLE_NAME, auths, 1);
+ bs.fetchColumn(new Text("cf1"), new Text("cq1"));
+ bs.setRanges(Arrays.asList(new Range("myRow", "myRow~")));
+
+ for (Map.Entry<Key,Value> entry : bs) {
+ System.out.println("BatchScanner row: " + entry.getKey() + " " + entry.getValue());
+ }
+ bs.close();
+
+ // Delete some data.
+ auditConnector.tableOperations().deleteRows(OLD_TEST_TABLE_NAME, new Text("myRow"), new Text("myRow~"));
+
+ // End of testing activities
+
+ ArrayList<String> auditMessages = getAuditMessages("testDataOperationsAudits");
+ assertTrue(1 <= findAuditMessage(auditMessages, "action: scan; targetTable: " + OLD_TEST_TABLE_NAME).size());
+ assertTrue(1 <= findAuditMessage(auditMessages, "action: scan; targetTable: " + OLD_TEST_TABLE_NAME).size());
+ assertEquals(1,
+ findAuditMessage(auditMessages, String.format(AuditedSecurityOperation.CAN_DELETE_RANGE_AUDIT_TEMPLATE, OLD_TEST_TABLE_NAME, "myRow", "myRow~")).size());
+
+ }
+
+ @Test
+ public void testDeniedAudits() throws AccumuloSecurityException, AccumuloException, TableExistsException, TableNotFoundException, IOException,
+ InterruptedException {
+
+ // Create our user with no privs
+ conn.securityOperations().createLocalUser(AUDIT_USER_1, new PasswordToken(PASSWORD));
+ conn.tableOperations().create(OLD_TEST_TABLE_NAME);
+ auditConnector = getCluster().getConnector(AUDIT_USER_1, new PasswordToken(PASSWORD));
+
+ // Start testing activities
+ // We should get denied or / failed audit messages here.
+ // We don't want the thrown exceptions to stop our tests, and we are not testing that the Exceptions are thrown.
+
+ try {
+ auditConnector.tableOperations().create(NEW_TEST_TABLE_NAME);
+ } catch (AccumuloSecurityException ex) {}
+ try {
+ auditConnector.tableOperations().rename(OLD_TEST_TABLE_NAME, NEW_TEST_TABLE_NAME);
+ } catch (AccumuloSecurityException ex) {}
+ try {
+ auditConnector.tableOperations().clone(OLD_TEST_TABLE_NAME, NEW_TEST_TABLE_NAME, true, Collections.<String,String> emptyMap(),
+ Collections.<String> emptySet());
+ } catch (AccumuloSecurityException ex) {}
+ try {
+ auditConnector.tableOperations().delete(OLD_TEST_TABLE_NAME);
+ } catch (AccumuloSecurityException ex) {}
+ try {
+ auditConnector.tableOperations().offline(OLD_TEST_TABLE_NAME);
+ } catch (AccumuloSecurityException ex) {}
+ try {
+ Scanner scanner = auditConnector.createScanner(OLD_TEST_TABLE_NAME, auths);
+ scanner.iterator().next().getKey();
+ } catch (RuntimeException ex) {}
+ try {
+ auditConnector.tableOperations().deleteRows(OLD_TEST_TABLE_NAME, new Text("myRow"), new Text("myRow~"));
+ } catch (AccumuloSecurityException ex) {}
+
+ // ... that will do for now.
+ // End of testing activities
+
+ ArrayList<String> auditMessages = getAuditMessages("testDeniedAudits");
+ assertEquals(1,
+ findAuditMessage(auditMessages, "operation: denied;.*" + String.format(AuditedSecurityOperation.CAN_CREATE_TABLE_AUDIT_TEMPLATE, NEW_TEST_TABLE_NAME))
+ .size());
+ assertEquals(
+ 1,
+ findAuditMessage(auditMessages,
+ "operation: denied;.*" + String.format(AuditedSecurityOperation.CAN_RENAME_TABLE_AUDIT_TEMPLATE, OLD_TEST_TABLE_NAME, NEW_TEST_TABLE_NAME)).size());
+ assertEquals(
+ 1,
+ findAuditMessage(auditMessages,
+ "operation: denied;.*" + String.format(AuditedSecurityOperation.CAN_CLONE_TABLE_AUDIT_TEMPLATE, OLD_TEST_TABLE_NAME, NEW_TEST_TABLE_NAME)).size());
+ assertEquals(1,
+ findAuditMessage(auditMessages, "operation: denied;.*" + String.format(AuditedSecurityOperation.CAN_DELETE_TABLE_AUDIT_TEMPLATE, OLD_TEST_TABLE_NAME))
+ .size());
+ assertEquals(
+ 1,
+ findAuditMessage(auditMessages,
+ "operation: denied;.*" + String.format(AuditedSecurityOperation.CAN_ONLINE_OFFLINE_TABLE_AUDIT_TEMPLATE, "offlineTable", OLD_TEST_TABLE_NAME))
+ .size());
+ assertEquals(1, findAuditMessage(auditMessages, "operation: denied;.*" + "action: scan; targetTable: " + OLD_TEST_TABLE_NAME).size());
+ assertEquals(
+ 1,
+ findAuditMessage(auditMessages,
+ "operation: denied;.*" + String.format(AuditedSecurityOperation.CAN_DELETE_RANGE_AUDIT_TEMPLATE, OLD_TEST_TABLE_NAME, "myRow", "myRow~")).size());
+ }
+
+ @Test
+ public void testFailedAudits() throws AccumuloSecurityException, AccumuloException, TableExistsException, TableNotFoundException, IOException,
+ InterruptedException {
+
+ // Start testing activities
+ // Test that a few "failed" audit messages come through when we attempt operations that cannot succeed
+ // We don't want the thrown exceptions to stop our tests, and we are not testing that the Exceptions are thrown.
+ try {
+ conn.securityOperations().dropLocalUser(AUDIT_USER_2);
+ } catch (AccumuloSecurityException ex) {}
+ try {
+ conn.securityOperations().revokeSystemPermission(AUDIT_USER_2, SystemPermission.ALTER_TABLE);
+ } catch (AccumuloSecurityException ex) {}
+ try {
+ conn.securityOperations().createLocalUser("root", new PasswordToken("super secret"));
+ } catch (AccumuloSecurityException ex) {}
+ ArrayList<String> auditMessages = getAuditMessages("testFailedAudits");
+ // ... that will do for now.
+ // End of testing activities
+
+ assertEquals(1, findAuditMessage(auditMessages, String.format(AuditedSecurityOperation.DROP_USER_AUDIT_TEMPLATE, AUDIT_USER_2)).size());
+ assertEquals(
+ 1,
+ findAuditMessage(auditMessages,
+ String.format(AuditedSecurityOperation.REVOKE_SYSTEM_PERMISSION_AUDIT_TEMPLATE, SystemPermission.ALTER_TABLE, AUDIT_USER_2)).size());
+ assertEquals(1, findAuditMessage(auditMessages, String.format(AuditedSecurityOperation.CREATE_USER_AUDIT_TEMPLATE, "root", "")).size());
+
+ }
+
+}
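The denied/failed assertions above all go through a findAuditMessage helper defined earlier in this class (outside this hunk). Judging only from its call sites, it filters the collected audit lines by a regex fragment; a hedged sketch of what such a helper could look like (the shape is inferred from usage, not copied from the source):

  import java.util.ArrayList;
  import java.util.List;
  import java.util.regex.Pattern;

  // Assumed shape of the findAuditMessage helper used by the assertions above:
  // keep every audit line containing a match for the given pattern fragment.
  static ArrayList<String> findAuditMessage(List<String> auditMessages, String fragment) {
    Pattern p = Pattern.compile(".*" + fragment + ".*");
    ArrayList<String> matches = new ArrayList<>();
    for (String line : auditMessages) {
      if (p.matcher(line).matches())
        matches.add(line);
    }
    return matches;
  }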
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java b/test/src/main/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java
new file mode 100644
index 0000000..5b0b84d
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/BadDeleteMarkersCreatedIT.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.InstanceOperations;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.fate.zookeeper.ZooCache;
+import org.apache.accumulo.fate.zookeeper.ZooLock;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+// ACCUMULO-3047
+public class BadDeleteMarkersCreatedIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(BadDeleteMarkersCreatedIT.class);
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 120;
+ }
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setNumTservers(1);
+ cfg.setProperty(Property.GC_CYCLE_DELAY, "1s");
+ cfg.setProperty(Property.GC_CYCLE_START, "0s");
+ }
+
+ private int timeoutFactor = 1;
+
+ @Before
+ public void getTimeoutFactor() {
+ try {
+ timeoutFactor = Integer.parseInt(System.getProperty("timeout.factor"));
+ } catch (NumberFormatException e) {
+ log.warn("Could not parse integer from timeout.factor");
+ }
+
+ Assert.assertTrue("timeout.factor must be greater than or equal to 1", timeoutFactor >= 1);
+ }
+
+ private String gcCycleDelay, gcCycleStart;
+
+ @Before
+ public void alterConfig() throws Exception {
+ InstanceOperations iops = getConnector().instanceOperations();
+ Map<String,String> config = iops.getSystemConfiguration();
+ gcCycleDelay = config.get(Property.GC_CYCLE_DELAY.getKey());
+ gcCycleStart = config.get(Property.GC_CYCLE_START.getKey());
+ iops.setProperty(Property.GC_CYCLE_DELAY.getKey(), "1s");
+ iops.setProperty(Property.GC_CYCLE_START.getKey(), "0s");
+ log.info("Restarting garbage collector");
+
+ getCluster().getClusterControl().stopAllServers(ServerType.GARBAGE_COLLECTOR);
+
+ Instance instance = getConnector().getInstance();
+ ZooCache zcache = new ZooCache(instance.getZooKeepers(), instance.getZooKeepersSessionTimeOut());
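+ // clear any cached state so the lock polls below read live data from ZooKeeper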
+ zcache.clear();
+ String path = ZooUtil.getRoot(instance) + Constants.ZGC_LOCK;
+ byte[] gcLockData;
+ do {
+ gcLockData = ZooLock.getLockData(zcache, path, null);
+ if (null != gcLockData) {
+ log.info("Waiting for GC ZooKeeper lock to expire");
+ Thread.sleep(2000);
+ }
+ } while (null != gcLockData);
+
+ log.info("GC lock was lost");
+
+ getCluster().getClusterControl().startAllServers(ServerType.GARBAGE_COLLECTOR);
+ log.info("Garbage collector was restarted");
+
+ gcLockData = null;
+ do {
+ gcLockData = ZooLock.getLockData(zcache, path, null);
+ if (null == gcLockData) {
+ log.info("Waiting for GC ZooKeeper lock to be acquired");
+ Thread.sleep(2000);
+ }
+ } while (null == gcLockData);
+
+ log.info("GC lock was acquired");
+ }
+
+ @After
+ public void restoreConfig() throws Exception {
+ InstanceOperations iops = getConnector().instanceOperations();
+ if (null != gcCycleDelay) {
+ iops.setProperty(Property.GC_CYCLE_DELAY.getKey(), gcCycleDelay);
+ }
+ if (null != gcCycleStart) {
+ iops.setProperty(Property.GC_CYCLE_START.getKey(), gcCycleStart);
+ }
+ log.info("Restarting garbage collector");
+ getCluster().getClusterControl().stopAllServers(ServerType.GARBAGE_COLLECTOR);
+ getCluster().getClusterControl().startAllServers(ServerType.GARBAGE_COLLECTOR);
+ log.info("Garbage collector was restarted");
+ }
+
+ @Test
+ public void test() throws Exception {
+ // make a table
+ String tableName = getUniqueNames(1)[0];
+ Connector c = getConnector();
+ log.info("Creating table to be deleted");
+ c.tableOperations().create(tableName);
+ final String tableId = c.tableOperations().tableIdMap().get(tableName);
+ Assert.assertNotNull("Expected to find a tableId", tableId);
+
+ // add some splits
+ SortedSet<Text> splits = new TreeSet<Text>();
+ for (int i = 0; i < 10; i++) {
+ splits.add(new Text("" + i));
+ }
+ c.tableOperations().addSplits(tableName, splits);
+ // get rid of all the splits
+ c.tableOperations().deleteRows(tableName, null, null);
+ // get rid of the table
+ c.tableOperations().delete(tableName);
+ log.info("Sleeping to let garbage collector run");
+ // let gc run
+ UtilWaitThread.sleep(timeoutFactor * 15 * 1000);
+ log.info("Verifying that delete markers were deleted");
+ // look for delete markers
+ Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ scanner.setRange(MetadataSchema.DeletesSection.getRange());
+ for (Entry<Key,Value> entry : scanner) {
+ String row = entry.getKey().getRow().toString();
+ if (!row.contains("/" + tableId + "/")) {
+ log.info("Ignoring delete entry for a table other than the one we deleted");
+ continue;
+ }
+ Assert.fail("Delete entry should have been deleted by the garbage collector: " + entry.getKey().getRow().toString());
+ }
+ }
+
+}
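Both wait loops in alterConfig above are the same poll-until idiom: read the GC lock node, sleep, re-check. A reduced sketch of that idiom with the condition abstracted out (the Poll helper is illustrative; the 2-second interval matches the test):

  import java.util.function.BooleanSupplier;

  // Generic poll-until helper mirroring the two GC-lock wait loops above.
  final class Poll {
    static void until(BooleanSupplier condition, long intervalMillis) throws InterruptedException {
      while (!condition.getAsBoolean()) {
        Thread.sleep(intervalMillis); // back off between checks
      }
    }
  }

  // e.g., wait for the old lock to be released, then for the restarted GC to take it:
  // Poll.until(() -> ZooLock.getLockData(zcache, path, null) == null, 2000);
  // Poll.until(() -> ZooLock.getLockData(zcache, path, null) != null, 2000);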
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/BalanceFasterIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/BalanceFasterIT.java b/test/src/main/java/org/apache/accumulo/test/BalanceFasterIT.java
new file mode 100644
index 0000000..bf9f5f0
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/BalanceFasterIT.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+// ACCUMULO-2952
+public class BalanceFasterIT extends ConfigurableMacBase {
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setNumTservers(3);
+ }
+
+ @Test(timeout = 90 * 1000)
+ public void test() throws Exception {
+ // create a table, add a bunch of splits
+ String tableName = getUniqueNames(1)[0];
+ Connector conn = getConnector();
+ conn.tableOperations().create(tableName);
+ SortedSet<Text> splits = new TreeSet<Text>();
+ for (int i = 0; i < 1000; i++) {
+ splits.add(new Text("" + i));
+ }
+ conn.tableOperations().addSplits(tableName, splits);
+ // give a short wait for balancing
+ UtilWaitThread.sleep(10 * 1000);
+ // find out where the tablets are
+ Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
+ s.setRange(MetadataSchema.TabletsSection.getRange());
+ Map<String,Integer> counts = new HashMap<String,Integer>();
+ while (true) {
+ int total = 0;
+ counts.clear();
+ for (Entry<Key,Value> kv : s) {
+ String host = kv.getValue().toString();
+ if (!counts.containsKey(host))
+ counts.put(host, 0);
+ counts.put(host, counts.get(host) + 1);
+ total++;
+ }
+ // are enough tablets online?
+ if (total > 1000)
+ break;
+ }
+ // should be on all three servers
+ assertTrue(counts.size() == 3);
+ // and distributed evenly
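+ // (1000 splits -> 1001 tablets, so an even spread over 3 servers is ~333-334 each)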
+ Iterator<Integer> i = counts.values().iterator();
+ int a = i.next();
+ int b = i.next();
+ int c = i.next();
+ assertTrue(Math.abs(a - b) < 3);
+ assertTrue(Math.abs(a - c) < 3);
+ assertTrue(a > 330);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/BalanceIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/BalanceIT.java b/test/src/main/java/org/apache/accumulo/test/BalanceIT.java
new file mode 100644
index 0000000..605ac94
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/BalanceIT.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class BalanceIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(BalanceIT.class);
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Test
+ public void testBalance() throws Exception {
+ String tableName = getUniqueNames(1)[0];
+ Connector c = getConnector();
+ log.info("Creating table");
+ c.tableOperations().create(tableName);
+ SortedSet<Text> splits = new TreeSet<Text>();
+ for (int i = 0; i < 10; i++) {
+ splits.add(new Text("" + i));
+ }
+ log.info("Adding splits");
+ c.tableOperations().addSplits(tableName, splits);
+ log.info("Waiting for balance");
+ c.instanceOperations().waitForBalance();
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/BalanceWithOfflineTableIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/BalanceWithOfflineTableIT.java b/test/src/main/java/org/apache/accumulo/test/BalanceWithOfflineTableIT.java
new file mode 100644
index 0000000..9acefc4
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/BalanceWithOfflineTableIT.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.concurrent.Callable;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.util.SimpleThreadPool;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+// ACCUMULO-3692
+public class BalanceWithOfflineTableIT extends ConfigurableMacBase {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 30;
+ }
+
+ @Override
+ protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {}
+
+ @Test
+ public void test() throws Exception {
+ final String tableNames[] = getUniqueNames(2);
+ final String tableName = tableNames[0];
+ // create a table with a bunch of splits
+
+ final Connector c = getConnector();
+ log.info("Creating table " + tableName);
+ c.tableOperations().create(tableName);
+ final SortedSet<Text> splits = new TreeSet<>();
+ for (String split : "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",")) {
+ splits.add(new Text(split));
+ }
+ log.info("Splitting table " + tableName);
+ c.tableOperations().addSplits(tableName, splits);
+ log.info("Balancing");
+ c.instanceOperations().waitForBalance();
+ log.info("Balanced");
+
+ // create a new table which will unbalance the cluster
+ final String table2 = tableNames[1];
+ log.info("Creating table " + table2);
+ c.tableOperations().create(table2);
+ log.info("Creating splits " + table2);
+ c.tableOperations().addSplits(table2, splits);
+
+ // offline the table, hopefully while there are some migrations going on
+ log.info("Offlining " + table2);
+ c.tableOperations().offline(table2, true);
+ log.info("Offlined " + table2);
+
+ log.info("Waiting for balance");
+
+ SimpleThreadPool pool = new SimpleThreadPool(1, "waitForBalance");
+ Future<Boolean> wait = pool.submit(new Callable<Boolean>() {
+ @Override
+ public Boolean call() throws Exception {
+ c.instanceOperations().waitForBalance();
+ return true;
+ }
+ });
+ wait.get(20, TimeUnit.SECONDS);
+ log.info("Balance succeeded with an offline table");
+ }
+
+}
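waitForBalance() blocks until the master reports a balanced cluster, so the test above bounds it with a single-thread pool and Future.get(20, TimeUnit.SECONDS). The same guard works for any blocking call; a minimal generic sketch (class and method names are illustrative):

  import java.util.concurrent.Callable;
  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;
  import java.util.concurrent.TimeUnit;

  // Generic form of the timeout guard used above: run a blocking call on a
  // worker thread and give up if it has not returned by the deadline.
  final class Deadline {
    static <T> T call(Callable<T> blockingCall, long seconds) throws Exception {
      ExecutorService pool = Executors.newSingleThreadExecutor();
      try {
        return pool.submit(blockingCall).get(seconds, TimeUnit.SECONDS); // TimeoutException on overrun
      } finally {
        pool.shutdownNow(); // interrupt the worker if it is still blocked
      }
    }
  }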
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/BatchWriterIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/BatchWriterIT.java b/test/src/main/java/org/apache/accumulo/test/BatchWriterIT.java
new file mode 100644
index 0000000..11fc595
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/BatchWriterIT.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.junit.Test;
+
+public class BatchWriterIT extends AccumuloClusterHarness {
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 30;
+ }
+
+ @Test
+ public void test() throws Exception {
+ // call the batchwriter with buffer of size zero
+ String table = getUniqueNames(1)[0];
+ Connector c = getConnector();
+ c.tableOperations().create(table);
+ BatchWriterConfig config = new BatchWriterConfig();
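+ // with a zero-byte buffer, each mutation is flushed to the tserver as soon as it is added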
+ config.setMaxMemory(0);
+ BatchWriter writer = c.createBatchWriter(table, config);
+ Mutation m = new Mutation("row");
+ m.put("cf", "cq", new Value("value".getBytes()));
+ writer.addMutation(m);
+ writer.close();
+ }
+
+}
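For contrast with the zero-buffer case exercised above, a typical client leaves some buffering in place. A sketch of a conventional BatchWriterConfig (the specific values are illustrative, not recommendations):

  import java.util.concurrent.TimeUnit;
  import org.apache.accumulo.core.client.BatchWriterConfig;

  class TypicalBatchWriterConfig {
    static BatchWriterConfig make() {
      BatchWriterConfig config = new BatchWriterConfig();
      config.setMaxMemory(10 * 1024 * 1024); // buffer up to 10MB of mutations before an automatic flush
      config.setMaxLatency(2, TimeUnit.SECONDS); // flush at least every 2 seconds regardless of size
      config.setMaxWriteThreads(4); // concurrent sends to tablet servers
      return config;
    }
  }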
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/BulkImportVolumeIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/BulkImportVolumeIT.java b/test/src/main/java/org/apache/accumulo/test/BulkImportVolumeIT.java
new file mode 100644
index 0000000..ce60893
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/BulkImportVolumeIT.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+// ACCUMULO-118/ACCUMULO-2504
+public class BulkImportVolumeIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(BulkImportVolumeIT.class);
+
+ File volDirBase = null;
+ Path v1, v2;
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ File baseDir = cfg.getDir();
+ volDirBase = new File(baseDir, "volumes");
+ File v1f = new File(volDirBase, "v1");
+ File v2f = new File(volDirBase, "v2");
+ v1 = new Path("file://" + v1f.getAbsolutePath());
+ v2 = new Path("file://" + v2f.getAbsolutePath());
+
+ // Run MAC on two locations in the local file system
+ cfg.setProperty(Property.INSTANCE_VOLUMES, v1.toString() + "," + v2.toString());
+
+ // use raw local file system so walogs sync and flush will work
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ @Test
+ public void testBulkImportFailure() throws Exception {
+ String tableName = getUniqueNames(1)[0];
+ TableOperations to = getConnector().tableOperations();
+ to.create(tableName);
+ FileSystem fs = getFileSystem();
+ Path rootPath = new Path(cluster.getTemporaryPath(), getClass().getName());
+ Path bulk = new Path(rootPath, "bulk");
+ log.info("bulk: {}", bulk);
+ if (fs.exists(bulk)) {
+ fs.delete(bulk, true);
+ }
+ assertTrue(fs.mkdirs(bulk));
+ Path err = new Path(rootPath, "err");
+ log.info("err: {}", err);
+ if (fs.exists(err)) {
+ fs.delete(err, true);
+ }
+ assertTrue(fs.mkdirs(err));
+ Path bogus = new Path(bulk, "bogus.rf");
+ fs.create(bogus).close();
+ log.info("bogus: {}", bogus);
+ assertTrue(fs.exists(bogus));
+ FsShell fsShell = new FsShell(fs.getConf());
+ assertEquals("Failed to chmod " + rootPath, 0, fsShell.run(new String[] {"-chmod", "-R", "777", rootPath.toString()}));
+ log.info("Importing {} into {} with failures directory {}", bulk, tableName, err);
+ to.importDirectory(tableName, bulk.toString(), err.toString(), false);
+ assertEquals(1, fs.listStatus(err).length);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/replication/KerberosReplicationIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/replication/KerberosReplicationIT.java b/test/src/test/java/org/apache/accumulo/test/replication/KerberosReplicationIT.java
deleted file mode 100644
index 48dfdbd..0000000
--- a/test/src/test/java/org/apache/accumulo/test/replication/KerberosReplicationIT.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.replication;
-
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.harness.AccumuloITBase;
-import org.apache.accumulo.harness.MiniClusterConfigurationCallback;
-import org.apache.accumulo.harness.MiniClusterHarness;
-import org.apache.accumulo.harness.TestingKdc;
-import org.apache.accumulo.master.replication.SequentialWorkAssigner;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.minicluster.impl.ProcessReference;
-import org.apache.accumulo.server.replication.ReplicaSystemFactory;
-import org.apache.accumulo.test.functional.KerberosIT;
-import org.apache.accumulo.tserver.TabletServer;
-import org.apache.accumulo.tserver.replication.AccumuloReplicaSystem;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterators;
-
-/**
- * Ensure that replication occurs using keytabs instead of password (not to mention SASL)
- */
-public class KerberosReplicationIT extends AccumuloITBase {
- private static final Logger log = LoggerFactory.getLogger(KerberosIT.class);
-
- private static TestingKdc kdc;
- private static String krbEnabledForITs = null;
- private static ClusterUser rootUser;
-
- @BeforeClass
- public static void startKdc() throws Exception {
- kdc = new TestingKdc();
- kdc.start();
- krbEnabledForITs = System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION);
- if (null == krbEnabledForITs || !Boolean.parseBoolean(krbEnabledForITs)) {
- System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, "true");
- }
- rootUser = kdc.getRootUser();
- }
-
- @AfterClass
- public static void stopKdc() throws Exception {
- if (null != kdc) {
- kdc.stop();
- }
- if (null != krbEnabledForITs) {
- System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, krbEnabledForITs);
- }
- }
-
- private MiniAccumuloClusterImpl primary, peer;
- private String PRIMARY_NAME = "primary", PEER_NAME = "peer";
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60 * 3;
- }
-
- private MiniClusterConfigurationCallback getConfigCallback(final String name) {
- return new MiniClusterConfigurationCallback() {
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
- cfg.setNumTservers(1);
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "15s");
- cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "2M");
- cfg.setProperty(Property.GC_CYCLE_START, "1s");
- cfg.setProperty(Property.GC_CYCLE_DELAY, "5s");
- cfg.setProperty(Property.REPLICATION_WORK_ASSIGNMENT_SLEEP, "1s");
- cfg.setProperty(Property.MASTER_REPLICATION_SCAN_INTERVAL, "1s");
- cfg.setProperty(Property.REPLICATION_NAME, name);
- cfg.setProperty(Property.REPLICATION_MAX_UNIT_SIZE, "8M");
- cfg.setProperty(Property.REPLICATION_WORK_ASSIGNER, SequentialWorkAssigner.class.getName());
- cfg.setProperty(Property.TSERV_TOTAL_MUTATION_QUEUE_MAX, "1M");
- coreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
- };
- }
-
- @Before
- public void setup() throws Exception {
- MiniClusterHarness harness = new MiniClusterHarness();
-
- // Create a primary and a peer instance, both with the same "root" user
- primary = harness.create(getClass().getName(), testName.getMethodName(), new PasswordToken("unused"), getConfigCallback(PRIMARY_NAME), kdc);
- primary.start();
-
- peer = harness.create(getClass().getName(), testName.getMethodName() + "_peer", new PasswordToken("unused"), getConfigCallback(PEER_NAME), kdc);
- peer.start();
-
- // Enable kerberos auth
- Configuration conf = new Configuration(false);
- conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
- UserGroupInformation.setConfiguration(conf);
- }
-
- @After
- public void teardown() throws Exception {
- if (null != peer) {
- peer.stop();
- }
- if (null != primary) {
- primary.stop();
- }
- }
-
- @Test
- public void dataReplicatedToCorrectTable() throws Exception {
- // Login as the root user
- UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
-
- final KerberosToken token = new KerberosToken();
- final Connector primaryConn = primary.getConnector(rootUser.getPrincipal(), token);
- final Connector peerConn = peer.getConnector(rootUser.getPrincipal(), token);
-
- ClusterUser replicationUser = kdc.getClientPrincipal(0);
-
- // Create user for replication to the peer
- peerConn.securityOperations().createLocalUser(replicationUser.getPrincipal(), null);
-
- primaryConn.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + PEER_NAME, replicationUser.getPrincipal());
- primaryConn.instanceOperations().setProperty(Property.REPLICATION_PEER_KEYTAB.getKey() + PEER_NAME, replicationUser.getKeytab().getAbsolutePath());
-
- // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
- primaryConn.instanceOperations().setProperty(
- Property.REPLICATION_PEERS.getKey() + PEER_NAME,
- ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
- AccumuloReplicaSystem.buildConfiguration(peerConn.getInstance().getInstanceName(), peerConn.getInstance().getZooKeepers())));
-
- String primaryTable1 = "primary", peerTable1 = "peer";
-
- // Create tables
- primaryConn.tableOperations().create(primaryTable1);
- String masterTableId1 = primaryConn.tableOperations().tableIdMap().get(primaryTable1);
- Assert.assertNotNull(masterTableId1);
-
- peerConn.tableOperations().create(peerTable1);
- String peerTableId1 = peerConn.tableOperations().tableIdMap().get(peerTable1);
- Assert.assertNotNull(peerTableId1);
-
- // Grant write permission
- peerConn.securityOperations().grantTablePermission(replicationUser.getPrincipal(), peerTable1, TablePermission.WRITE);
-
- // Replicate this table to the peerClusterName in a table with the peerTableId table id
- primaryConn.tableOperations().setProperty(primaryTable1, Property.TABLE_REPLICATION.getKey(), "true");
- primaryConn.tableOperations().setProperty(primaryTable1, Property.TABLE_REPLICATION_TARGET.getKey() + PEER_NAME, peerTableId1);
-
- // Write some data to table1
- BatchWriter bw = primaryConn.createBatchWriter(primaryTable1, new BatchWriterConfig());
- long masterTable1Records = 0l;
- for (int rows = 0; rows < 2500; rows++) {
- Mutation m = new Mutation(primaryTable1 + rows);
- for (int cols = 0; cols < 100; cols++) {
- String value = Integer.toString(cols);
- m.put(value, "", value);
- masterTable1Records++;
- }
- bw.addMutation(m);
- }
-
- bw.close();
-
- log.info("Wrote all data to primary cluster");
-
- Set<String> filesFor1 = primaryConn.replicationOperations().referencedFiles(primaryTable1);
-
- // Restart the tserver to force a close on the WAL
- for (ProcessReference proc : primary.getProcesses().get(ServerType.TABLET_SERVER)) {
- primary.killProcess(ServerType.TABLET_SERVER, proc);
- }
- primary.exec(TabletServer.class);
-
- log.info("Restarted the tserver");
-
- // Read the data -- the tserver is back up and running and tablets are assigned
- Iterators.size(primaryConn.createScanner(primaryTable1, Authorizations.EMPTY).iterator());
-
- // Wait for both tables to be replicated
- log.info("Waiting for {} for {}", filesFor1, primaryTable1);
- primaryConn.replicationOperations().drain(primaryTable1, filesFor1);
-
- long countTable = 0l;
- for (Entry<Key,Value> entry : peerConn.createScanner(peerTable1, Authorizations.EMPTY)) {
- countTable++;
- Assert.assertTrue("Found unexpected key-value" + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString()
- .startsWith(primaryTable1));
- }
-
- log.info("Found {} records in {}", countTable, peerTable1);
- Assert.assertEquals(masterTable1Records, countTable);
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/replication/MultiInstanceReplicationIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/replication/MultiInstanceReplicationIT.java b/test/src/test/java/org/apache/accumulo/test/replication/MultiInstanceReplicationIT.java
deleted file mode 100644
index b6888db..0000000
--- a/test/src/test/java/org/apache/accumulo/test/replication/MultiInstanceReplicationIT.java
+++ /dev/null
@@ -1,731 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.replication;
-
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.PartialKey;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.ReplicationSection;
-import org.apache.accumulo.core.protobuf.ProtobufUtil;
-import org.apache.accumulo.core.replication.ReplicationSchema.WorkSection;
-import org.apache.accumulo.core.replication.ReplicationTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.master.replication.SequentialWorkAssigner;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.minicluster.impl.ProcessReference;
-import org.apache.accumulo.server.replication.ReplicaSystemFactory;
-import org.apache.accumulo.server.replication.StatusUtil;
-import org.apache.accumulo.server.replication.proto.Replication.Status;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.accumulo.tserver.TabletServer;
-import org.apache.accumulo.tserver.replication.AccumuloReplicaSystem;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterators;
-
-/**
- * Replication tests which start at least two MAC instances and replicate data between them
- */
-public class MultiInstanceReplicationIT extends ConfigurableMacBase {
- private static final Logger log = LoggerFactory.getLogger(MultiInstanceReplicationIT.class);
-
- private ExecutorService executor;
-
- @Override
- public int defaultTimeoutSeconds() {
- return 10 * 60;
- }
-
- @Before
- public void createExecutor() {
- executor = Executors.newSingleThreadExecutor();
- }
-
- @After
- public void stopExecutor() {
- if (null != executor) {
- executor.shutdownNow();
- }
- }
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setNumTservers(1);
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "3s");
- cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "2M");
- cfg.setProperty(Property.GC_CYCLE_START, "1s");
- cfg.setProperty(Property.GC_CYCLE_DELAY, "5s");
- cfg.setProperty(Property.REPLICATION_WORK_ASSIGNMENT_SLEEP, "1s");
- cfg.setProperty(Property.MASTER_REPLICATION_SCAN_INTERVAL, "1s");
- cfg.setProperty(Property.REPLICATION_MAX_UNIT_SIZE, "8M");
- cfg.setProperty(Property.REPLICATION_NAME, "master");
- cfg.setProperty(Property.REPLICATION_WORK_ASSIGNER, SequentialWorkAssigner.class.getName());
- cfg.setProperty(Property.TSERV_TOTAL_MUTATION_QUEUE_MAX, "1M");
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- /**
- * Use the same SSL and credential provider configuration that is set up by AbstractMacIT for the other MAC used for replication
- */
- private void updatePeerConfigFromPrimary(MiniAccumuloConfigImpl primaryCfg, MiniAccumuloConfigImpl peerCfg) {
- // Set the same SSL information from the primary when present
- Map<String,String> primarySiteConfig = primaryCfg.getSiteConfig();
- if ("true".equals(primarySiteConfig.get(Property.INSTANCE_RPC_SSL_ENABLED.getKey()))) {
- Map<String,String> peerSiteConfig = new HashMap<String,String>();
- peerSiteConfig.put(Property.INSTANCE_RPC_SSL_ENABLED.getKey(), "true");
- String keystorePath = primarySiteConfig.get(Property.RPC_SSL_KEYSTORE_PATH.getKey());
- Assert.assertNotNull("Keystore Path was null", keystorePath);
- peerSiteConfig.put(Property.RPC_SSL_KEYSTORE_PATH.getKey(), keystorePath);
- String truststorePath = primarySiteConfig.get(Property.RPC_SSL_TRUSTSTORE_PATH.getKey());
- Assert.assertNotNull("Truststore Path was null", truststorePath);
- peerSiteConfig.put(Property.RPC_SSL_TRUSTSTORE_PATH.getKey(), truststorePath);
-
- // Passwords might be stored in CredentialProvider
- String keystorePassword = primarySiteConfig.get(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey());
- if (null != keystorePassword) {
- peerSiteConfig.put(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey(), keystorePassword);
- }
- String truststorePassword = primarySiteConfig.get(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey());
- if (null != truststorePassword) {
- peerSiteConfig.put(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey(), truststorePassword);
- }
-
- System.out.println("Setting site configuration for peer " + peerSiteConfig);
- peerCfg.setSiteConfig(peerSiteConfig);
- }
-
- // Use the CredentialProvider if the primary also uses one
- String credProvider = primarySiteConfig.get(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey());
- if (null != credProvider) {
- Map<String,String> peerSiteConfig = peerCfg.getSiteConfig();
- peerSiteConfig.put(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey(), credProvider);
- peerCfg.setSiteConfig(peerSiteConfig);
- }
- }
-
- @Test(timeout = 10 * 60 * 1000)
- public void dataWasReplicatedToThePeer() throws Exception {
- MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
- ROOT_PASSWORD);
- peerCfg.setNumTservers(1);
- peerCfg.setInstanceName("peer");
- peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
-
- updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
-
- MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg);
-
- peerCluster.start();
-
- try {
- final Connector connMaster = getConnector();
- final Connector connPeer = peerCluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
-
- ReplicationTable.setOnline(connMaster);
-
- String peerUserName = "peer", peerPassword = "foo";
-
- String peerClusterName = "peer";
-
- connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
-
- connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
- connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
-
- // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
- connMaster.instanceOperations().setProperty(
- Property.REPLICATION_PEERS.getKey() + peerClusterName,
- ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
- AccumuloReplicaSystem.buildConfiguration(peerCluster.getInstanceName(), peerCluster.getZooKeepers())));
-
- final String masterTable = "master", peerTable = "peer";
-
- connMaster.tableOperations().create(masterTable);
- String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable);
- Assert.assertNotNull(masterTableId);
-
- connPeer.tableOperations().create(peerTable);
- String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable);
- Assert.assertNotNull(peerTableId);
-
- connPeer.securityOperations().grantTablePermission(peerUserName, peerTable, TablePermission.WRITE);
-
- // Replicate this table to the peerClusterName in a table with the peerTableId table id
- connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION.getKey(), "true");
- connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId);
-
- // Write some data to table1
- BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig());
- for (int rows = 0; rows < 5000; rows++) {
- Mutation m = new Mutation(Integer.toString(rows));
- for (int cols = 0; cols < 100; cols++) {
- String value = Integer.toString(cols);
- m.put(value, "", value);
- }
- bw.addMutation(m);
- }
-
- bw.close();
-
- log.info("Wrote all data to master cluster");
-
- final Set<String> filesNeedingReplication = connMaster.replicationOperations().referencedFiles(masterTable);
-
- log.info("Files to replicate: " + filesNeedingReplication);
-
- for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
- cluster.killProcess(ServerType.TABLET_SERVER, proc);
- }
- cluster.exec(TabletServer.class);
-
- log.info("TabletServer restarted");
- Iterators.size(ReplicationTable.getScanner(connMaster).iterator());
- log.info("TabletServer is online");
-
- while (!ReplicationTable.isOnline(connMaster)) {
- log.info("Replication table still offline, waiting");
- Thread.sleep(5000);
- }
-
- log.info("");
- log.info("Fetching metadata records:");
- for (Entry<Key,Value> kv : connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
- if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
- log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
- } else {
- log.info(kv.getKey().toStringNoTruncate() + " " + kv.getValue());
- }
- }
-
- log.info("");
- log.info("Fetching replication records:");
- for (Entry<Key,Value> kv : ReplicationTable.getScanner(connMaster)) {
- log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
- }
-
- Future<Boolean> future = executor.submit(new Callable<Boolean>() {
-
- @Override
- public Boolean call() throws Exception {
- connMaster.replicationOperations().drain(masterTable, filesNeedingReplication);
- log.info("Drain completed");
- return true;
- }
-
- });
-
- try {
- future.get(60, TimeUnit.SECONDS);
- } catch (TimeoutException e) {
- future.cancel(true);
- Assert.fail("Drain did not finish within 60 seconds");
- } finally {
- executor.shutdownNow();
- }
-
- log.info("drain completed");
-
- log.info("");
- log.info("Fetching metadata records:");
- for (Entry<Key,Value> kv : connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
- if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
- log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
- } else {
- log.info(kv.getKey().toStringNoTruncate() + " " + kv.getValue());
- }
- }
-
- log.info("");
- log.info("Fetching replication records:");
- for (Entry<Key,Value> kv : ReplicationTable.getScanner(connMaster)) {
- log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
- }
-
- Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY), peer = connPeer.createScanner(peerTable, Authorizations.EMPTY);
- Iterator<Entry<Key,Value>> masterIter = master.iterator(), peerIter = peer.iterator();
- Entry<Key,Value> masterEntry = null, peerEntry = null;
- while (masterIter.hasNext() && peerIter.hasNext()) {
- masterEntry = masterIter.next();
- peerEntry = peerIter.next();
- Assert.assertEquals(masterEntry.getKey() + " was not equal to " + peerEntry.getKey(), 0,
- masterEntry.getKey().compareTo(peerEntry.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS));
- Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue());
- }
-
- log.info("Last master entry: " + masterEntry);
- log.info("Last peer entry: " + peerEntry);
-
- Assert.assertFalse("Had more data to read from the master", masterIter.hasNext());
- Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext());
- } finally {
- peerCluster.stop();
- }
- }
-
- @Test
- public void dataReplicatedToCorrectTable() throws Exception {
- MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
- ROOT_PASSWORD);
- peerCfg.setNumTservers(1);
- peerCfg.setInstanceName("peer");
- peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
-
- updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
-
- MiniAccumuloClusterImpl peer1Cluster = new MiniAccumuloClusterImpl(peerCfg);
-
- peer1Cluster.start();
-
- try {
- Connector connMaster = getConnector();
- Connector connPeer = peer1Cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
-
- String peerClusterName = "peer";
- String peerUserName = "peer", peerPassword = "foo";
-
- // Create local user
- connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
-
- connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
- connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
-
- // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
- connMaster.instanceOperations().setProperty(
- Property.REPLICATION_PEERS.getKey() + peerClusterName,
- ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
- AccumuloReplicaSystem.buildConfiguration(peer1Cluster.getInstanceName(), peer1Cluster.getZooKeepers())));
-
- String masterTable1 = "master1", peerTable1 = "peer1", masterTable2 = "master2", peerTable2 = "peer2";
-
- // Create tables
- connMaster.tableOperations().create(masterTable1);
- String masterTableId1 = connMaster.tableOperations().tableIdMap().get(masterTable1);
- Assert.assertNotNull(masterTableId1);
-
- connMaster.tableOperations().create(masterTable2);
- String masterTableId2 = connMaster.tableOperations().tableIdMap().get(masterTable2);
- Assert.assertNotNull(masterTableId2);
-
- connPeer.tableOperations().create(peerTable1);
- String peerTableId1 = connPeer.tableOperations().tableIdMap().get(peerTable1);
- Assert.assertNotNull(peerTableId1);
-
- connPeer.tableOperations().create(peerTable2);
- String peerTableId2 = connPeer.tableOperations().tableIdMap().get(peerTable2);
- Assert.assertNotNull(peerTableId2);
-
- // Grant write permission
- connPeer.securityOperations().grantTablePermission(peerUserName, peerTable1, TablePermission.WRITE);
- connPeer.securityOperations().grantTablePermission(peerUserName, peerTable2, TablePermission.WRITE);
-
- // Replicate this table to the peerClusterName in a table with the peerTableId table id
- connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION.getKey(), "true");
- connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId1);
-
- connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION.getKey(), "true");
- connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId2);
-
- // Write some data to table1
- BatchWriter bw = connMaster.createBatchWriter(masterTable1, new BatchWriterConfig());
- long masterTable1Records = 0l;
- for (int rows = 0; rows < 2500; rows++) {
- Mutation m = new Mutation(masterTable1 + rows);
- for (int cols = 0; cols < 100; cols++) {
- String value = Integer.toString(cols);
- m.put(value, "", value);
- masterTable1Records++;
- }
- bw.addMutation(m);
- }
-
- bw.close();
-
- // Write some data to table2
- bw = connMaster.createBatchWriter(masterTable2, new BatchWriterConfig());
- long masterTable2Records = 0l;
- for (int rows = 0; rows < 2500; rows++) {
- Mutation m = new Mutation(masterTable2 + rows);
- for (int cols = 0; cols < 100; cols++) {
- String value = Integer.toString(cols);
- m.put(value, "", value);
- masterTable2Records++;
- }
- bw.addMutation(m);
- }
-
- bw.close();
-
- log.info("Wrote all data to master cluster");
-
- Set<String> filesFor1 = connMaster.replicationOperations().referencedFiles(masterTable1), filesFor2 = connMaster.replicationOperations().referencedFiles(
- masterTable2);
-
- log.info("Files to replicate for table1: " + filesFor1);
- log.info("Files to replicate for table2: " + filesFor2);
-
- // Restart the tserver to force a close on the WAL
- for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
- cluster.killProcess(ServerType.TABLET_SERVER, proc);
- }
- cluster.exec(TabletServer.class);
-
- log.info("Restarted the tserver");
-
- // Read the data -- the tserver is back up and running
- Iterators.size(connMaster.createScanner(masterTable1, Authorizations.EMPTY).iterator());
-
- while (!ReplicationTable.isOnline(connMaster)) {
- log.info("Replication table still offline, waiting");
- Thread.sleep(5000);
- }
-
- // Wait for both tables to be replicated
- log.info("Waiting for {} for {}", filesFor1, masterTable1);
- connMaster.replicationOperations().drain(masterTable1, filesFor1);
-
- log.info("Waiting for {} for {}", filesFor2, masterTable2);
- connMaster.replicationOperations().drain(masterTable2, filesFor2);
-
- long countTable = 0l;
- for (Entry<Key,Value> entry : connPeer.createScanner(peerTable1, Authorizations.EMPTY)) {
- countTable++;
- Assert.assertTrue("Found unexpected key-value" + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString()
- .startsWith(masterTable1));
- }
-
- log.info("Found {} records in {}", countTable, peerTable1);
- Assert.assertEquals(masterTable1Records, countTable);
-
- countTable = 0l;
- for (Entry<Key,Value> entry : connPeer.createScanner(peerTable2, Authorizations.EMPTY)) {
- countTable++;
- Assert.assertTrue("Found unexpected key-value" + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString()
- .startsWith(masterTable2));
- }
-
- log.info("Found {} records in {}", countTable, peerTable2);
- Assert.assertEquals(masterTable2Records, countTable);
-
- } finally {
- peer1Cluster.stop();
- }
- }
-
- @Test
- public void dataWasReplicatedToThePeerWithoutDrain() throws Exception {
- MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
- ROOT_PASSWORD);
- peerCfg.setNumTservers(1);
- peerCfg.setInstanceName("peer");
- peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
-
- updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
-
- MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg);
-
- peerCluster.start();
-
- Connector connMaster = getConnector();
- Connector connPeer = peerCluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
-
- String peerUserName = "repl";
- String peerPassword = "passwd";
-
- // Create a user on the peer for replication to use
- connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
-
- String peerClusterName = "peer";
-
- // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
- connMaster.instanceOperations().setProperty(
- Property.REPLICATION_PEERS.getKey() + peerClusterName,
- ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
- AccumuloReplicaSystem.buildConfiguration(peerCluster.getInstanceName(), peerCluster.getZooKeepers())));
-
- // Configure the credentials we should use to authenticate ourselves to the peer for replication
- connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
- connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
-
- String masterTable = "master", peerTable = "peer";
-
- connMaster.tableOperations().create(masterTable);
- String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable);
- Assert.assertNotNull(masterTableId);
-
- connPeer.tableOperations().create(peerTable);
- String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable);
- Assert.assertNotNull(peerTableId);
-
- // Give our replication user the ability to write to the table
- connPeer.securityOperations().grantTablePermission(peerUserName, peerTable, TablePermission.WRITE);
-
- // Replicate this table to the peerClusterName in a table with the peerTableId table id
- connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION.getKey(), "true");
- connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId);
-
- // Write some data to table1
- BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig());
- for (int rows = 0; rows < 5000; rows++) {
- Mutation m = new Mutation(Integer.toString(rows));
- for (int cols = 0; cols < 100; cols++) {
- String value = Integer.toString(cols);
- m.put(value, "", value);
- }
- bw.addMutation(m);
- }
-
- bw.close();
-
- log.info("Wrote all data to master cluster");
-
- Set<String> files = connMaster.replicationOperations().referencedFiles(masterTable);
-
-    log.info("Files to replicate: " + files);
-
- for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
- cluster.killProcess(ServerType.TABLET_SERVER, proc);
- }
-
- cluster.exec(TabletServer.class);
-
- while (!ReplicationTable.isOnline(connMaster)) {
- log.info("Replication table still offline, waiting");
- Thread.sleep(5000);
- }
-
- Iterators.size(connMaster.createScanner(masterTable, Authorizations.EMPTY).iterator());
-
- for (Entry<Key,Value> kv : ReplicationTable.getScanner(connMaster)) {
- log.debug(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
- }
-
- connMaster.replicationOperations().drain(masterTable, files);
-
- Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY), peer = connPeer.createScanner(peerTable, Authorizations.EMPTY);
- Iterator<Entry<Key,Value>> masterIter = master.iterator(), peerIter = peer.iterator();
- while (masterIter.hasNext() && peerIter.hasNext()) {
- Entry<Key,Value> masterEntry = masterIter.next(), peerEntry = peerIter.next();
-      Assert.assertEquals(masterEntry.getKey() + " was not equal to " + peerEntry.getKey(), 0,
-          masterEntry.getKey().compareTo(peerEntry.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS));
- Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue());
- }
-
- Assert.assertFalse("Had more data to read from the master", masterIter.hasNext());
- Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext());
-
- peerCluster.stop();
- }
-
- @Test
- public void dataReplicatedToCorrectTableWithoutDrain() throws Exception {
- MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
- ROOT_PASSWORD);
- peerCfg.setNumTservers(1);
- peerCfg.setInstanceName("peer");
- peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
-
- updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
-
- MiniAccumuloClusterImpl peer1Cluster = new MiniAccumuloClusterImpl(peerCfg);
-
- peer1Cluster.start();
-
- try {
- Connector connMaster = getConnector();
- Connector connPeer = peer1Cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
-
- String peerClusterName = "peer";
-
- String peerUserName = "repl";
- String peerPassword = "passwd";
-
- // Create a user on the peer for replication to use
- connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
-
- // Configure the credentials we should use to authenticate ourselves to the peer for replication
- connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
- connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
-
- // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
- connMaster.instanceOperations().setProperty(
- Property.REPLICATION_PEERS.getKey() + peerClusterName,
- ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
- AccumuloReplicaSystem.buildConfiguration(peer1Cluster.getInstanceName(), peer1Cluster.getZooKeepers())));
-
- String masterTable1 = "master1", peerTable1 = "peer1", masterTable2 = "master2", peerTable2 = "peer2";
-
- connMaster.tableOperations().create(masterTable1);
- String masterTableId1 = connMaster.tableOperations().tableIdMap().get(masterTable1);
- Assert.assertNotNull(masterTableId1);
-
- connMaster.tableOperations().create(masterTable2);
- String masterTableId2 = connMaster.tableOperations().tableIdMap().get(masterTable2);
- Assert.assertNotNull(masterTableId2);
-
- connPeer.tableOperations().create(peerTable1);
- String peerTableId1 = connPeer.tableOperations().tableIdMap().get(peerTable1);
- Assert.assertNotNull(peerTableId1);
-
- connPeer.tableOperations().create(peerTable2);
- String peerTableId2 = connPeer.tableOperations().tableIdMap().get(peerTable2);
- Assert.assertNotNull(peerTableId2);
-
- // Give our replication user the ability to write to the tables
- connPeer.securityOperations().grantTablePermission(peerUserName, peerTable1, TablePermission.WRITE);
- connPeer.securityOperations().grantTablePermission(peerUserName, peerTable2, TablePermission.WRITE);
-
- // Replicate this table to the peerClusterName in a table with the peerTableId table id
- connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION.getKey(), "true");
- connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId1);
-
- connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION.getKey(), "true");
- connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId2);
-
- // Write some data to table1
- BatchWriter bw = connMaster.createBatchWriter(masterTable1, new BatchWriterConfig());
- for (int rows = 0; rows < 2500; rows++) {
- Mutation m = new Mutation(masterTable1 + rows);
- for (int cols = 0; cols < 100; cols++) {
- String value = Integer.toString(cols);
- m.put(value, "", value);
- }
- bw.addMutation(m);
- }
-
- bw.close();
-
- // Write some data to table2
- bw = connMaster.createBatchWriter(masterTable2, new BatchWriterConfig());
- for (int rows = 0; rows < 2500; rows++) {
- Mutation m = new Mutation(masterTable2 + rows);
- for (int cols = 0; cols < 100; cols++) {
- String value = Integer.toString(cols);
- m.put(value, "", value);
- }
- bw.addMutation(m);
- }
-
- bw.close();
-
- log.info("Wrote all data to master cluster");
-
- for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
- cluster.killProcess(ServerType.TABLET_SERVER, proc);
- }
-
- cluster.exec(TabletServer.class);
-
- while (!ReplicationTable.isOnline(connMaster)) {
- log.info("Replication table still offline, waiting");
- Thread.sleep(5000);
- }
-
- // Wait until we fully replicated something
- boolean fullyReplicated = false;
- for (int i = 0; i < 10 && !fullyReplicated; i++) {
- UtilWaitThread.sleep(2000);
-
- Scanner s = ReplicationTable.getScanner(connMaster);
- WorkSection.limit(s);
- for (Entry<Key,Value> entry : s) {
- Status status = Status.parseFrom(entry.getValue().get());
- if (StatusUtil.isFullyReplicated(status)) {
-            fullyReplicated = true;
- }
- }
- }
-
-      Assert.assertTrue("Did not find any fully replicated status records", fullyReplicated);
-
- // We have to wait for the master to assign the replication work, a local tserver to process it, and then the remote tserver to replay it
- // Be cautious in how quickly we assert that the data is present on the peer
-      long countTable = 0L;
- for (int i = 0; i < 10; i++) {
- for (Entry<Key,Value> entry : connPeer.createScanner(peerTable1, Authorizations.EMPTY)) {
- countTable++;
-          Assert.assertTrue("Found unexpected key-value " + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString()
-              .startsWith(masterTable1));
- }
-
- log.info("Found {} records in {}", countTable, peerTable1);
-
-        if (0L == countTable) {
- Thread.sleep(5000);
- } else {
- break;
- }
- }
-
- Assert.assertTrue("Found no records in " + peerTable1 + " in the peer cluster", countTable > 0);
-
- // We have to wait for the master to assign the replication work, a local tserver to process it, and then the remote tserver to replay it
- // Be cautious in how quickly we assert that the data is present on the peer
- for (int i = 0; i < 10; i++) {
-        countTable = 0L;
- for (Entry<Key,Value> entry : connPeer.createScanner(peerTable2, Authorizations.EMPTY)) {
- countTable++;
-          Assert.assertTrue("Found unexpected key-value " + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString()
-              .startsWith(masterTable2));
- }
-
- log.info("Found {} records in {}", countTable, peerTable2);
-
-        if (0L == countTable) {
- Thread.sleep(5000);
- } else {
- break;
- }
- }
-
- Assert.assertTrue("Found no records in " + peerTable2 + " in the peer cluster", countTable > 0);
-
- } finally {
- peer1Cluster.stop();
- }
- }
-}
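For context on the ITs being relocated: the replication tests above all verify
correctness with the same drain-based pattern -- snapshot the WAL files the
source table references, bounce the tserver so those WALs are closed, then
block on drain() before scanning the peer. A minimal sketch of that pattern
(assuming an existing Connector named conn and a table named "t"; both names
are illustrative, and imports are as in the deleted file above):

    // Snapshot the WAL files currently referenced by the table's replication records.
    Set<String> files = conn.replicationOperations().referencedFiles("t");

    // ... restart the tablet server so its open WALs are closed and become replicable ...

    // Block until every file in the snapshot has been replicated to all configured peers.
    conn.replicationOperations().drain("t", files);

Snapshotting before the restart bounds the wait: draining against a live file
set could otherwise pick up WALs created after the writes under test.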
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/replication/MultiTserverReplicationIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/replication/MultiTserverReplicationIT.java b/test/src/test/java/org/apache/accumulo/test/replication/MultiTserverReplicationIT.java
deleted file mode 100644
index 72cb569..0000000
--- a/test/src/test/java/org/apache/accumulo/test/replication/MultiTserverReplicationIT.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.replication;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.HashSet;
-import java.util.Set;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.replication.ReplicationConstants;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.ZooReader;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Assert;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterables;
-import com.google.common.net.HostAndPort;
-
-/**
- *
- */
-public class MultiTserverReplicationIT extends ConfigurableMacBase {
- private static final Logger log = LoggerFactory.getLogger(MultiTserverReplicationIT.class);
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setNumTservers(2);
- }
-
- @Test
- public void tserverReplicationServicePortsAreAdvertised() throws Exception {
- // Wait for the cluster to be up
- Connector conn = getConnector();
- Instance inst = conn.getInstance();
-
- // Wait for a tserver to come up to fulfill this request
- conn.tableOperations().create("foo");
- Scanner s = conn.createScanner("foo", Authorizations.EMPTY);
- Assert.assertEquals(0, Iterables.size(s));
-
- ZooReader zreader = new ZooReader(inst.getZooKeepers(), inst.getZooKeepersSessionTimeOut());
- Set<String> tserverHost = new HashSet<>();
- tserverHost.addAll(zreader.getChildren(ZooUtil.getRoot(inst) + Constants.ZTSERVERS));
-
- Set<HostAndPort> replicationServices = new HashSet<>();
-
- for (String tserver : tserverHost) {
- try {
- byte[] portData = zreader.getData(ZooUtil.getRoot(inst) + ReplicationConstants.ZOO_TSERVERS + "/" + tserver, null);
- HostAndPort replAddress = HostAndPort.fromString(new String(portData, UTF_8));
- replicationServices.add(replAddress);
- } catch (Exception e) {
- log.error("Could not find port for {}", tserver, e);
- Assert.fail("Did not find replication port advertisement for " + tserver);
- }
- }
-
-    // Each tserver should also have its own replication service running internally
- Assert.assertEquals("Expected an equal number of replication servicers and tservers", tserverHost.size(), replicationServices.size());
- }
-
- @Test
- public void masterReplicationServicePortsAreAdvertised() throws Exception {
- // Wait for the cluster to be up
- Connector conn = getConnector();
- Instance inst = conn.getInstance();
-
- // Wait for a tserver to come up to fulfill this request
- conn.tableOperations().create("foo");
- Scanner s = conn.createScanner("foo", Authorizations.EMPTY);
- Assert.assertEquals(0, Iterables.size(s));
-
- ZooReader zreader = new ZooReader(inst.getZooKeepers(), inst.getZooKeepersSessionTimeOut());
-
- // Should have one master instance
- Assert.assertEquals(1, inst.getMasterLocations().size());
-
- // Get the master thrift service addr
- String masterAddr = Iterables.getOnlyElement(inst.getMasterLocations());
-
- // Get the master replication coordinator addr
- String replCoordAddr = new String(zreader.getData(ZooUtil.getRoot(inst) + Constants.ZMASTER_REPLICATION_COORDINATOR_ADDR, null), UTF_8);
-
- // They shouldn't be the same
- Assert.assertNotEquals(masterAddr, replCoordAddr);
-
- // Neither should be zero as the port
- Assert.assertNotEquals(0, HostAndPort.fromString(masterAddr).getPort());
- Assert.assertNotEquals(0, HostAndPort.fromString(replCoordAddr).getPort());
- }
-}
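Condensed from the first test above, the lookup that turns each live tserver
into its advertised replication endpoint (assuming a Connector named conn;
error handling and imports as in the deleted file):

    Instance inst = conn.getInstance();
    ZooReader zreader = new ZooReader(inst.getZooKeepers(), inst.getZooKeepersSessionTimeOut());
    // Enumerate live tservers, then read the host:port payload each one
    // registered under the replication tservers node.
    for (String tserver : zreader.getChildren(ZooUtil.getRoot(inst) + Constants.ZTSERVERS)) {
      byte[] portData = zreader.getData(ZooUtil.getRoot(inst) + ReplicationConstants.ZOO_TSERVERS + "/" + tserver, null);
      HostAndPort replAddress = HostAndPort.fromString(new String(portData, UTF_8));
    }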
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/replication/UnorderedWorkAssignerReplicationIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/replication/UnorderedWorkAssignerReplicationIT.java b/test/src/test/java/org/apache/accumulo/test/replication/UnorderedWorkAssignerReplicationIT.java
deleted file mode 100644
index 88224b5..0000000
--- a/test/src/test/java/org/apache/accumulo/test/replication/UnorderedWorkAssignerReplicationIT.java
+++ /dev/null
@@ -1,731 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.replication;
-
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.PartialKey;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.ReplicationSection;
-import org.apache.accumulo.core.protobuf.ProtobufUtil;
-import org.apache.accumulo.core.replication.ReplicationSchema.WorkSection;
-import org.apache.accumulo.core.replication.ReplicationTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.master.replication.UnorderedWorkAssigner;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.minicluster.impl.ProcessReference;
-import org.apache.accumulo.server.replication.ReplicaSystemFactory;
-import org.apache.accumulo.server.replication.StatusUtil;
-import org.apache.accumulo.server.replication.proto.Replication.Status;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.accumulo.tserver.TabletServer;
-import org.apache.accumulo.tserver.replication.AccumuloReplicaSystem;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterators;
-
-public class UnorderedWorkAssignerReplicationIT extends ConfigurableMacBase {
- private static final Logger log = LoggerFactory.getLogger(UnorderedWorkAssignerReplicationIT.class);
-
- private ExecutorService executor;
- private int timeoutFactor = 1;
-
- @Before
- public void createExecutor() {
- executor = Executors.newSingleThreadExecutor();
-
- try {
- timeoutFactor = Integer.parseInt(System.getProperty("timeout.factor"));
- } catch (NumberFormatException exception) {
- log.warn("Could not parse timeout.factor, not increasing timeout.");
- }
-
- Assert.assertTrue("The timeout factor must be a positive, non-zero value", timeoutFactor > 0);
- }
-
- @After
- public void stopExecutor() {
- if (null != executor) {
- executor.shutdownNow();
- }
- }
-
- @Override
- public int defaultTimeoutSeconds() {
- return 60 * 5;
- }
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setNumTservers(1);
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "10s");
- cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "2M");
- cfg.setProperty(Property.GC_CYCLE_START, "1s");
- cfg.setProperty(Property.GC_CYCLE_DELAY, "5s");
- cfg.setProperty(Property.REPLICATION_WORK_ASSIGNMENT_SLEEP, "1s");
- cfg.setProperty(Property.MASTER_REPLICATION_SCAN_INTERVAL, "1s");
- cfg.setProperty(Property.REPLICATION_MAX_UNIT_SIZE, "8M");
- cfg.setProperty(Property.REPLICATION_NAME, "master");
- cfg.setProperty(Property.REPLICATION_WORK_ASSIGNER, UnorderedWorkAssigner.class.getName());
- cfg.setProperty(Property.TSERV_TOTAL_MUTATION_QUEUE_MAX, "1M");
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- /**
- * Use the same SSL and credential provider configuration that is set up by AbstractMacIT for the other MAC used for replication
- */
- private void updatePeerConfigFromPrimary(MiniAccumuloConfigImpl primaryCfg, MiniAccumuloConfigImpl peerCfg) {
- // Set the same SSL information from the primary when present
- Map<String,String> primarySiteConfig = primaryCfg.getSiteConfig();
- if ("true".equals(primarySiteConfig.get(Property.INSTANCE_RPC_SSL_ENABLED.getKey()))) {
- Map<String,String> peerSiteConfig = new HashMap<String,String>();
- peerSiteConfig.put(Property.INSTANCE_RPC_SSL_ENABLED.getKey(), "true");
- String keystorePath = primarySiteConfig.get(Property.RPC_SSL_KEYSTORE_PATH.getKey());
- Assert.assertNotNull("Keystore Path was null", keystorePath);
- peerSiteConfig.put(Property.RPC_SSL_KEYSTORE_PATH.getKey(), keystorePath);
- String truststorePath = primarySiteConfig.get(Property.RPC_SSL_TRUSTSTORE_PATH.getKey());
- Assert.assertNotNull("Truststore Path was null", truststorePath);
- peerSiteConfig.put(Property.RPC_SSL_TRUSTSTORE_PATH.getKey(), truststorePath);
-
- // Passwords might be stored in CredentialProvider
- String keystorePassword = primarySiteConfig.get(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey());
- if (null != keystorePassword) {
- peerSiteConfig.put(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey(), keystorePassword);
- }
- String truststorePassword = primarySiteConfig.get(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey());
- if (null != truststorePassword) {
- peerSiteConfig.put(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey(), truststorePassword);
- }
-
- System.out.println("Setting site configuration for peer " + peerSiteConfig);
- peerCfg.setSiteConfig(peerSiteConfig);
- }
-
- // Use the CredentialProvider if the primary also uses one
- String credProvider = primarySiteConfig.get(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey());
- if (null != credProvider) {
- Map<String,String> peerSiteConfig = peerCfg.getSiteConfig();
- peerSiteConfig.put(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey(), credProvider);
- peerCfg.setSiteConfig(peerSiteConfig);
- }
- }
-
- @Test
- public void dataWasReplicatedToThePeer() throws Exception {
- MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
- ROOT_PASSWORD);
- peerCfg.setNumTservers(1);
- peerCfg.setInstanceName("peer");
- updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
- peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
- MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg);
-
- peerCluster.start();
-
- try {
- final Connector connMaster = getConnector();
- final Connector connPeer = peerCluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
-
- ReplicationTable.setOnline(connMaster);
-
- String peerUserName = "peer", peerPassword = "foo";
-
- String peerClusterName = "peer";
-
- connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
-
- connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
- connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
-
- // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
- connMaster.instanceOperations().setProperty(
- Property.REPLICATION_PEERS.getKey() + peerClusterName,
- ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
- AccumuloReplicaSystem.buildConfiguration(peerCluster.getInstanceName(), peerCluster.getZooKeepers())));
-
- final String masterTable = "master", peerTable = "peer";
-
- connMaster.tableOperations().create(masterTable);
- String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable);
- Assert.assertNotNull(masterTableId);
-
- connPeer.tableOperations().create(peerTable);
- String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable);
- Assert.assertNotNull(peerTableId);
-
- connPeer.securityOperations().grantTablePermission(peerUserName, peerTable, TablePermission.WRITE);
-
- // Replicate this table to the peerClusterName in a table with the peerTableId table id
- connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION.getKey(), "true");
- connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId);
-
- // Wait for zookeeper updates (configuration) to propagate
- UtilWaitThread.sleep(3 * 1000);
-
- // Write some data to table1
- BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig());
- for (int rows = 0; rows < 5000; rows++) {
- Mutation m = new Mutation(Integer.toString(rows));
- for (int cols = 0; cols < 100; cols++) {
- String value = Integer.toString(cols);
- m.put(value, "", value);
- }
- bw.addMutation(m);
- }
-
- bw.close();
-
- log.info("Wrote all data to master cluster");
-
- final Set<String> filesNeedingReplication = connMaster.replicationOperations().referencedFiles(masterTable);
-
- for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
- cluster.killProcess(ServerType.TABLET_SERVER, proc);
- }
- cluster.exec(TabletServer.class);
-
- log.info("TabletServer restarted");
- Iterators.size(ReplicationTable.getScanner(connMaster).iterator());
- log.info("TabletServer is online");
-
- log.info("");
- log.info("Fetching metadata records:");
- for (Entry<Key,Value> kv : connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
- if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
- log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
- } else {
- log.info(kv.getKey().toStringNoTruncate() + " " + kv.getValue());
- }
- }
-
- log.info("");
- log.info("Fetching replication records:");
- for (Entry<Key,Value> kv : ReplicationTable.getScanner(connMaster)) {
- log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
- }
-
- Future<Boolean> future = executor.submit(new Callable<Boolean>() {
-
- @Override
- public Boolean call() throws Exception {
- connMaster.replicationOperations().drain(masterTable, filesNeedingReplication);
- log.info("Drain completed");
- return true;
- }
-
- });
-
- long timeoutSeconds = timeoutFactor * 30;
- try {
- future.get(timeoutSeconds, TimeUnit.SECONDS);
- } catch (TimeoutException e) {
- future.cancel(true);
- Assert.fail("Drain did not finish within " + timeoutSeconds + " seconds");
- }
-
- log.info("drain completed");
-
- log.info("");
- log.info("Fetching metadata records:");
- for (Entry<Key,Value> kv : connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
- if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
- log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
- } else {
- log.info(kv.getKey().toStringNoTruncate() + " " + kv.getValue());
- }
- }
-
- log.info("");
- log.info("Fetching replication records:");
- for (Entry<Key,Value> kv : ReplicationTable.getScanner(connMaster)) {
- log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
- }
-
- Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY), peer = connPeer.createScanner(peerTable, Authorizations.EMPTY);
- Iterator<Entry<Key,Value>> masterIter = master.iterator(), peerIter = peer.iterator();
- Entry<Key,Value> masterEntry = null, peerEntry = null;
- while (masterIter.hasNext() && peerIter.hasNext()) {
- masterEntry = masterIter.next();
- peerEntry = peerIter.next();
- Assert.assertEquals(masterEntry.getKey() + " was not equal to " + peerEntry.getKey(), 0,
- masterEntry.getKey().compareTo(peerEntry.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS));
- Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue());
- }
-
- log.info("Last master entry: " + masterEntry);
- log.info("Last peer entry: " + peerEntry);
-
- Assert.assertFalse("Had more data to read from the master", masterIter.hasNext());
- Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext());
- } finally {
- peerCluster.stop();
- }
- }
-
- @Test
- public void dataReplicatedToCorrectTable() throws Exception {
- MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
- ROOT_PASSWORD);
- peerCfg.setNumTservers(1);
- peerCfg.setInstanceName("peer");
- updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
- peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
- MiniAccumuloClusterImpl peer1Cluster = new MiniAccumuloClusterImpl(peerCfg);
-
- peer1Cluster.start();
-
- try {
- Connector connMaster = getConnector();
- Connector connPeer = peer1Cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
-
- String peerClusterName = "peer";
- String peerUserName = "peer", peerPassword = "foo";
-
- // Create local user
- connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
-
- connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
- connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
-
- // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
- connMaster.instanceOperations().setProperty(
- Property.REPLICATION_PEERS.getKey() + peerClusterName,
- ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
- AccumuloReplicaSystem.buildConfiguration(peer1Cluster.getInstanceName(), peer1Cluster.getZooKeepers())));
-
- String masterTable1 = "master1", peerTable1 = "peer1", masterTable2 = "master2", peerTable2 = "peer2";
-
- // Create tables
- connMaster.tableOperations().create(masterTable1);
- String masterTableId1 = connMaster.tableOperations().tableIdMap().get(masterTable1);
- Assert.assertNotNull(masterTableId1);
-
- connMaster.tableOperations().create(masterTable2);
- String masterTableId2 = connMaster.tableOperations().tableIdMap().get(masterTable2);
- Assert.assertNotNull(masterTableId2);
-
- connPeer.tableOperations().create(peerTable1);
- String peerTableId1 = connPeer.tableOperations().tableIdMap().get(peerTable1);
- Assert.assertNotNull(peerTableId1);
-
- connPeer.tableOperations().create(peerTable2);
- String peerTableId2 = connPeer.tableOperations().tableIdMap().get(peerTable2);
- Assert.assertNotNull(peerTableId2);
-
- // Grant write permission
- connPeer.securityOperations().grantTablePermission(peerUserName, peerTable1, TablePermission.WRITE);
- connPeer.securityOperations().grantTablePermission(peerUserName, peerTable2, TablePermission.WRITE);
-
- // Replicate this table to the peerClusterName in a table with the peerTableId table id
- connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION.getKey(), "true");
- connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId1);
-
- connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION.getKey(), "true");
- connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId2);
-
-      // Wait for zookeeper updates (configuration) to propagate
- UtilWaitThread.sleep(3 * 1000);
-
- // Write some data to table1
- BatchWriter bw = connMaster.createBatchWriter(masterTable1, new BatchWriterConfig());
-      long masterTable1Records = 0L;
- for (int rows = 0; rows < 2500; rows++) {
- Mutation m = new Mutation(masterTable1 + rows);
- for (int cols = 0; cols < 100; cols++) {
- String value = Integer.toString(cols);
- m.put(value, "", value);
- masterTable1Records++;
- }
- bw.addMutation(m);
- }
-
- bw.close();
-
- // Write some data to table2
- bw = connMaster.createBatchWriter(masterTable2, new BatchWriterConfig());
-      long masterTable2Records = 0L;
- for (int rows = 0; rows < 2500; rows++) {
- Mutation m = new Mutation(masterTable2 + rows);
- for (int cols = 0; cols < 100; cols++) {
- String value = Integer.toString(cols);
- m.put(value, "", value);
- masterTable2Records++;
- }
- bw.addMutation(m);
- }
-
- bw.close();
-
- log.info("Wrote all data to master cluster");
-
- Set<String> filesFor1 = connMaster.replicationOperations().referencedFiles(masterTable1), filesFor2 = connMaster.replicationOperations().referencedFiles(
- masterTable2);
-
- while (!ReplicationTable.isOnline(connMaster)) {
- Thread.sleep(500);
- }
-
- // Restart the tserver to force a close on the WAL
- for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
- cluster.killProcess(ServerType.TABLET_SERVER, proc);
- }
- cluster.exec(TabletServer.class);
-
- log.info("Restarted the tserver");
-
- // Read the data -- the tserver is back up and running
- Iterators.size(connMaster.createScanner(masterTable1, Authorizations.EMPTY).iterator());
-
- // Wait for both tables to be replicated
- log.info("Waiting for {} for {}", filesFor1, masterTable1);
- connMaster.replicationOperations().drain(masterTable1, filesFor1);
-
- log.info("Waiting for {} for {}", filesFor2, masterTable2);
- connMaster.replicationOperations().drain(masterTable2, filesFor2);
-
-      long countTable = 0L;
-      for (int i = 0; i < 5; i++) {
-        countTable = 0L;
- for (Entry<Key,Value> entry : connPeer.createScanner(peerTable1, Authorizations.EMPTY)) {
- countTable++;
-          Assert.assertTrue("Found unexpected key-value " + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString()
-              .startsWith(masterTable1));
- }
-
- log.info("Found {} records in {}", countTable, peerTable1);
-
- if (masterTable1Records != countTable) {
- log.warn("Did not find {} expected records in {}, only found {}", masterTable1Records, peerTable1, countTable);
- }
- }
-
- Assert.assertEquals(masterTable1Records, countTable);
-
- for (int i = 0; i < 5; i++) {
-        countTable = 0L;
- for (Entry<Key,Value> entry : connPeer.createScanner(peerTable2, Authorizations.EMPTY)) {
- countTable++;
-          Assert.assertTrue("Found unexpected key-value " + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString()
-              .startsWith(masterTable2));
- }
-
- log.info("Found {} records in {}", countTable, peerTable2);
-
- if (masterTable2Records != countTable) {
- log.warn("Did not find {} expected records in {}, only found {}", masterTable2Records, peerTable2, countTable);
- }
- }
-
- Assert.assertEquals(masterTable2Records, countTable);
-
- } finally {
- peer1Cluster.stop();
- }
- }
-
- @Test
- public void dataWasReplicatedToThePeerWithoutDrain() throws Exception {
- MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
- ROOT_PASSWORD);
- peerCfg.setNumTservers(1);
- peerCfg.setInstanceName("peer");
- updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
- peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
- MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg);
-
- peerCluster.start();
-
- Connector connMaster = getConnector();
- Connector connPeer = peerCluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
-
- String peerUserName = "repl";
- String peerPassword = "passwd";
-
- // Create a user on the peer for replication to use
- connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
-
- String peerClusterName = "peer";
-
- // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
- connMaster.instanceOperations().setProperty(
- Property.REPLICATION_PEERS.getKey() + peerClusterName,
- ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
- AccumuloReplicaSystem.buildConfiguration(peerCluster.getInstanceName(), peerCluster.getZooKeepers())));
-
- // Configure the credentials we should use to authenticate ourselves to the peer for replication
- connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
- connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
-
- String masterTable = "master", peerTable = "peer";
-
- connMaster.tableOperations().create(masterTable);
- String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable);
- Assert.assertNotNull(masterTableId);
-
- connPeer.tableOperations().create(peerTable);
- String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable);
- Assert.assertNotNull(peerTableId);
-
- // Give our replication user the ability to write to the table
- connPeer.securityOperations().grantTablePermission(peerUserName, peerTable, TablePermission.WRITE);
-
- // Replicate this table to the peerClusterName in a table with the peerTableId table id
- connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION.getKey(), "true");
- connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId);
-
- // Write some data to table1
- BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig());
- for (int rows = 0; rows < 5000; rows++) {
- Mutation m = new Mutation(Integer.toString(rows));
- for (int cols = 0; cols < 100; cols++) {
- String value = Integer.toString(cols);
- m.put(value, "", value);
- }
- bw.addMutation(m);
- }
-
- bw.close();
-
- log.info("Wrote all data to master cluster");
-
- Set<String> files = connMaster.replicationOperations().referencedFiles(masterTable);
- for (String s : files) {
- log.info("Found referenced file for " + masterTable + ": " + s);
- }
-
- for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
- cluster.killProcess(ServerType.TABLET_SERVER, proc);
- }
-
- cluster.exec(TabletServer.class);
-
- Iterators.size(connMaster.createScanner(masterTable, Authorizations.EMPTY).iterator());
-
- for (Entry<Key,Value> kv : connMaster.createScanner(ReplicationTable.NAME, Authorizations.EMPTY)) {
- log.debug(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
- }
-
- connMaster.replicationOperations().drain(masterTable, files);
-
- Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY), peer = connPeer.createScanner(peerTable, Authorizations.EMPTY);
- Iterator<Entry<Key,Value>> masterIter = master.iterator(), peerIter = peer.iterator();
- Assert.assertTrue("No data in master table", masterIter.hasNext());
- Assert.assertTrue("No data in peer table", peerIter.hasNext());
- while (masterIter.hasNext() && peerIter.hasNext()) {
- Entry<Key,Value> masterEntry = masterIter.next(), peerEntry = peerIter.next();
-      Assert.assertEquals(masterEntry.getKey() + " was not equal to " + peerEntry.getKey(), 0,
-          masterEntry.getKey().compareTo(peerEntry.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS));
- Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue());
- }
-
- Assert.assertFalse("Had more data to read from the master", masterIter.hasNext());
- Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext());
-
- peerCluster.stop();
- }
-
- @Test
- public void dataReplicatedToCorrectTableWithoutDrain() throws Exception {
- MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
- ROOT_PASSWORD);
- peerCfg.setNumTservers(1);
- peerCfg.setInstanceName("peer");
- updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
- peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
- MiniAccumuloClusterImpl peer1Cluster = new MiniAccumuloClusterImpl(peerCfg);
-
- peer1Cluster.start();
-
- try {
- Connector connMaster = getConnector();
- Connector connPeer = peer1Cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
-
- String peerClusterName = "peer";
-
- String peerUserName = "repl";
- String peerPassword = "passwd";
-
- // Create a user on the peer for replication to use
- connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
-
- // Configure the credentials we should use to authenticate ourselves to the peer for replication
- connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
- connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
-
- // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
- connMaster.instanceOperations().setProperty(
- Property.REPLICATION_PEERS.getKey() + peerClusterName,
- ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
- AccumuloReplicaSystem.buildConfiguration(peer1Cluster.getInstanceName(), peer1Cluster.getZooKeepers())));
-
- String masterTable1 = "master1", peerTable1 = "peer1", masterTable2 = "master2", peerTable2 = "peer2";
-
- connMaster.tableOperations().create(masterTable1);
- String masterTableId1 = connMaster.tableOperations().tableIdMap().get(masterTable1);
- Assert.assertNotNull(masterTableId1);
-
- connMaster.tableOperations().create(masterTable2);
- String masterTableId2 = connMaster.tableOperations().tableIdMap().get(masterTable2);
- Assert.assertNotNull(masterTableId2);
-
- connPeer.tableOperations().create(peerTable1);
- String peerTableId1 = connPeer.tableOperations().tableIdMap().get(peerTable1);
- Assert.assertNotNull(peerTableId1);
-
- connPeer.tableOperations().create(peerTable2);
- String peerTableId2 = connPeer.tableOperations().tableIdMap().get(peerTable2);
- Assert.assertNotNull(peerTableId2);
-
- // Give our replication user the ability to write to the tables
- connPeer.securityOperations().grantTablePermission(peerUserName, peerTable1, TablePermission.WRITE);
- connPeer.securityOperations().grantTablePermission(peerUserName, peerTable2, TablePermission.WRITE);
-
- // Replicate this table to the peerClusterName in a table with the peerTableId table id
- connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION.getKey(), "true");
- connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId1);
-
- connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION.getKey(), "true");
- connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId2);
-
- // Wait for zookeeper updates (configuration) to propagate
- UtilWaitThread.sleep(3 * 1000);
-
- // Write some data to table1
- BatchWriter bw = connMaster.createBatchWriter(masterTable1, new BatchWriterConfig());
- for (int rows = 0; rows < 2500; rows++) {
- Mutation m = new Mutation(masterTable1 + rows);
- for (int cols = 0; cols < 100; cols++) {
- String value = Integer.toString(cols);
- m.put(value, "", value);
- }
- bw.addMutation(m);
- }
-
- bw.close();
-
- // Write some data to table2
- bw = connMaster.createBatchWriter(masterTable2, new BatchWriterConfig());
- for (int rows = 0; rows < 2500; rows++) {
- Mutation m = new Mutation(masterTable2 + rows);
- for (int cols = 0; cols < 100; cols++) {
- String value = Integer.toString(cols);
- m.put(value, "", value);
- }
- bw.addMutation(m);
- }
-
- bw.close();
-
- log.info("Wrote all data to master cluster");
-
- while (!ReplicationTable.isOnline(connMaster)) {
- Thread.sleep(500);
- }
-
- for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
- cluster.killProcess(ServerType.TABLET_SERVER, proc);
- }
-
- cluster.exec(TabletServer.class);
-
- // Wait until we fully replicated something
- boolean fullyReplicated = false;
- for (int i = 0; i < 10 && !fullyReplicated; i++) {
- UtilWaitThread.sleep(timeoutFactor * 2000);
-
- Scanner s = ReplicationTable.getScanner(connMaster);
- WorkSection.limit(s);
- for (Entry<Key,Value> entry : s) {
- Status status = Status.parseFrom(entry.getValue().get());
- if (StatusUtil.isFullyReplicated(status)) {
-            fullyReplicated = true;
- }
- }
- }
-
-      Assert.assertTrue("Did not find any fully replicated status records", fullyReplicated);
-
-      long countTable = 0L;
-
- // Check a few times
- for (int i = 0; i < 10; i++) {
-        countTable = 0L;
- for (Entry<Key,Value> entry : connPeer.createScanner(peerTable1, Authorizations.EMPTY)) {
- countTable++;
-          Assert.assertTrue("Found unexpected key-value " + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString()
-              .startsWith(masterTable1));
- }
- log.info("Found {} records in {}", countTable, peerTable1);
- if (0 < countTable) {
- break;
- }
- Thread.sleep(2000);
- }
-
- Assert.assertTrue("Did not find any records in " + peerTable1 + " on peer", countTable > 0);
-
- for (int i = 0; i < 10; i++) {
-        countTable = 0L;
- for (Entry<Key,Value> entry : connPeer.createScanner(peerTable2, Authorizations.EMPTY)) {
- countTable++;
-          Assert.assertTrue("Found unexpected key-value " + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString()
-              .startsWith(masterTable2));
- }
-
- log.info("Found {} records in {}", countTable, peerTable2);
- if (0 < countTable) {
- break;
- }
- Thread.sleep(2000);
- }
- Assert.assertTrue("Did not find any records in " + peerTable2 + " on peer", countTable > 0);
-
- } finally {
- peer1Cluster.stop();
- }
- }
-}
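Every test in the file above wires up its peer with the same three instance
properties plus two per-table properties. A compact sketch of that
configuration (connMaster, peerCluster and peerTableId as in the tests;
"peer", "repl" and "passwd" are the illustrative names the tests use):

    // replication.peer.<name> = AccumuloReplicaSystem,<instanceName>,<zookeepers>
    connMaster.instanceOperations().setProperty(
        Property.REPLICATION_PEERS.getKey() + "peer",
        ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
            AccumuloReplicaSystem.buildConfiguration(peerCluster.getInstanceName(), peerCluster.getZooKeepers())));
    // Credentials the master presents to the peer when shipping WAL edits.
    connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + "peer", "repl");
    connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + "peer", "passwd");
    // Per table: enable replication and map the source table to the peer's table id.
    connMaster.tableOperations().setProperty("master", Property.TABLE_REPLICATION.getKey(), "true");
    connMaster.tableOperations().setProperty("master", Property.TABLE_REPLICATION_TARGET.getKey() + "peer", peerTableId);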
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/replication/UnusedWalDoesntCloseReplicationStatusIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/replication/UnusedWalDoesntCloseReplicationStatusIT.java b/test/src/test/java/org/apache/accumulo/test/replication/UnusedWalDoesntCloseReplicationStatusIT.java
deleted file mode 100644
index 59197de..0000000
--- a/test/src/test/java/org/apache/accumulo/test/replication/UnusedWalDoesntCloseReplicationStatusIT.java
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.replication;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.accumulo.tserver.logger.LogEvents.OPEN;
-
-import java.io.DataOutputStream;
-import java.io.File;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Map.Entry;
-import java.util.UUID;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.protobuf.ProtobufUtil;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.data.ServerMutation;
-import org.apache.accumulo.server.replication.ReplicaSystemFactory;
-import org.apache.accumulo.server.replication.StatusUtil;
-import org.apache.accumulo.server.replication.proto.Replication.Status;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.accumulo.tserver.log.DfsLogger;
-import org.apache.accumulo.tserver.logger.LogEvents;
-import org.apache.accumulo.tserver.logger.LogFileKey;
-import org.apache.accumulo.tserver.logger.LogFileValue;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Test;
-
-import com.google.common.collect.Iterables;
-
-public class UnusedWalDoesntCloseReplicationStatusIT extends ConfigurableMacBase {
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
- cfg.setNumTservers(1);
- }
-
- @Test
- public void test() throws Exception {
- File accumuloDir = this.getCluster().getConfig().getAccumuloDir();
- final Connector conn = getConnector();
- final String tableName = getUniqueNames(1)[0];
-
- conn.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
- conn.tableOperations().create(tableName);
-
-    final String tableId = conn.tableOperations().tableIdMap().get(tableName);
-    Assert.assertNotNull("Did not find table ID", tableId);
-
-    final int numericTableId = Integer.parseInt(tableId);
-    final int fakeTableId = numericTableId + 1;
-
- conn.tableOperations().setProperty(tableName, Property.TABLE_REPLICATION.getKey(), "true");
- conn.tableOperations().setProperty(tableName, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
- // just sleep
- conn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1",
- ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, "50000"));
-
- FileSystem fs = FileSystem.getLocal(new Configuration());
- File tserverWalDir = new File(accumuloDir, ServerConstants.WAL_DIR + Path.SEPARATOR + "faketserver+port");
- File tserverWal = new File(tserverWalDir, UUID.randomUUID().toString());
- fs.mkdirs(new Path(tserverWalDir.getAbsolutePath()));
-
- // Make a fake WAL with no data in it for our real table
- FSDataOutputStream out = fs.create(new Path(tserverWal.getAbsolutePath()));
-
- out.write(DfsLogger.LOG_FILE_HEADER_V3.getBytes(UTF_8));
-
- DataOutputStream dos = new DataOutputStream(out);
- dos.writeUTF("NullCryptoModule");
-
- // Fake a single update WAL that has a mutation for another table
- LogFileKey key = new LogFileKey();
- LogFileValue value = new LogFileValue();
-
- key.event = OPEN;
- key.tserverSession = tserverWal.getAbsolutePath();
- key.filename = tserverWal.getAbsolutePath();
- key.write(out);
- value.write(out);
-
- key.event = LogEvents.DEFINE_TABLET;
- key.tablet = new KeyExtent(new Text(Integer.toString(fakeTableId)), null, null);
-    key.seq = 1L;
- key.tid = 1;
-
- key.write(dos);
- value.write(dos);
-
- key.tablet = null;
- key.event = LogEvents.MUTATION;
- key.filename = tserverWal.getAbsolutePath();
- value.mutations = Arrays.<Mutation> asList(new ServerMutation(new Text("row")));
-
- key.write(dos);
- value.write(dos);
-
- key.event = LogEvents.COMPACTION_START;
- key.filename = accumuloDir.getAbsolutePath() + "/tables/" + fakeTableId + "/t-000001/A000001.rf";
- value.mutations = Collections.emptyList();
-
- key.write(dos);
- value.write(dos);
-
- key.event = LogEvents.COMPACTION_FINISH;
- value.mutations = Collections.emptyList();
-
- key.write(dos);
- value.write(dos);
-
- dos.close();
-
- BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
- Mutation m = new Mutation("m");
- m.put("m", "m", "M");
- bw.addMutation(m);
- bw.close();
-
- log.info("State of metadata table after inserting a record");
-
- Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
- for (Entry<Key,Value> entry : s) {
- System.out.println(entry.getKey().toStringNoTruncate() + " " + entry.getValue());
- }
-
- s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(MetadataSchema.ReplicationSection.getRange());
- for (Entry<Key,Value> entry : s) {
- System.out.println(entry.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
- }
-
-    log.info("Taking table offline");
-
- conn.tableOperations().offline(tableName, true);
-
- // Add our fake WAL to the log column for this table
- String walUri = tserverWal.toURI().toString();
- KeyExtent extent = new KeyExtent(new Text(tableId), null, null);
- bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
- m = new Mutation(extent.getMetadataEntry());
- m.put(MetadataSchema.TabletsSection.LogColumnFamily.NAME, new Text("localhost:12345/" + walUri), new Value((walUri + "|1").getBytes(UTF_8)));
- bw.addMutation(m);
-
- // Add a replication entry for our fake WAL
- m = new Mutation(MetadataSchema.ReplicationSection.getRowPrefix() + new Path(walUri).toString());
- m.put(MetadataSchema.ReplicationSection.COLF, new Text(tableId), new Value(StatusUtil.fileCreated(System.currentTimeMillis()).toByteArray()));
- bw.addMutation(m);
- bw.close();
-
- log.info("State of metadata after injecting WAL manually");
-
- s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
- for (Entry<Key,Value> entry : s) {
- log.info(entry.getKey().toStringNoTruncate() + " " + entry.getValue());
- }
-
- s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(MetadataSchema.ReplicationSection.getRange());
- for (Entry<Key,Value> entry : s) {
- log.info(entry.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
- }
-
- log.info("Bringing table online");
- conn.tableOperations().online(tableName, true);
-
- Assert.assertEquals(1, Iterables.size(conn.createScanner(tableName, Authorizations.EMPTY)));
-
- log.info("Table has performed recovery, state of metadata:");
-
- s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
- for (Entry<Key,Value> entry : s) {
- log.info(entry.getKey().toStringNoTruncate() + " " + entry.getValue());
- }
-
- s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(MetadataSchema.ReplicationSection.getRange());
- for (Entry<Key,Value> entry : s) {
- Status status = Status.parseFrom(entry.getValue().get());
- log.info(entry.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(status));
- Assert.assertFalse("Status record was closed and it should not be", status.getClosed());
- }
- }
-}
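The WAL-injection test above keys its final assertion off the protobuf Status
records stored in the metadata table's replication section. A short sketch of
reading those records back (assuming a Connector named conn; imports as in the
deleted file):

    Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    // Restrict the scan to the replication section of the metadata table.
    s.setRange(MetadataSchema.ReplicationSection.getRange());
    for (Entry<Key,Value> entry : s) {
      // Each value is a serialized Replication.Status protobuf.
      Status status = Status.parseFrom(entry.getValue().get());
      log.info(entry.getKey().toStringNoTruncate() + " closed=" + status.getClosed());
    }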
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/server/security/SystemCredentialsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/server/security/SystemCredentialsIT.java b/test/src/test/java/org/apache/accumulo/test/server/security/SystemCredentialsIT.java
deleted file mode 100644
index 9752916..0000000
--- a/test/src/test/java/org/apache/accumulo/test/server/security/SystemCredentialsIT.java
+++ /dev/null
@@ -1,233 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.server.security;
-
-import static org.junit.Assert.assertEquals;
-
-import java.nio.ByteBuffer;
-import java.util.List;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.impl.Credentials;
-import org.apache.accumulo.core.client.security.SecurityErrorCode;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.security.SystemCredentials;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.junit.Test;
-
-public class SystemCredentialsIT extends ConfigurableMacBase {
-
- private static final int FAIL_CODE = 7, BAD_PASSWD_FAIL_CODE = 8;
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 1 * 60;
- }
-
- @Test
- public void testSystemCredentials() throws Exception {
- assertEquals(0, exec(SystemCredentialsIT.class, "good", getCluster().getZooKeepers()).waitFor());
- assertEquals(FAIL_CODE, exec(SystemCredentialsIT.class, "bad", getCluster().getZooKeepers()).waitFor());
- assertEquals(BAD_PASSWD_FAIL_CODE, exec(SystemCredentialsIT.class, "bad_password", getCluster().getZooKeepers()).waitFor());
- }
-
- public static void main(final String[] args) throws AccumuloException, TableNotFoundException, AccumuloSecurityException {
- Credentials creds = null;
- if (args.length < 2)
- throw new RuntimeException("Incorrect usage; expected to be run by test only");
- if (args[0].equals("bad")) {
- Instance inst = new Instance() {
-
- @Deprecated
- @Override
- public void setConfiguration(AccumuloConfiguration conf) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public int getZooKeepersSessionTimeOut() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public String getZooKeepers() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public String getRootTabletLocation() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public List<String> getMasterLocations() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public String getInstanceName() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public String getInstanceID() {
- return SystemCredentials.class.getName();
- }
-
- @Override
- public Connector getConnector(String principal, AuthenticationToken token) throws AccumuloException, AccumuloSecurityException {
- throw new UnsupportedOperationException();
- }
-
- @Deprecated
- @Override
- public Connector getConnector(String user, CharSequence pass) throws AccumuloException, AccumuloSecurityException {
- throw new UnsupportedOperationException();
- }
-
- @Deprecated
- @Override
- public Connector getConnector(String user, ByteBuffer pass) throws AccumuloException, AccumuloSecurityException {
- throw new UnsupportedOperationException();
- }
-
- @Deprecated
- @Override
- public Connector getConnector(String user, byte[] pass) throws AccumuloException, AccumuloSecurityException {
- throw new UnsupportedOperationException();
- }
-
- @Deprecated
- @Override
- public AccumuloConfiguration getConfiguration() {
- throw new UnsupportedOperationException();
- }
-
- };
- creds = SystemCredentials.get(inst);
- } else if (args[0].equals("good")) {
- creds = SystemCredentials.get(HdfsZooInstance.getInstance());
- } else if (args[0].equals("bad_password")) {
- Instance inst = new Instance() {
-
- @Override
- public int getZooKeepersSessionTimeOut() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public String getZooKeepers() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public String getRootTabletLocation() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public List<String> getMasterLocations() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public String getInstanceName() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public String getInstanceID() {
- return SystemCredentials.class.getName();
- }
-
- @Override
- public Connector getConnector(String principal, AuthenticationToken token) throws AccumuloException, AccumuloSecurityException {
- throw new UnsupportedOperationException();
- }
-
- @Deprecated
- @Override
- public Connector getConnector(String user, CharSequence pass) throws AccumuloException, AccumuloSecurityException {
- throw new UnsupportedOperationException();
- }
-
- @Deprecated
- @Override
- public Connector getConnector(String user, ByteBuffer pass) throws AccumuloException, AccumuloSecurityException {
- throw new UnsupportedOperationException();
- }
-
- @Deprecated
- @Override
- public Connector getConnector(String user, byte[] pass) throws AccumuloException, AccumuloSecurityException {
- throw new UnsupportedOperationException();
- }
-
- @Deprecated
- @Override
- public AccumuloConfiguration getConfiguration() {
- throw new UnsupportedOperationException();
- }
-
- @Deprecated
- @Override
- public void setConfiguration(AccumuloConfiguration conf) {
- throw new UnsupportedOperationException();
- }
-
- };
- creds = new SystemCredentials(inst, "!SYSTEM", new PasswordToken("fake"));
- } else {
- throw new RuntimeException("Incorrect usage; expected to be run by test only");
- }
- Instance instance = HdfsZooInstance.getInstance();
- Connector conn;
- try {
- conn = instance.getConnector(creds.getPrincipal(), creds.getToken());
- } catch (AccumuloSecurityException e) {
- e.printStackTrace(System.err);
- System.exit(BAD_PASSWD_FAIL_CODE);
- return;
- }
- try {
- Scanner scan = conn.createScanner(RootTable.NAME, Authorizations.EMPTY);
- for (Entry<Key,Value> e : scan) {
- e.hashCode();
- }
- } catch (RuntimeException e) {
- // catch the runtime exception from the scanner iterator
- if (e.getCause() instanceof AccumuloSecurityException
- && ((AccumuloSecurityException) e.getCause()).getSecurityErrorCode() == SecurityErrorCode.BAD_CREDENTIALS) {
- e.printStackTrace(System.err);
- System.exit(FAIL_CODE);
- }
- }
- }
-}
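
The test above drives itself as a child process via exec(...) and keys its
assertions off distinct exit codes (0, FAIL_CODE, BAD_PASSWD_FAIL_CODE). A
minimal, self-contained sketch of that pattern, with a hypothetical
ExitCodeDemo class standing in for the real IT:

    import java.util.Arrays;

    public class ExitCodeDemo {
      static final int OK = 0, FAIL = 7; // FAIL mirrors FAIL_CODE above

      public static void main(String[] args) {
        // Map each outcome to a distinct, documented exit code.
        System.exit(Arrays.asList(args).contains("bad") ? FAIL : OK);
      }
    }

The launching side then asserts on the exit status, e.g.
assertEquals(7, new ProcessBuilder("java", "ExitCodeDemo", "bad").inheritIO().start().waitFor());
which is essentially what exec(...).waitFor() does in testSystemCredentials().
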
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/start/KeywordStartIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/start/KeywordStartIT.java b/test/src/test/java/org/apache/accumulo/test/start/KeywordStartIT.java
deleted file mode 100644
index f7f250a..0000000
--- a/test/src/test/java/org/apache/accumulo/test/start/KeywordStartIT.java
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.start;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.lang.reflect.Method;
-import java.lang.reflect.Modifier;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.file.rfile.PrintInfo;
-import org.apache.accumulo.core.util.Classpath;
-import org.apache.accumulo.core.util.CreateToken;
-import org.apache.accumulo.core.util.Help;
-import org.apache.accumulo.core.util.Jar;
-import org.apache.accumulo.core.util.Version;
-import org.apache.accumulo.gc.GCExecutable;
-import org.apache.accumulo.gc.SimpleGarbageCollector;
-import org.apache.accumulo.master.Master;
-import org.apache.accumulo.master.MasterExecutable;
-import org.apache.accumulo.minicluster.MiniAccumuloRunner;
-import org.apache.accumulo.minicluster.impl.MiniClusterExecutable;
-import org.apache.accumulo.monitor.Monitor;
-import org.apache.accumulo.monitor.MonitorExecutable;
-import org.apache.accumulo.proxy.Proxy;
-import org.apache.accumulo.server.init.Initialize;
-import org.apache.accumulo.server.util.Admin;
-import org.apache.accumulo.server.util.Info;
-import org.apache.accumulo.server.util.LoginProperties;
-import org.apache.accumulo.server.util.ZooKeeperMain;
-import org.apache.accumulo.shell.Shell;
-import org.apache.accumulo.start.Main;
-import org.apache.accumulo.start.spi.KeywordExecutable;
-import org.apache.accumulo.tracer.TraceServer;
-import org.apache.accumulo.tracer.TracerExecutable;
-import org.apache.accumulo.tserver.TServerExecutable;
-import org.apache.accumulo.tserver.TabletServer;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class KeywordStartIT {
-
- private final Logger log = LoggerFactory.getLogger(getClass());
-
- @Test
- public void testKeywordsMatch() throws IOException {
- for (Entry<String,KeywordExecutable> entry : Main.getExecutables(getClass().getClassLoader()).entrySet()) {
- assertEquals(entry.getKey(), entry.getValue().keyword());
- }
- }
-
- @Test
- public void testCheckDuplicates() {
- NoOp one = new NoOp("one");
- NoOp anotherOne = new NoOp("another");
- NoOp two = new NoOp("two");
- NoOp three = new NoOp("three");
- List<NoOp> services = Arrays.asList(one, three, two, two, three, three, anotherOne);
- assertEquals(7, services.size());
- Map<String,KeywordExecutable> results = Main.checkDuplicates(services);
- assertTrue(results.containsKey(one.keyword()));
- assertTrue(results.containsKey(anotherOne.keyword()));
- assertFalse(results.containsKey(two.keyword()));
- assertFalse(results.containsKey(three.keyword()));
- assertEquals(2, results.size());
- }
-
- // Note: this test may fail in Eclipse if the META-INF/services files haven't been generated by the AutoService annotation processor
- @Test
- public void testExpectedClasses() throws IOException {
- TreeMap<String,Class<? extends KeywordExecutable>> expectSet = new TreeMap<>();
- expectSet.put("admin", Admin.class);
- expectSet.put("classpath", Classpath.class);
- expectSet.put("create-token", CreateToken.class);
- expectSet.put("gc", GCExecutable.class);
- expectSet.put("help", Help.class);
- expectSet.put("info", Info.class);
- expectSet.put("init", Initialize.class);
- expectSet.put("jar", Jar.class);
- expectSet.put("login-info", LoginProperties.class);
- expectSet.put("master", MasterExecutable.class);
- expectSet.put("minicluster", MiniClusterExecutable.class);
- expectSet.put("monitor", MonitorExecutable.class);
- expectSet.put("proxy", Proxy.class);
- expectSet.put("rfile-info", PrintInfo.class);
- expectSet.put("shell", Shell.class);
- expectSet.put("tracer", TracerExecutable.class);
- expectSet.put("tserver", TServerExecutable.class);
- expectSet.put("version", Version.class);
- expectSet.put("zookeeper", ZooKeeperMain.class);
-
- Iterator<Entry<String,Class<? extends KeywordExecutable>>> expectIter = expectSet.entrySet().iterator();
- TreeMap<String,KeywordExecutable> actualSet = new TreeMap<>(Main.getExecutables(getClass().getClassLoader()));
- Iterator<Entry<String,KeywordExecutable>> actualIter = actualSet.entrySet().iterator();
- Entry<String,Class<? extends KeywordExecutable>> expected;
- Entry<String,KeywordExecutable> actual;
- while (expectIter.hasNext() && actualIter.hasNext()) {
- expected = expectIter.next();
- actual = actualIter.next();
- assertEquals(expected.getKey(), actual.getKey());
- assertEquals(expected.getValue(), actual.getValue().getClass());
- }
- boolean moreExpected = expectIter.hasNext();
- if (moreExpected) {
- while (expectIter.hasNext()) {
- log.warn("Missing class for keyword '" + expectIter.next() + "'");
- }
- }
- assertFalse("Missing expected classes", moreExpected);
- boolean moreActual = actualIter.hasNext();
- if (moreActual) {
- while (actualIter.hasNext()) {
- log.warn("Extra class found with keyword '" + actualIter.next() + "'");
- }
- }
- assertFalse("Found additional unexpected classes", moreActual);
- }
-
- @Test
- public void checkHasMain() {
- assertFalse("Sanity check for test failed. Somehow the test class has a main method", hasMain(this.getClass()));
-
- HashSet<Class<?>> expectSet = new HashSet<>();
- expectSet.add(Admin.class);
- expectSet.add(CreateToken.class);
- expectSet.add(Info.class);
- expectSet.add(Initialize.class);
- expectSet.add(LoginProperties.class);
- expectSet.add(Master.class);
- expectSet.add(MiniAccumuloRunner.class);
- expectSet.add(Monitor.class);
- expectSet.add(PrintInfo.class);
- expectSet.add(Proxy.class);
- expectSet.add(Shell.class);
- expectSet.add(SimpleGarbageCollector.class);
- expectSet.add(TabletServer.class);
- expectSet.add(TraceServer.class);
- expectSet.add(ZooKeeperMain.class);
-
- for (Class<?> c : expectSet) {
- assertTrue("Class " + c.getName() + " is missing a main method!", hasMain(c));
- }
-
- }
-
- private static boolean hasMain(Class<?> classToCheck) {
- Method main;
- try {
- main = classToCheck.getMethod("main", String[].class);
- } catch (NoSuchMethodException e) {
- return false;
- }
- return main != null && Modifier.isPublic(main.getModifiers()) && Modifier.isStatic(main.getModifiers());
- }
-
- private static class NoOp implements KeywordExecutable {
-
- private final String kw;
-
- public NoOp(String kw) {
- this.kw = kw;
- }
-
- @Override
- public String keyword() {
- return kw;
- }
-
- @Override
- public void execute(String[] args) throws Exception {}
-
- }
-}
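
testExpectedClasses() compares a hand-maintained keyword-to-class map against
whatever Main.getExecutables discovers on the classpath; that discovery rests
on the META-INF/services entries generated by the AutoService annotation
processor (hence the Eclipse caveat above). A rough sketch of such a
ServiceLoader scan, offered as an assumption about what getExecutables does
internally rather than its verified implementation:

    import java.util.ServiceLoader;
    import java.util.TreeMap;

    import org.apache.accumulo.start.spi.KeywordExecutable;

    static TreeMap<String,KeywordExecutable> discover(ClassLoader loader) {
      TreeMap<String,KeywordExecutable> byKeyword = new TreeMap<>();
      for (KeywordExecutable ke : ServiceLoader.load(KeywordExecutable.class, loader)) {
        // Keep the first registration; real duplicates are surfaced by checkDuplicates().
        if (!byKeyword.containsKey(ke.keyword()))
          byKeyword.put(ke.keyword(), ke);
      }
      return byKeyword;
    }
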
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/util/CertUtils.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/util/CertUtils.java b/test/src/test/java/org/apache/accumulo/test/util/CertUtils.java
deleted file mode 100644
index 2345ea7..0000000
--- a/test/src/test/java/org/apache/accumulo/test/util/CertUtils.java
+++ /dev/null
@@ -1,348 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.util;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.math.BigInteger;
-import java.security.KeyPair;
-import java.security.KeyPairGenerator;
-import java.security.KeyStore;
-import java.security.KeyStoreException;
-import java.security.NoSuchAlgorithmException;
-import java.security.NoSuchProviderException;
-import java.security.PrivateKey;
-import java.security.PublicKey;
-import java.security.Security;
-import java.security.UnrecoverableKeyException;
-import java.security.cert.Certificate;
-import java.security.cert.CertificateException;
-import java.util.Calendar;
-import java.util.Enumeration;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.cli.Help;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.DefaultConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.conf.SiteConfiguration;
-import org.apache.commons.io.FileExistsException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.bouncycastle.asn1.x500.X500Name;
-import org.bouncycastle.asn1.x500.style.IETFUtils;
-import org.bouncycastle.asn1.x500.style.RFC4519Style;
-import org.bouncycastle.asn1.x509.BasicConstraints;
-import org.bouncycastle.asn1.x509.Extension;
-import org.bouncycastle.asn1.x509.KeyUsage;
-import org.bouncycastle.cert.CertIOException;
-import org.bouncycastle.cert.X509CertificateHolder;
-import org.bouncycastle.cert.jcajce.JcaX509ExtensionUtils;
-import org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder;
-import org.bouncycastle.jce.provider.BouncyCastleProvider;
-import org.bouncycastle.jce.provider.X509CertificateObject;
-import org.bouncycastle.operator.OperatorCreationException;
-import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.beust.jcommander.JCommander;
-import com.beust.jcommander.Parameter;
-import com.google.common.base.Predicate;
-
-public class CertUtils {
- private static final Logger log = LoggerFactory.getLogger(CertUtils.class);
- static {
- Security.addProvider(new BouncyCastleProvider());
- }
-
- static class Opts extends Help {
- @Parameter(description = "generate-all | generate-local | generate-self-trusted", required = true, arity = 1)
- List<String> operation = null;
-
- @Parameter(names = {"--local-keystore"}, description = "Target path for generated keystore")
- String localKeystore = null;
-
- @Parameter(names = {"--root-keystore"}, description = "Path to root truststore, generated with generate-all, or used for signing with generate-local")
- String rootKeystore = null;
-
- @Parameter(names = {"--root-truststore"}, description = "Target path for generated public root truststore")
- String truststore = null;
-
- @Parameter(names = {"--keystore-type"}, description = "Type of keystore file to use")
- String keystoreType = "JKS";
-
- @Parameter(names = {"--root-keystore-password"}, description = "Password for root keystore, falls back to --keystore-password if not provided")
- String rootKeystorePassword = null;
-
- @Parameter(
- names = {"--keystore-password"},
- description = "Password used to encrypt keystores. If omitted, the instance-wide secret will be used. If specified, the password must also be explicitly configured in Accumulo.")
- String keystorePassword = null;
-
- @Parameter(names = {"--truststore-password"}, description = "Password used to encrypt the truststore. If omitted, empty password is used")
- String truststorePassword = "";
-
- @Parameter(names = {"--key-name-prefix"}, description = "Prefix for names of generated keys")
- String keyNamePrefix = CertUtils.class.getSimpleName();
-
- @Parameter(names = {"--issuer-rdn"}, description = "RDN string for issuer, for example: 'c=US,o=My Organization,cn=My Name'")
- String issuerDirString = "o=Apache Accumulo";
-
- @Parameter(names = "--site-file", description = "Load configuration from the given site file")
- public String siteFile = null;
-
- @Parameter(names = "--signing-algorithm", description = "Algorithm used to sign certificates")
- public String signingAlg = "SHA256WITHRSA";
-
- @Parameter(names = "--encryption-algorithm", description = "Algorithm used to encrypt private keys")
- public String encryptionAlg = "RSA";
-
- @Parameter(names = "--keysize", description = "Key size used by encryption algorithm")
- public int keysize = 2048;
-
- public AccumuloConfiguration getConfiguration() {
- if (siteFile == null) {
- return SiteConfiguration.getInstance(DefaultConfiguration.getInstance());
- } else {
- return new AccumuloConfiguration() {
- Configuration xml = new Configuration();
- {
- xml.addResource(new Path(siteFile));
- }
-
- @Override
- public Iterator<Entry<String,String>> iterator() {
- TreeMap<String,String> map = new TreeMap<String,String>();
- for (Entry<String,String> props : DefaultConfiguration.getInstance())
- map.put(props.getKey(), props.getValue());
- for (Entry<String,String> props : xml)
- map.put(props.getKey(), props.getValue());
- return map.entrySet().iterator();
- }
-
- @Override
- public String get(Property property) {
- String value = xml.get(property.getKey());
- if (value != null)
- return value;
- return DefaultConfiguration.getInstance().get(property);
- }
-
- @Override
- public void getProperties(Map<String,String> props, Predicate<String> filter) {
- for (Entry<String,String> entry : this)
- if (filter.apply(entry.getKey()))
- props.put(entry.getKey(), entry.getValue());
- }
- };
- }
- }
- }
-
- public static void main(String[] args) throws Exception {
- Opts opts = new Opts();
- opts.parseArgs(CertUtils.class.getName(), args);
- String operation = opts.operation.get(0);
-
- String keyPassword = opts.keystorePassword;
- if (keyPassword == null)
- keyPassword = getDefaultKeyPassword();
-
- String rootKeyPassword = opts.rootKeystorePassword;
- if (rootKeyPassword == null) {
- rootKeyPassword = keyPassword;
- }
-
- CertUtils certUtils = new CertUtils(opts.keystoreType, opts.issuerDirString, opts.encryptionAlg, opts.keysize, opts.signingAlg);
-
- if ("generate-all".equals(operation)) {
- certUtils.createAll(new File(opts.rootKeystore), new File(opts.localKeystore), new File(opts.truststore), opts.keyNamePrefix, rootKeyPassword,
- keyPassword, opts.truststorePassword);
- } else if ("generate-local".equals(operation)) {
- certUtils.createSignedCert(new File(opts.localKeystore), opts.keyNamePrefix + "-local", keyPassword, opts.rootKeystore, rootKeyPassword);
- } else if ("generate-self-trusted".equals(operation)) {
- certUtils.createSelfSignedCert(new File(opts.truststore), opts.keyNamePrefix + "-selfTrusted", keyPassword);
- } else {
- JCommander jcommander = new JCommander(opts);
- jcommander.setProgramName(CertUtils.class.getName());
- jcommander.usage();
- System.err.println("Unrecognized operation: " + opts.operation);
- System.exit(1); // nonzero: the operation was not recognized
- }
- }
-
- private static String getDefaultKeyPassword() {
- return SiteConfiguration.getInstance(DefaultConfiguration.getInstance()).get(Property.INSTANCE_SECRET);
- }
-
- private String issuerDirString;
- private String keystoreType;
- private String encryptionAlgorithm;
- private int keysize;
- private String signingAlgorithm;
-
- public CertUtils(String keystoreType, String issuerDirString, String encryptionAlgorithm, int keysize, String signingAlgorithm) {
- super();
- this.keystoreType = keystoreType;
- this.issuerDirString = issuerDirString;
- this.encryptionAlgorithm = encryptionAlgorithm;
- this.keysize = keysize;
- this.signingAlgorithm = signingAlgorithm;
- }
-
- public void createAll(File rootKeystoreFile, File localKeystoreFile, File trustStoreFile, String keyNamePrefix, String rootKeystorePassword,
- String keystorePassword, String truststorePassword) throws KeyStoreException, CertificateException, NoSuchAlgorithmException, IOException,
- OperatorCreationException, AccumuloSecurityException, NoSuchProviderException, UnrecoverableKeyException, FileNotFoundException {
- createSelfSignedCert(rootKeystoreFile, keyNamePrefix + "-root", rootKeystorePassword);
- createSignedCert(localKeystoreFile, keyNamePrefix + "-local", keystorePassword, rootKeystoreFile.getAbsolutePath(), rootKeystorePassword);
- createPublicCert(trustStoreFile, keyNamePrefix + "-public", rootKeystoreFile.getAbsolutePath(), rootKeystorePassword, truststorePassword);
- }
-
- public void createPublicCert(File targetKeystoreFile, String keyName, String rootKeystorePath, String rootKeystorePassword, String truststorePassword)
- throws NoSuchAlgorithmException, CertificateException, FileNotFoundException, IOException, KeyStoreException, UnrecoverableKeyException {
- KeyStore signerKeystore = KeyStore.getInstance(keystoreType);
- char[] signerPasswordArray = rootKeystorePassword.toCharArray();
- try (FileInputStream fis = new FileInputStream(rootKeystorePath)) {
- signerKeystore.load(fis, signerPasswordArray);
- }
- Certificate rootCert = findCert(signerKeystore);
-
- KeyStore keystore = KeyStore.getInstance(keystoreType);
- keystore.load(null, null);
- keystore.setCertificateEntry(keyName + "Cert", rootCert);
- try (FileOutputStream fos = new FileOutputStream(targetKeystoreFile)) {
- keystore.store(fos, truststorePassword.toCharArray());
- }
- }
-
- public void createSignedCert(File targetKeystoreFile, String keyName, String keystorePassword, String signerKeystorePath, String signerKeystorePassword)
- throws KeyStoreException, CertificateException, NoSuchAlgorithmException, IOException, OperatorCreationException, AccumuloSecurityException,
- UnrecoverableKeyException, NoSuchProviderException {
- KeyStore signerKeystore = KeyStore.getInstance(keystoreType);
- char[] signerPasswordArray = signerKeystorePassword.toCharArray();
- try (FileInputStream fis = new FileInputStream(signerKeystorePath)) {
- signerKeystore.load(fis, signerPasswordArray);
- }
- Certificate signerCert = findCert(signerKeystore);
- PrivateKey signerKey = findPrivateKey(signerKeystore, signerPasswordArray);
-
- KeyPair kp = generateKeyPair();
- X509CertificateObject cert = generateCert(keyName, kp, false, signerCert.getPublicKey(), signerKey);
-
- char[] password = keystorePassword.toCharArray();
- KeyStore keystore = KeyStore.getInstance(keystoreType);
- keystore.load(null, null);
- keystore.setCertificateEntry(keyName + "Cert", cert);
- keystore.setKeyEntry(keyName + "Key", kp.getPrivate(), password, new Certificate[] {cert, signerCert});
- try (FileOutputStream fos = new FileOutputStream(targetKeystoreFile)) {
- keystore.store(fos, password);
- }
- }
-
- public void createSelfSignedCert(File targetKeystoreFile, String keyName, String keystorePassword) throws KeyStoreException, CertificateException,
- NoSuchAlgorithmException, IOException, OperatorCreationException, AccumuloSecurityException, NoSuchProviderException {
- if (targetKeystoreFile.exists()) {
- throw new FileExistsException(targetKeystoreFile);
- }
-
- KeyPair kp = generateKeyPair();
-
- X509CertificateObject cert = generateCert(keyName, kp, true, kp.getPublic(), kp.getPrivate());
-
- char[] password = keystorePassword.toCharArray();
- KeyStore keystore = KeyStore.getInstance(keystoreType);
- keystore.load(null, null);
- keystore.setCertificateEntry(keyName + "Cert", cert);
- keystore.setKeyEntry(keyName + "Key", kp.getPrivate(), password, new Certificate[] {cert});
- try (FileOutputStream fos = new FileOutputStream(targetKeystoreFile)) {
- keystore.store(fos, password);
- }
- }
-
- private KeyPair generateKeyPair() throws NoSuchAlgorithmException, NoSuchProviderException {
- KeyPairGenerator gen = KeyPairGenerator.getInstance(encryptionAlgorithm);
- gen.initialize(keysize);
- return gen.generateKeyPair();
- }
-
- private X509CertificateObject generateCert(String keyName, KeyPair kp, boolean isCertAuthority, PublicKey signerPublicKey, PrivateKey signerPrivateKey)
- throws IOException, CertIOException, OperatorCreationException, CertificateException, NoSuchAlgorithmException {
- Calendar startDate = Calendar.getInstance();
- Calendar endDate = Calendar.getInstance();
- endDate.add(Calendar.YEAR, 100);
-
- BigInteger serialNumber = BigInteger.valueOf(startDate.getTimeInMillis());
- X500Name issuer = new X500Name(IETFUtils.rDNsFromString(issuerDirString, RFC4519Style.INSTANCE));
- JcaX509v3CertificateBuilder certGen = new JcaX509v3CertificateBuilder(issuer, serialNumber, startDate.getTime(), endDate.getTime(), issuer, kp.getPublic());
- JcaX509ExtensionUtils extensionUtils = new JcaX509ExtensionUtils();
- certGen.addExtension(Extension.subjectKeyIdentifier, false, extensionUtils.createSubjectKeyIdentifier(kp.getPublic()));
- certGen.addExtension(Extension.basicConstraints, false, new BasicConstraints(isCertAuthority));
- certGen.addExtension(Extension.authorityKeyIdentifier, false, extensionUtils.createAuthorityKeyIdentifier(signerPublicKey));
- if (isCertAuthority) {
- certGen.addExtension(Extension.keyUsage, true, new KeyUsage(KeyUsage.keyCertSign));
- }
- X509CertificateHolder cert = certGen.build(new JcaContentSignerBuilder(signingAlgorithm).build(signerPrivateKey));
- return new X509CertificateObject(cert.toASN1Structure());
- }
-
- static Certificate findCert(KeyStore keyStore) throws KeyStoreException {
- Enumeration<String> aliases = keyStore.aliases();
- Certificate cert = null;
- while (aliases.hasMoreElements()) {
- String alias = aliases.nextElement();
- if (keyStore.isCertificateEntry(alias)) {
- if (cert == null) {
- cert = keyStore.getCertificate(alias);
- } else {
- log.warn("Found multiple certificates in keystore. Ignoring " + alias);
- }
- }
- }
- if (cert == null) {
- throw new KeyStoreException("Could not find cert in keystore");
- }
- return cert;
- }
-
- static PrivateKey findPrivateKey(KeyStore keyStore, char[] keystorePassword) throws UnrecoverableKeyException, KeyStoreException, NoSuchAlgorithmException {
- Enumeration<String> aliases = keyStore.aliases();
- PrivateKey key = null;
- while (aliases.hasMoreElements()) {
- String alias = aliases.nextElement();
- if (keyStore.isKeyEntry(alias)) {
- if (key == null) {
- key = (PrivateKey) keyStore.getKey(alias, keystorePassword);
- } else {
- log.warn("Found multiple keys in keystore. Ignoring " + alias);
- }
- }
- }
- if (key == null) {
- throw new KeyStoreException("Could not find private key in keystore");
- }
- return key;
- }
-}
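
CertUtils produces a chain of three stores: a self-signed root keystore, a
local keystore whose certificate is signed by that root, and a public
truststore holding only the root certificate. Consuming those stores is plain
JSSE; the fragment below is a hedged sketch (file names and passwords are
placeholders, not values CertUtils mandates) assumed to live in a method that
throws Exception, with its imports shown for completeness:

    import java.io.FileInputStream;
    import java.security.KeyStore;
    import javax.net.ssl.KeyManagerFactory;
    import javax.net.ssl.SSLContext;
    import javax.net.ssl.TrustManagerFactory;

    KeyStore ks = KeyStore.getInstance("JKS");
    try (FileInputStream in = new FileInputStream("local.jks")) {
      ks.load(in, "keypass".toCharArray());
    }
    KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
    kmf.init(ks, "keypass".toCharArray());

    KeyStore ts = KeyStore.getInstance("JKS");
    try (FileInputStream in = new FileInputStream("truststore.jks")) {
      ts.load(in, "".toCharArray()); // --truststore-password defaults to empty
    }
    TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
    tmf.init(ts);

    SSLContext ctx = SSLContext.getInstance("TLS");
    ctx.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null);
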
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/resources/FooConstraint.jar
----------------------------------------------------------------------
diff --git a/test/src/test/resources/FooConstraint.jar b/test/src/test/resources/FooConstraint.jar
deleted file mode 100644
index 14673da..0000000
Binary files a/test/src/test/resources/FooConstraint.jar and /dev/null differ
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/resources/FooFilter.jar
----------------------------------------------------------------------
diff --git a/test/src/test/resources/FooFilter.jar b/test/src/test/resources/FooFilter.jar
deleted file mode 100644
index ef30cbc..0000000
Binary files a/test/src/test/resources/FooFilter.jar and /dev/null differ
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/resources/TestCombinerX.jar
----------------------------------------------------------------------
diff --git a/test/src/test/resources/TestCombinerX.jar b/test/src/test/resources/TestCombinerX.jar
deleted file mode 100644
index 849e447..0000000
Binary files a/test/src/test/resources/TestCombinerX.jar and /dev/null differ
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/resources/TestCombinerY.jar
----------------------------------------------------------------------
diff --git a/test/src/test/resources/TestCombinerY.jar b/test/src/test/resources/TestCombinerY.jar
deleted file mode 100644
index 30da0cbf..0000000
Binary files a/test/src/test/resources/TestCombinerY.jar and /dev/null differ
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/resources/TestCompactionStrat.jar
----------------------------------------------------------------------
diff --git a/test/src/test/resources/TestCompactionStrat.jar b/test/src/test/resources/TestCompactionStrat.jar
deleted file mode 100644
index 3daa16e..0000000
Binary files a/test/src/test/resources/TestCompactionStrat.jar and /dev/null differ
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/resources/conf/accumulo-site.xml
----------------------------------------------------------------------
diff --git a/test/src/test/resources/conf/accumulo-site.xml b/test/src/test/resources/conf/accumulo-site.xml
deleted file mode 100644
index e1f0e70..0000000
--- a/test/src/test/resources/conf/accumulo-site.xml
+++ /dev/null
@@ -1,123 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<configuration>
- <!--
- Put your site-specific accumulo configurations here.
-
- The available configuration values along with their defaults
- are documented in docs/config.html
-
- Unless you are simply testing at your workstation, you will most
- definitely need to change the three entries below.
- -->
-
- <property>
- <name>instance.zookeeper.host</name>
- <value>localhost:2181</value>
- <description>Comma-separated list of ZooKeeper servers</description>
- </property>
-
- <property>
- <name>logger.dir.walog</name>
- <value>walogs</value>
- <description>The directory used to store write-ahead logs on the local filesystem. It is possible to specify a comma-separated list of directories.
- </description>
- </property>
-
- <property>
- <name>instance.secret</name>
- <value>DEFAULT</value>
- <description>A secret unique to a given instance that all servers must know in order to communicate with one another.
- Change it before initialization. To change it later, use
- ./bin/accumulo org.apache.accumulo.server.util.ChangeSecret [oldpasswd] [newpasswd],
- and then update this file.
- </description>
- </property>
-
- <property>
- <name>tserver.memory.maps.max</name>
- <value>80M</value>
- </property>
-
- <property>
- <name>tserver.cache.data.size</name>
- <value>7M</value>
- </property>
-
- <property>
- <name>tserver.cache.index.size</name>
- <value>20M</value>
- </property>
-
- <property>
- <name>trace.password</name>
- <!--
- change this to the root user's password, and/or change the user below
- -->
- <value>secret</value>
- </property>
-
- <property>
- <name>trace.user</name>
- <value>root</value>
- </property>
-
- <property>
- <name>tserver.sort.buffer.size</name>
- <value>50M</value>
- </property>
-
- <property>
- <name>tserver.walog.max.size</name>
- <value>100M</value>
- </property>
-
- <property>
- <name>general.classpaths</name>
- <!--
- Add the following for hadoop-2.0
- $HADOOP_PREFIX/share/hadoop/common/.*.jar,
- $HADOOP_PREFIX/share/hadoop/common/lib/.*.jar,
- $HADOOP_PREFIX/share/hadoop/hdfs/.*.jar,
- $HADOOP_PREFIX/share/hadoop/mapreduce/.*.jar,
- $HADOOP_PREFIX/share/hadoop/yarn/.*.jar,
- -->
- <value>
- $ACCUMULO_HOME/server/target/classes/,
- $ACCUMULO_HOME/lib/accumulo-server.jar,
- $ACCUMULO_HOME/core/target/classes/,
- $ACCUMULO_HOME/lib/accumulo-core.jar,
- $ACCUMULO_HOME/start/target/classes/,
- $ACCUMULO_HOME/lib/accumulo-start.jar,
- $ACCUMULO_HOME/fate/target/classes/,
- $ACCUMULO_HOME/lib/accumulo-fate.jar,
- $ACCUMULO_HOME/proxy/target/classes/,
- $ACCUMULO_HOME/lib/accumulo-proxy.jar,
- $ACCUMULO_HOME/lib/[^.].*.jar,
- $ZOOKEEPER_HOME/zookeeper[^.].*.jar,
- $HADOOP_CONF_DIR,
- $HADOOP_PREFIX/[^.].*.jar,
- $HADOOP_PREFIX/lib/[^.].*.jar,
- </value>
- <description>Classpaths that accumulo checks for updates and class files.
- When using the Security Manager, please remove the ".../target/classes/" values.
- </description>
- </property>
-</configuration>
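
The site file above is consumed through Hadoop's Configuration machinery
layered over Accumulo's defaults (the anonymous AccumuloConfiguration in
CertUtils earlier in this patch shows the layering). Reading a single property
from such a file, as a small sketch with a hypothetical local path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    Configuration site = new Configuration(false); // false: skip Hadoop's own default resources
    site.addResource(new Path("accumulo-site.xml")); // hypothetical path
    String secret = site.get("instance.secret", "DEFAULT"); // same fallback the shipped file uses
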
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/resources/conf/generic_logger.xml
----------------------------------------------------------------------
diff --git a/test/src/test/resources/conf/generic_logger.xml b/test/src/test/resources/conf/generic_logger.xml
deleted file mode 100644
index db79efe..0000000
--- a/test/src/test/resources/conf/generic_logger.xml
+++ /dev/null
@@ -1,83 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
--->
-<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
-<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
-
- <!-- Write out everything at the DEBUG level to the debug log -->
- <appender name="A2" class="org.apache.log4j.RollingFileAppender">
- <param name="File" value="${org.apache.accumulo.core.dir.log}/${org.apache.accumulo.core.application}_${org.apache.accumulo.core.ip.localhost.hostname}.debug.log"/>
- <param name="MaxFileSize" value="1000MB"/>
- <param name="MaxBackupIndex" value="10"/>
- <param name="Threshold" value="DEBUG"/>
- <layout class="org.apache.log4j.PatternLayout">
- <param name="ConversionPattern" value="%d{ISO8601} [%-8c{2}] %-5p: %m%n"/>
- </layout>
- </appender>
-
- <!-- Write out INFO and higher to the regular log -->
- <appender name="A3" class="org.apache.log4j.RollingFileAppender">
- <param name="File" value="${org.apache.accumulo.core.dir.log}/${org.apache.accumulo.core.application}_${org.apache.accumulo.core.ip.localhost.hostname}.log"/>
- <param name="MaxFileSize" value="1000MB"/>
- <param name="MaxBackupIndex" value="10"/>
- <param name="Threshold" value="INFO"/>
- <layout class="org.apache.log4j.PatternLayout">
- <param name="ConversionPattern" value="%d{ISO8601} [%-8c{2}] %-5p: %m%n"/>
- </layout>
- </appender>
-
- <!-- Send all logging data to a centralized logger -->
- <appender name="N1" class="org.apache.log4j.net.SocketAppender">
- <param name="remoteHost" value="${org.apache.accumulo.core.host.log}"/>
- <param name="port" value="${org.apache.accumulo.core.host.log.port}"/>
- <param name="application" value="${org.apache.accumulo.core.application}:${org.apache.accumulo.core.ip.localhost.hostname}"/>
- <param name="Threshold" value="WARN"/>
- </appender>
-
- <!-- If the centralized logger is down, buffer the log events, but drop them if it stays down -->
- <appender name="ASYNC" class="org.apache.log4j.AsyncAppender">
- <appender-ref ref="N1" />
- </appender>
-
- <!-- Log accumulo events to the debug, normal and remote logs. -->
- <logger name="org.apache.accumulo" additivity="false">
- <level value="DEBUG"/>
- <appender-ref ref="A2" />
- <appender-ref ref="A3" />
- <appender-ref ref="ASYNC" />
- </logger>
-
- <logger name="org.apache.accumulo.core.file.rfile.bcfile">
- <level value="INFO"/>
- </logger>
-
- <logger name="org.mortbay.log">
- <level value="WARN"/>
- </logger>
-
- <logger name="org.apache.zookeeper">
- <level value="ERROR"/>
- </logger>
-
- <!-- Log non-accumulo events to the debug and normal logs. -->
- <root>
- <level value="INFO"/>
- <appender-ref ref="A2" />
- <appender-ref ref="A3" />
- </root>
-
-</log4j:configuration>
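
The wiring above puts a WARN-threshold SocketAppender behind an AsyncAppender
so that an unreachable central logger buffers and then drops events rather
than blocking the server. A programmatic equivalent in the log4j 1.x API, as a
sketch with a hypothetical host and port:

    import org.apache.log4j.AsyncAppender;
    import org.apache.log4j.Level;
    import org.apache.log4j.Logger;
    import org.apache.log4j.net.SocketAppender;

    SocketAppender remote = new SocketAppender("monitor.example.com", 4560); // hypothetical
    remote.setThreshold(Level.WARN);

    AsyncAppender async = new AsyncAppender();
    async.setBlocking(false); // drop events once the buffer fills while the socket is down
    async.addAppender(remote);

    Logger accumulo = Logger.getLogger("org.apache.accumulo");
    accumulo.setAdditivity(false);
    accumulo.addAppender(async);
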
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/RenameIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/RenameIT.java b/test/src/test/java/org/apache/accumulo/test/functional/RenameIT.java
deleted file mode 100644
index 0c22196..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/RenameIT.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.junit.Test;
-
-public class RenameIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Test
- public void renameTest() throws Exception {
- String[] tableNames = getUniqueNames(2);
- String name1 = tableNames[0];
- String name2 = tableNames[1];
- BatchWriterOpts bwOpts = new BatchWriterOpts();
- ScannerOpts scanOpts = new ScannerOpts();
- TestIngest.Opts opts = new TestIngest.Opts();
- opts.createTable = true;
- opts.setTableName(name1);
-
- final ClientConfiguration clientConfig = cluster.getClientConfig();
- if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- opts.updateKerberosCredentials(clientConfig);
- } else {
- opts.setPrincipal(getAdminPrincipal());
- }
-
- Connector c = getConnector();
- TestIngest.ingest(c, opts, bwOpts);
- c.tableOperations().rename(name1, name2);
- TestIngest.ingest(c, opts, bwOpts);
- VerifyIngest.Opts vopts = new VerifyIngest.Opts();
-
- if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- vopts.updateKerberosCredentials(clientConfig);
- } else {
- vopts.setPrincipal(getAdminPrincipal());
- }
-
- vopts.setTableName(name2);
- VerifyIngest.verifyIngest(c, vopts, scanOpts);
- c.tableOperations().delete(name1);
- c.tableOperations().rename(name2, name1);
- vopts.setTableName(name1);
- VerifyIngest.verifyIngest(c, vopts, scanOpts);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java b/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
deleted file mode 100644
index 39e9bed..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.IOException;
-import java.util.Map.Entry;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.cluster.ClusterControl;
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.ZooCache;
-import org.apache.accumulo.fate.zookeeper.ZooLock;
-import org.apache.accumulo.fate.zookeeper.ZooReader;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Charsets;
-
-public class RestartIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(RestartIT.class);
-
- @Override
- public int defaultTimeoutSeconds() {
- return 10 * 60;
- }
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
- cfg.setProperty(Property.GC_CYCLE_DELAY, "1s");
- cfg.setProperty(Property.GC_CYCLE_START, "1s");
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- private static final ScannerOpts SOPTS = new ScannerOpts();
- private static final VerifyIngest.Opts VOPTS = new VerifyIngest.Opts();
- private static final TestIngest.Opts OPTS = new TestIngest.Opts();
- private static final BatchWriterOpts BWOPTS = new BatchWriterOpts();
- static {
- OPTS.rows = VOPTS.rows = 10 * 1000;
- }
-
- private ExecutorService svc;
-
- @Before
- public void setup() throws Exception {
- svc = Executors.newFixedThreadPool(1);
- }
-
- @After
- public void teardown() throws Exception {
- if (null == svc) {
- return;
- }
-
- if (!svc.isShutdown()) {
- svc.shutdown();
- }
-
- while (!svc.awaitTermination(10, TimeUnit.SECONDS)) {
- log.info("Waiting for threadpool to terminate");
- }
- }
-
- @Test
- public void restartMaster() throws Exception {
- Connector c = getConnector();
- final String tableName = getUniqueNames(1)[0];
- OPTS.setTableName(tableName);
- VOPTS.setTableName(tableName);
- c.tableOperations().create(tableName);
- final AuthenticationToken token = getAdminToken();
- final ClusterControl control = getCluster().getClusterControl();
-
- final String[] args;
- if (token instanceof PasswordToken) {
- byte[] password = ((PasswordToken) token).getPassword();
- args = new String[] {"-u", getAdminPrincipal(), "-p", new String(password, Charsets.UTF_8), "-i", cluster.getInstanceName(), "-z",
- cluster.getZooKeepers(), "--rows", "" + OPTS.rows, "--table", tableName};
- OPTS.setPrincipal(getAdminPrincipal());
- VOPTS.setPrincipal(getAdminPrincipal());
- } else if (token instanceof KerberosToken) {
- ClusterUser rootUser = getAdminUser();
- args = new String[] {"-u", getAdminPrincipal(), "--keytab", rootUser.getKeytab().getAbsolutePath(), "-i", cluster.getInstanceName(), "-z",
- cluster.getZooKeepers(), "--rows", "" + OPTS.rows, "--table", tableName};
- ClientConfiguration clientConfig = cluster.getClientConfig();
- OPTS.updateKerberosCredentials(clientConfig);
- VOPTS.updateKerberosCredentials(clientConfig);
- } else {
- throw new RuntimeException("Unknown token");
- }
-
- Future<Integer> ret = svc.submit(new Callable<Integer>() {
- @Override
- public Integer call() {
- try {
- return control.exec(TestIngest.class, args);
- } catch (IOException e) {
- log.error("Error running TestIngest", e);
- return -1;
- }
- }
- });
-
- control.stopAllServers(ServerType.MASTER);
- control.startAllServers(ServerType.MASTER);
- assertEquals(0, ret.get().intValue());
- VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
- }
-
- @Test
- public void restartMasterRecovery() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- OPTS.setTableName(tableName);
- VOPTS.setTableName(tableName);
- ClientConfiguration clientConfig = cluster.getClientConfig();
- if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- OPTS.updateKerberosCredentials(clientConfig);
- VOPTS.updateKerberosCredentials(clientConfig);
- } else {
- OPTS.setPrincipal(getAdminPrincipal());
- VOPTS.setPrincipal(getAdminPrincipal());
- }
- TestIngest.ingest(c, OPTS, BWOPTS);
- ClusterControl control = getCluster().getClusterControl();
-
- // TODO implement a kill all too?
- // cluster.stop() would also stop ZooKeeper
- control.stopAllServers(ServerType.MASTER);
- control.stopAllServers(ServerType.TRACER);
- control.stopAllServers(ServerType.TABLET_SERVER);
- control.stopAllServers(ServerType.GARBAGE_COLLECTOR);
- control.stopAllServers(ServerType.MONITOR);
-
- ZooReader zreader = new ZooReader(c.getInstance().getZooKeepers(), c.getInstance().getZooKeepersSessionTimeOut());
- ZooCache zcache = new ZooCache(zreader, null);
- byte[] masterLockData;
- do {
- masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(c.getInstance()) + Constants.ZMASTER_LOCK, null);
- if (null != masterLockData) {
- log.info("Master lock is still held");
- Thread.sleep(1000);
- }
- } while (null != masterLockData);
-
- cluster.start();
- UtilWaitThread.sleep(5);
- control.stopAllServers(ServerType.MASTER);
-
- masterLockData = new byte[0];
- do {
- masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(c.getInstance()) + Constants.ZMASTER_LOCK, null);
- if (null != masterLockData) {
- log.info("Master lock is still held");
- Thread.sleep(1000);
- }
- } while (null != masterLockData);
- cluster.start();
- VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
- }
-
- @Test
- public void restartMasterSplit() throws Exception {
- Connector c = getConnector();
- final String tableName = getUniqueNames(1)[0];
- final AuthenticationToken token = getAdminToken();
- final ClusterControl control = getCluster().getClusterControl();
- VOPTS.setTableName(tableName);
- c.tableOperations().create(tableName);
- c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "5K");
-
- final String[] args;
- if (token instanceof PasswordToken) {
- byte[] password = ((PasswordToken) token).getPassword();
- args = new String[] {"-u", getAdminPrincipal(), "-p", new String(password, Charsets.UTF_8), "-i", cluster.getInstanceName(), "-z",
- cluster.getZooKeepers(), "--rows", Integer.toString(VOPTS.rows), "--table", tableName};
- OPTS.setPrincipal(getAdminPrincipal());
- VOPTS.setPrincipal(getAdminPrincipal());
- } else if (token instanceof KerberosToken) {
- ClusterUser rootUser = getAdminUser();
- args = new String[] {"-u", getAdminPrincipal(), "--keytab", rootUser.getKeytab().getAbsolutePath(), "-i", cluster.getInstanceName(), "-z",
- cluster.getZooKeepers(), "--rows", Integer.toString(VOPTS.rows), "--table", tableName};
- ClientConfiguration clientConfig = cluster.getClientConfig();
- OPTS.updateKerberosCredentials(clientConfig);
- VOPTS.updateKerberosCredentials(clientConfig);
- } else {
- throw new RuntimeException("Unknown token");
- }
-
- Future<Integer> ret = svc.submit(new Callable<Integer>() {
- @Override
- public Integer call() {
- try {
- return control.exec(TestIngest.class, args);
- } catch (Exception e) {
- log.error("Error running TestIngest", e);
- return -1;
- }
- }
- });
-
- control.stopAllServers(ServerType.MASTER);
-
- ZooReader zreader = new ZooReader(c.getInstance().getZooKeepers(), c.getInstance().getZooKeepersSessionTimeOut());
- ZooCache zcache = new ZooCache(zreader, null);
- byte[] masterLockData;
- do {
- masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(c.getInstance()) + Constants.ZMASTER_LOCK, null);
- if (null != masterLockData) {
- log.info("Master lock is still held");
- Thread.sleep(1000);
- }
- } while (null != masterLockData);
-
- cluster.start();
- assertEquals(0, ret.get().intValue());
- VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
- }
-
- @Test
- public void killedTabletServer() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- OPTS.setTableName(tableName);
- VOPTS.setTableName(tableName);
- ClientConfiguration clientConfig = cluster.getClientConfig();
- if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- OPTS.updateKerberosCredentials(clientConfig);
- VOPTS.updateKerberosCredentials(clientConfig);
- } else {
- OPTS.setPrincipal(getAdminPrincipal());
- VOPTS.setPrincipal(getAdminPrincipal());
- }
- TestIngest.ingest(c, OPTS, BWOPTS);
- VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
- cluster.getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
- cluster.start();
- VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
- }
-
- @Test
- public void killedTabletServer2() throws Exception {
- final Connector c = getConnector();
- final String[] names = getUniqueNames(2);
- final String tableName = names[0];
- final ClusterControl control = getCluster().getClusterControl();
- c.tableOperations().create(tableName);
- // Original test started and then stopped a GC. Not sure why it did this. The GC was
- // already running by default, and it would have nothing to do after only creating a table
- control.stopAllServers(ServerType.TABLET_SERVER);
-
- cluster.start();
- c.tableOperations().create(names[1]);
- }
-
- @Test
- public void killedTabletServerDuringShutdown() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- OPTS.setTableName(tableName);
- ClientConfiguration clientConfig = cluster.getClientConfig();
- if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- OPTS.updateKerberosCredentials(clientConfig);
- } else {
- OPTS.setPrincipal(getAdminPrincipal());
- }
- TestIngest.ingest(c, OPTS, BWOPTS);
- try {
- getCluster().getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
- getCluster().getClusterControl().adminStopAll();
- } finally {
- getCluster().start();
- }
- }
-
- @Test
- public void shutdownDuringCompactingSplitting() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- VOPTS.setTableName(tableName);
- ClientConfiguration clientConfig = cluster.getClientConfig();
- if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- OPTS.updateKerberosCredentials(clientConfig);
- VOPTS.updateKerberosCredentials(clientConfig);
- } else {
- OPTS.setPrincipal(getAdminPrincipal());
- VOPTS.setPrincipal(getAdminPrincipal());
- }
- c.tableOperations().create(tableName);
- c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
- String splitThreshold = null;
- for (Entry<String,String> entry : c.tableOperations().getProperties(tableName)) {
- if (entry.getKey().equals(Property.TABLE_SPLIT_THRESHOLD.getKey())) {
- splitThreshold = entry.getValue();
- break;
- }
- }
- Assert.assertNotNull(splitThreshold);
- try {
- c.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "20K");
- TestIngest.Opts opts = new TestIngest.Opts();
- opts.setTableName(tableName);
- if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- opts.updateKerberosCredentials(clientConfig);
- } else {
- opts.setPrincipal(getAdminPrincipal());
- }
- TestIngest.ingest(c, opts, BWOPTS);
- c.tableOperations().flush(tableName, null, null, false);
- VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
- getCluster().stop();
- } finally {
- if (getClusterType() == ClusterType.STANDALONE) {
- getCluster().start();
- c.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), splitThreshold);
- }
- }
- }
-}
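For reference, the master-lock wait used in the restart tests above reduces to a small polling loop. A minimal sketch, assuming zcache and the instance are set up as in the test:

    // Poll ZooKeeper until the master's lock node reports no holder.
    byte[] masterLockData;
    do {
      masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(instance) + Constants.ZMASTER_LOCK, null);
      if (masterLockData != null) {
        Thread.sleep(1000); // lock still held; wait and re-check
      }
    } while (masterLockData != null);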
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/RestartStressIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/RestartStressIT.java b/test/src/test/java/org/apache/accumulo/test/functional/RestartStressIT.java
deleted file mode 100644
index abfd5d8..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/RestartStressIT.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.cluster.ClusterControl;
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Charsets;
-
-public class RestartStressIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(RestartStressIT.class);
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- Map<String,String> opts = cfg.getSiteConfig();
- opts.put(Property.TSERV_MAXMEM.getKey(), "100K");
- opts.put(Property.TSERV_MAJC_DELAY.getKey(), "100ms");
- opts.put(Property.TSERV_WALOG_MAX_SIZE.getKey(), "1M");
- opts.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "5s");
- opts.put(Property.MASTER_RECOVERY_DELAY.getKey(), "1s");
- cfg.setSiteConfig(opts);
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 10 * 60;
- }
-
- private ExecutorService svc;
-
- @Before
- public void setup() throws Exception {
- svc = Executors.newFixedThreadPool(1);
- }
-
- @After
- public void teardown() throws Exception {
- if (null == svc) {
- return;
- }
-
- if (!svc.isShutdown()) {
- svc.shutdown();
- }
-
- while (!svc.awaitTermination(10, TimeUnit.SECONDS)) {
- log.info("Waiting for threadpool to terminate");
- }
- }
-
- private static final VerifyIngest.Opts VOPTS;
- static {
- VOPTS = new VerifyIngest.Opts();
- VOPTS.rows = 10 * 1000;
- }
- private static final ScannerOpts SOPTS = new ScannerOpts();
-
- @Test
- public void test() throws Exception {
- final Connector c = getConnector();
- final String tableName = getUniqueNames(1)[0];
- final AuthenticationToken token = getAdminToken();
- c.tableOperations().create(tableName);
- c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "500K");
- final ClusterControl control = getCluster().getClusterControl();
- final String[] args;
- if (token instanceof PasswordToken) {
- byte[] password = ((PasswordToken) token).getPassword();
- args = new String[] {"-u", getAdminPrincipal(), "-p", new String(password, Charsets.UTF_8), "-i", cluster.getInstanceName(), "-z",
- cluster.getZooKeepers(), "--rows", "" + VOPTS.rows, "--table", tableName};
- } else if (token instanceof KerberosToken) {
- ClusterUser rootUser = getAdminUser();
- args = new String[] {"-u", getAdminPrincipal(), "--keytab", rootUser.getKeytab().getAbsolutePath(), "-i", cluster.getInstanceName(), "-z",
- cluster.getZooKeepers(), "--rows", "" + VOPTS.rows, "--table", tableName};
- } else {
- throw new RuntimeException("Unrecognized token");
- }
-
- Future<Integer> retCode = svc.submit(new Callable<Integer>() {
- @Override
- public Integer call() {
- try {
- return control.exec(TestIngest.class, args);
- } catch (Exception e) {
- log.error("Error running TestIngest", e);
- return -1;
- }
- }
- });
-
- for (int i = 0; i < 2; i++) {
- UtilWaitThread.sleep(10 * 1000);
- control.stopAllServers(ServerType.TABLET_SERVER);
- control.startAllServers(ServerType.TABLET_SERVER);
- }
- assertEquals(0, retCode.get().intValue());
- VOPTS.setTableName(tableName);
-
- if (token instanceof PasswordToken) {
- VOPTS.setPrincipal(getAdminPrincipal());
- } else if (token instanceof KerberosToken) {
- VOPTS.updateKerberosCredentials(cluster.getClientConfig());
- } else {
- throw new RuntimeException("Unrecognized token");
- }
-
- VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
- }
-
-}
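The stress pattern above - ingest running in the background while tablet servers are bounced - is the core of the test. A minimal sketch, assuming svc, control, and args are set up as in the test:

    // Run ingest asynchronously, then restart all tablet servers twice.
    Future<Integer> ret = svc.submit(new Callable<Integer>() {
      @Override
      public Integer call() throws Exception {
        return control.exec(TestIngest.class, args);
      }
    });
    for (int i = 0; i < 2; i++) {
      Thread.sleep(10 * 1000);                           // let ingest make progress
      control.stopAllServers(ServerType.TABLET_SERVER);  // kill every tserver
      control.startAllServers(ServerType.TABLET_SERVER); // bring them back
    }
    assertEquals(0, ret.get().intValue()); // ingest must still exit cleanly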
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/RowDeleteIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/RowDeleteIT.java b/test/src/test/java/org/apache/accumulo/test/functional/RowDeleteIT.java
deleted file mode 100644
index 75c66bd..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/RowDeleteIT.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.apache.accumulo.test.functional.FunctionalTestUtils.checkRFiles;
-import static org.apache.accumulo.test.functional.FunctionalTestUtils.nm;
-import static org.junit.Assert.assertEquals;
-
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
-import org.apache.accumulo.core.iterators.user.RowDeletingIterator;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class RowDeleteIT extends AccumuloClusterHarness {
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- Map<String,String> siteConfig = cfg.getSiteConfig();
- siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "50ms");
- cfg.setSiteConfig(siteConfig);
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Test
- public void run() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
- groups.put("lg1", Collections.singleton(new Text("foo")));
- groups.put("dg", Collections.<Text> emptySet());
- c.tableOperations().setLocalityGroups(tableName, groups);
- IteratorSetting setting = new IteratorSetting(30, RowDeletingIterator.class);
- c.tableOperations().attachIterator(tableName, setting, EnumSet.of(IteratorScope.majc));
- c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "100");
-
- BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
-
- bw.addMutation(nm("r1", "foo", "cf1", "v1"));
- bw.addMutation(nm("r1", "bar", "cf1", "v2"));
-
- bw.flush();
- c.tableOperations().flush(tableName, null, null, true);
-
- checkRFiles(c, tableName, 1, 1, 1, 1);
-
- Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
- int count = Iterators.size(scanner.iterator());
- assertEquals("count == " + count, 2, count);
-
- bw.addMutation(nm("r1", "", "", RowDeletingIterator.DELETE_ROW_VALUE));
-
- bw.flush();
- c.tableOperations().flush(tableName, null, null, true);
-
- checkRFiles(c, tableName, 1, 1, 2, 2);
-
- scanner = c.createScanner(tableName, Authorizations.EMPTY);
- count = Iterators.size(scanner.iterator());
- assertEquals("count == " + count, 3, count);
-
- c.tableOperations().compact(tableName, null, null, false, true);
-
- checkRFiles(c, tableName, 1, 1, 0, 0);
-
- scanner = c.createScanner(tableName, Authorizations.EMPTY);
- count = Iterators.size(scanner.iterator());
- assertEquals("count == " + count, 0, count);
- bw.close();
-
- }
-
-}
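The row-delete mechanism exercised above is worth calling out: once the RowDeletingIterator is attached, writing the DELETE_ROW_VALUE marker at the empty column suppresses the whole row. A minimal sketch, with an illustrative table name:

    // Attach the iterator at compaction scope, then mark row r1 deleted.
    IteratorSetting setting = new IteratorSetting(30, RowDeletingIterator.class);
    conn.tableOperations().attachIterator("mytable", setting, EnumSet.of(IteratorScope.majc));

    BatchWriter bw = conn.createBatchWriter("mytable", new BatchWriterConfig());
    Mutation m = new Mutation(new Text("r1"));
    m.put(new Text(""), new Text(""), RowDeletingIterator.DELETE_ROW_VALUE); // delete marker
    bw.addMutation(m);
    bw.close();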
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ScanIdIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ScanIdIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ScanIdIT.java
deleted file mode 100644
index 863ac78..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ScanIdIT.java
+++ /dev/null
@@ -1,385 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static com.google.common.base.Charsets.UTF_8;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.util.EnumSet;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.admin.ActiveScan;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.IteratorUtil;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Integration test for ACCUMULO-2641, which adds a scan id to the thrift protocol so that
- * {@code org.apache.accumulo.core.client.admin.ActiveScan.getScanid()} returns a unique scan id.
- * <p>
- * The test uses the MiniCluster and {@code org.apache.accumulo.test.functional.SlowIterator} to create multiple scan sessions. The test exercises multiple
- * tablet servers with splits and multiple ranges to force the scans to occur across multiple tablet servers for completeness.
- * <p>
- * This patch modified thrift; the TraceRepoDeserializationTest test seems to fail unless the following is added back into
- * org.apache.accumulo.trace.thrift.TInfo until that test signature is regenerated:
- * <p>
- * {@code private static final long serialVersionUID = -4659975753252858243l;}
- */
-public class ScanIdIT extends AccumuloClusterHarness {
-
- private static final Logger log = LoggerFactory.getLogger(ScanIdIT.class);
-
- private static final int NUM_SCANNERS = 8;
-
- private static final int NUM_DATA_ROWS = 100;
-
- private static final Random random = new Random();
-
- private static final ExecutorService pool = Executors.newFixedThreadPool(NUM_SCANNERS);
-
- private static final AtomicBoolean testInProgress = new AtomicBoolean(true);
-
- private static final Map<Integer,Value> resultsByWorker = new ConcurrentHashMap<Integer,Value>();
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60;
- }
-
- /**
- * @throws Exception
- * any exception is a test failure.
- */
- @Test
- public void testScanId() throws Exception {
-
- final String tableName = getUniqueNames(1)[0];
- Connector conn = getConnector();
- conn.tableOperations().create(tableName);
-
- addSplits(conn, tableName);
-
- log.info("Splits added");
-
- generateSampleData(conn, tableName);
-
- log.info("Generated data for {}", tableName);
-
- attachSlowIterator(conn, tableName);
-
- CountDownLatch latch = new CountDownLatch(NUM_SCANNERS);
-
- for (int scannerIndex = 0; scannerIndex < NUM_SCANNERS; scannerIndex++) {
- ScannerThread st = new ScannerThread(conn, scannerIndex, tableName, latch);
- pool.submit(st);
- }
-
- // wait for scanners to report a result.
- while (testInProgress.get()) {
-
- if (resultsByWorker.size() < NUM_SCANNERS) {
- log.trace("Results reported {}", resultsByWorker.size());
- UtilWaitThread.sleep(750);
- } else {
- // each worker has reported at least one result.
- testInProgress.set(false);
-
- log.debug("Final result count {}", resultsByWorker.size());
-
- // delay to allow scanners to react to end of test and cleanly close.
- UtilWaitThread.sleep(1000);
- }
-
- }
-
- // all scanners have reported at least 1 result, so check for unique scan ids.
- Set<Long> scanIds = new HashSet<Long>();
-
- List<String> tservers = conn.instanceOperations().getTabletServers();
-
- log.debug("tablet servers {}", tservers.toString());
-
- for (String tserver : tservers) {
-
- List<ActiveScan> activeScans = null;
- for (int i = 0; i < 10; i++) {
- try {
- activeScans = conn.instanceOperations().getActiveScans(tserver);
- break;
- } catch (AccumuloException e) {
- if (e.getCause() instanceof TableNotFoundException) {
- log.debug("Got TableNotFoundException, will retry");
- Thread.sleep(200);
- continue;
- }
- throw e;
- }
- }
-
- assertNotNull("Repeatedly got exception trying to active scans", activeScans);
-
- log.debug("TServer {} has {} active scans", tserver, activeScans.size());
-
- for (ActiveScan scan : activeScans) {
- log.debug("Tserver {} scan id {}", tserver, scan.getScanid());
- scanIds.add(scan.getScanid());
- }
- }
-
- assertTrue("Expected at least " + NUM_SCANNERS + " scanIds, but saw " + scanIds.size(), NUM_SCANNERS <= scanIds.size());
-
- }
-
- /**
- * Runs a scanner in a separate thread to allow multiple scanners to execute in parallel.
- * <p>
- * The thread's run method terminates when the testInProgress flag is set to false.
- */
- private static class ScannerThread implements Runnable {
-
- private final Connector connector;
- private Scanner scanner = null;
- private final int workerIndex;
- private final String tablename;
- private final CountDownLatch latch;
-
- public ScannerThread(final Connector connector, final int workerIndex, final String tablename, final CountDownLatch latch) {
- this.connector = connector;
- this.workerIndex = workerIndex;
- this.tablename = tablename;
- this.latch = latch;
- }
-
- /**
- * Execute the scan across the sample data and put scan results into the result map until the testInProgress flag is set to false.
- */
- @Override
- public void run() {
-
- latch.countDown();
- try {
- latch.await();
- } catch (InterruptedException e) {
- log.error("Thread interrupted with id {}", workerIndex);
- Thread.currentThread().interrupt();
- return;
- }
-
- log.debug("Creating scanner in worker thread {}", workerIndex);
-
- try {
-
- scanner = connector.createScanner(tablename, new Authorizations());
-
- // Never start readahead
- scanner.setReadaheadThreshold(Long.MAX_VALUE);
- scanner.setBatchSize(1);
-
- // create different ranges to try to hit more than one tablet.
- scanner.setRange(new Range(new Text(Integer.toString(workerIndex)), new Text("9")));
-
- } catch (TableNotFoundException e) {
- throw new IllegalStateException("Initialization failure. Could not create scanner", e);
- }
-
- scanner.fetchColumnFamily(new Text("fam1"));
-
- for (Map.Entry<Key,Value> entry : scanner) {
-
- // exit when success condition is met.
- if (!testInProgress.get()) {
- scanner.clearScanIterators();
- scanner.close();
-
- return;
- }
-
- Text row = entry.getKey().getRow();
-
- log.debug("worker {}, row {}", workerIndex, row.toString());
-
- if (entry.getValue() != null) {
-
- Value prevValue = resultsByWorker.put(workerIndex, entry.getValue());
-
- // value should always be increasing
- if (prevValue != null) {
-
- log.trace("worker {} values {}", workerIndex, String.format("%1$s < %2$s", prevValue, entry.getValue()));
-
- assertTrue(prevValue.compareTo(entry.getValue()) > 0);
- }
- } else {
- log.info("Scanner returned null");
- fail("Scanner returned unexpected null value");
- }
-
- }
-
- log.debug("Scanner ran out of data. (info only, not an error) ");
-
- }
- }
-
- /**
- * Create splits on the table and force migration by taking the table offline and then bringing it back online for the test.
- *
- * @param conn
- * Accumulo connector to the test cluster or MAC instance.
- */
- private void addSplits(final Connector conn, final String tableName) {
-
- SortedSet<Text> splits = createSplits();
-
- try {
-
- conn.tableOperations().addSplits(tableName, splits);
-
- conn.tableOperations().offline(tableName, true);
-
- UtilWaitThread.sleep(2000);
- conn.tableOperations().online(tableName, true);
-
- for (Text split : conn.tableOperations().listSplits(tableName)) {
- log.trace("Split {}", split);
- }
-
- } catch (AccumuloSecurityException e) {
- throw new IllegalStateException("Initialization failed. Could not add splits to " + tableName, e);
- } catch (TableNotFoundException e) {
- throw new IllegalStateException("Initialization failed. Could not add splits to " + tableName, e);
- } catch (AccumuloException e) {
- throw new IllegalStateException("Initialization failed. Could not add splits to " + tableName, e);
- }
-
- }
-
- /**
- * Create splits to distribute data across multiple tservers.
- *
- * @return splits in sorted set for addSplits.
- */
- private SortedSet<Text> createSplits() {
-
- SortedSet<Text> splits = new TreeSet<Text>();
-
- for (int split = 0; split < 10; split++) {
- splits.add(new Text(Integer.toString(split)));
- }
-
- return splits;
- }
-
- /**
- * Generate some sample data using random row ids to distribute the rows across the splits.
- * <p>
- * The primary goal is to determine that each scanner is assigned a unique scan id. This test does check that the count value for fam1 increases if a scanner
- * reads multiple values, but that is a secondary consideration, included for completeness.
- *
- * @param connector
- * Accumulo connector to the test cluster or MAC instance.
- */
- private void generateSampleData(Connector connector, final String tablename) {
-
- try {
-
- BatchWriter bw = connector.createBatchWriter(tablename, new BatchWriterConfig());
-
- ColumnVisibility vis = new ColumnVisibility("public");
-
- for (int i = 0; i < NUM_DATA_ROWS; i++) {
-
- Text rowId = new Text(String.format("%d", ((random.nextInt(10) * 100) + i)));
-
- Mutation m = new Mutation(rowId);
- m.put(new Text("fam1"), new Text("count"), new Value(Integer.toString(i).getBytes(UTF_8)));
- m.put(new Text("fam1"), new Text("positive"), vis, new Value(Integer.toString(NUM_DATA_ROWS - i).getBytes(UTF_8)));
- m.put(new Text("fam1"), new Text("negative"), vis, new Value(Integer.toString(i - NUM_DATA_ROWS).getBytes(UTF_8)));
-
- log.trace("Added row {}", rowId);
-
- bw.addMutation(m);
- }
-
- bw.close();
- } catch (TableNotFoundException ex) {
- throw new IllegalStateException("Initialization failed. Could not create test data", ex);
- } catch (MutationsRejectedException ex) {
- throw new IllegalStateException("Initialization failed. Could not create test data", ex);
- }
- }
-
- /**
- * Attach the test slow iterator so that we have time to read the scan id without creating a large dataset. Uses fairly large sleep and delay times because
- * we are not concerned with how much data is read, and we do not read all of the data - the test stops once each scanner reports a scan id.
- *
- * @param connector
- * Accumulo connector to the test cluster or MAC instance.
- */
- private void attachSlowIterator(Connector connector, final String tablename) {
- try {
-
- IteratorSetting slowIter = new IteratorSetting(50, "slowIter", "org.apache.accumulo.test.functional.SlowIterator");
- slowIter.addOption("sleepTime", "200");
- slowIter.addOption("seekSleepTime", "200");
-
- connector.tableOperations().attachIterator(tablename, slowIter, EnumSet.of(IteratorUtil.IteratorScope.scan));
-
- } catch (AccumuloException ex) {
- throw new IllegalStateException("Initialization failed. Could not attach slow iterator", ex);
- } catch (TableNotFoundException ex) {
- throw new IllegalStateException("Initialization failed. Could not attach slow iterator", ex);
- } catch (AccumuloSecurityException ex) {
- throw new IllegalStateException("Initialization failed. Could not attach slow iterator", ex);
- }
- }
-
-}
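The scan-id check at the heart of the test reduces to enumerating active scans per tablet server. A minimal sketch, assuming conn is a Connector to the cluster:

    // Collect scan ids from every tablet server's active scans.
    Set<Long> scanIds = new HashSet<Long>();
    for (String tserver : conn.instanceOperations().getTabletServers()) {
      for (ActiveScan scan : conn.instanceOperations().getActiveScans(tserver)) {
        scanIds.add(scan.getScanid()); // unique per scan session after ACCUMULO-2641
      }
    }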
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
deleted file mode 100644
index 3453303..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ScannerBase;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class ScanIteratorIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 30;
- }
-
- @Test
- public void run() throws Exception {
- String tableName = getUniqueNames(1)[0];
- Connector c = getConnector();
- c.tableOperations().create(tableName);
-
- BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
-
- for (int i = 0; i < 1000; i++) {
- Mutation m = new Mutation(new Text(String.format("%06d", i)));
- m.put(new Text("cf1"), new Text("cq1"), new Value(Integer.toString(1000 - i).getBytes(UTF_8)));
- m.put(new Text("cf1"), new Text("cq2"), new Value(Integer.toString(i - 1000).getBytes(UTF_8)));
-
- bw.addMutation(m);
- }
-
- bw.close();
-
- Scanner scanner = c.createScanner(tableName, new Authorizations());
-
- setupIter(scanner);
- verify(scanner, 1, 999);
-
- BatchScanner bscanner = c.createBatchScanner(tableName, new Authorizations(), 3);
- bscanner.setRanges(Collections.singleton(new Range((Key) null, null)));
-
- setupIter(bscanner);
- verify(bscanner, 1, 999);
-
- ArrayList<Range> ranges = new ArrayList<Range>();
- ranges.add(new Range(new Text(String.format("%06d", 1))));
- ranges.add(new Range(new Text(String.format("%06d", 6)), new Text(String.format("%06d", 16))));
- ranges.add(new Range(new Text(String.format("%06d", 20))));
- ranges.add(new Range(new Text(String.format("%06d", 23))));
- ranges.add(new Range(new Text(String.format("%06d", 56)), new Text(String.format("%06d", 61))));
- ranges.add(new Range(new Text(String.format("%06d", 501)), new Text(String.format("%06d", 504))));
- ranges.add(new Range(new Text(String.format("%06d", 998)), new Text(String.format("%06d", 1000))));
-
- HashSet<Integer> got = new HashSet<Integer>();
- HashSet<Integer> expected = new HashSet<Integer>();
- for (int i : new int[] {1, 7, 9, 11, 13, 15, 23, 57, 59, 61, 501, 503, 999}) {
- expected.add(i);
- }
-
- bscanner.setRanges(ranges);
-
- for (Entry<Key,Value> entry : bscanner) {
- got.add(Integer.parseInt(entry.getKey().getRow().toString()));
- }
-
- System.out.println("got : " + got);
-
- if (!got.equals(expected)) {
- throw new Exception(got + " != " + expected);
- }
-
- bscanner.close();
-
- }
-
- private void verify(Iterable<Entry<Key,Value>> scanner, int start, int finish) throws Exception {
-
- int expected = start;
- for (Entry<Key,Value> entry : scanner) {
- if (Integer.parseInt(entry.getKey().getRow().toString()) != expected) {
- throw new Exception("Saw unexpexted " + entry.getKey().getRow() + " " + expected);
- }
-
- if (entry.getKey().getColumnQualifier().toString().equals("cq2")) {
- expected += 2;
- }
- }
-
- if (expected != finish + 2) {
- throw new Exception("Ended at " + expected + " not " + (finish + 2));
- }
- }
-
- private void setupIter(ScannerBase scanner) throws Exception {
- IteratorSetting dropMod = new IteratorSetting(50, "dropMod", "org.apache.accumulo.test.functional.DropModIter");
- dropMod.addOption("mod", "2");
- dropMod.addOption("drop", "0");
- scanner.addScanIterator(dropMod);
- }
-
-}
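The scan-time iterator setup above works identically on Scanner and BatchScanner, since both are ScannerBase. A minimal sketch mirroring the test's configuration:

    // Configure a server-side scan iterator with options.
    IteratorSetting dropMod = new IteratorSetting(50, "dropMod", "org.apache.accumulo.test.functional.DropModIter");
    dropMod.addOption("mod", "2");  // modulus applied to the row
    dropMod.addOption("drop", "0"); // drop rows whose row % mod == 0
    scanner.addScanIterator(dropMod);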
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
deleted file mode 100644
index bd7555e..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.Map.Entry;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class ScanRangeIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- private static final int TS_LIMIT = 1;
- private static final int CQ_LIMIT = 5;
- private static final int CF_LIMIT = 5;
- private static final int ROW_LIMIT = 100;
-
- @Test
- public void run() throws Exception {
- Connector c = getConnector();
- String[] tableNames = getUniqueNames(2);
- String table1 = tableNames[0];
- c.tableOperations().create(table1);
- String table2 = tableNames[1];
- c.tableOperations().create(table2);
- TreeSet<Text> splitRows = new TreeSet<Text>();
- int splits = 3;
- for (int i = (ROW_LIMIT / splits); i < ROW_LIMIT; i += (ROW_LIMIT / splits))
- splitRows.add(createRow(i));
- c.tableOperations().addSplits(table2, splitRows);
-
- insertData(c, table1);
- scanTable(c, table1);
-
- insertData(c, table2);
- scanTable(c, table2);
- }
-
- private void scanTable(Connector c, String table) throws Exception {
- scanRange(c, table, new IntKey(0, 0, 0, 0), new IntKey(1, 0, 0, 0));
-
- scanRange(c, table, new IntKey(0, 0, 0, 0), new IntKey(ROW_LIMIT - 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
-
- scanRange(c, table, null, null);
-
- for (int i = 0; i < ROW_LIMIT; i += (ROW_LIMIT / 3)) {
- for (int j = 0; j < CF_LIMIT; j += (CF_LIMIT / 2)) {
- for (int k = 1; k < CQ_LIMIT; k += (CQ_LIMIT / 2)) {
- scanRange(c, table, null, new IntKey(i, j, k, 0));
- scanRange(c, table, new IntKey(0, 0, 0, 0), new IntKey(i, j, k, 0));
-
- scanRange(c, table, new IntKey(i, j, k, 0), new IntKey(ROW_LIMIT - 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
-
- scanRange(c, table, new IntKey(i, j, k, 0), null);
-
- }
- }
- }
-
- for (int i = 0; i < ROW_LIMIT; i++) {
- scanRange(c, table, new IntKey(i, 0, 0, 0), new IntKey(i, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
-
- if (i > 0 && i < ROW_LIMIT - 1) {
- scanRange(c, table, new IntKey(i - 1, 0, 0, 0), new IntKey(i + 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
- }
- }
-
- }
-
- private static class IntKey {
- private int row;
- private int cf;
- private int cq;
- private long ts;
-
- IntKey(IntKey ik) {
- this.row = ik.row;
- this.cf = ik.cf;
- this.cq = ik.cq;
- this.ts = ik.ts;
- }
-
- IntKey(int row, int cf, int cq, long ts) {
- this.row = row;
- this.cf = cf;
- this.cq = cq;
- this.ts = ts;
- }
-
- Key createKey() {
- Text trow = createRow(row);
- Text tcf = createCF(cf);
- Text tcq = createCQ(cq);
-
- return new Key(trow, tcf, tcq, ts);
- }
-
- IntKey increment() {
-
- IntKey ik = new IntKey(this);
-
- ik.ts++;
- if (ik.ts >= TS_LIMIT) {
- ik.ts = 0;
- ik.cq++;
- if (ik.cq >= CQ_LIMIT) {
- ik.cq = 0;
- ik.cf++;
- if (ik.cf >= CF_LIMIT) {
- ik.cf = 0;
- ik.row++;
- }
- }
- }
-
- return ik;
- }
-
- }
-
- private void scanRange(Connector c, String table, IntKey ik1, IntKey ik2) throws Exception {
- scanRange(c, table, ik1, false, ik2, false);
- scanRange(c, table, ik1, false, ik2, true);
- scanRange(c, table, ik1, true, ik2, false);
- scanRange(c, table, ik1, true, ik2, true);
- }
-
- private void scanRange(Connector c, String table, IntKey ik1, boolean inclusive1, IntKey ik2, boolean inclusive2) throws Exception {
- Scanner scanner = c.createScanner(table, Authorizations.EMPTY);
-
- Key key1 = null;
- Key key2 = null;
-
- IntKey expectedIntKey;
- IntKey expectedEndIntKey;
-
- if (ik1 != null) {
- key1 = ik1.createKey();
- expectedIntKey = ik1;
-
- if (!inclusive1) {
- expectedIntKey = expectedIntKey.increment();
- }
- } else {
- expectedIntKey = new IntKey(0, 0, 0, 0);
- }
-
- if (ik2 != null) {
- key2 = ik2.createKey();
- expectedEndIntKey = ik2;
-
- if (inclusive2) {
- expectedEndIntKey = expectedEndIntKey.increment();
- }
- } else {
- expectedEndIntKey = new IntKey(ROW_LIMIT, 0, 0, 0);
- }
-
- Range range = new Range(key1, inclusive1, key2, inclusive2);
-
- scanner.setRange(range);
-
- for (Entry<Key,Value> entry : scanner) {
-
- Key expectedKey = expectedIntKey.createKey();
- if (!expectedKey.equals(entry.getKey())) {
- throw new Exception(" " + expectedKey + " != " + entry.getKey());
- }
-
- expectedIntKey = expectedIntKey.increment();
- }
-
- if (!expectedIntKey.createKey().equals(expectedEndIntKey.createKey())) {
- throw new Exception(" " + expectedIntKey.createKey() + " != " + expectedEndIntKey.createKey());
- }
- }
-
- private static Text createCF(int cf) {
- Text tcf = new Text(String.format("cf_%03d", cf));
- return tcf;
- }
-
- private static Text createCQ(int cf) {
- Text tcf = new Text(String.format("cq_%03d", cf));
- return tcf;
- }
-
- private static Text createRow(int row) {
- Text trow = new Text(String.format("r_%06d", row));
- return trow;
- }
-
- private void insertData(Connector c, String table) throws Exception {
-
- BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
-
- for (int i = 0; i < ROW_LIMIT; i++) {
- Mutation m = new Mutation(createRow(i));
-
- for (int j = 0; j < CF_LIMIT; j++) {
- for (int k = 0; k < CQ_LIMIT; k++) {
- for (int t = 0; t < TS_LIMIT; t++) {
- m.put(createCF(j), createCQ(k), t, new Value(String.format("%06d_%03d_%03d_%03d", i, j, k, t).getBytes(UTF_8)));
- }
- }
- }
-
- bw.addMutation(m);
- }
-
- bw.close();
- }
-}
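The permutations in scanRange above cover the four inclusivity combinations a Range supports. A minimal sketch, with illustrative start and end keys:

    Range r1 = new Range(startKey, true, endKey, true);   // [start, end]
    Range r2 = new Range(startKey, true, endKey, false);  // [start, end)
    Range r3 = new Range(startKey, false, endKey, true);  // (start, end]
    Range r4 = new Range(startKey, false, endKey, false); // (start, end)
    scanner.setRange(r1);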
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java
deleted file mode 100644
index 0636056..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.InstanceOperations;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ScanSessionTimeOutIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(ScanSessionTimeOutIT.class);
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- Map<String,String> siteConfig = cfg.getSiteConfig();
- siteConfig.put(Property.TSERV_SESSION_MAXIDLE.getKey(), "3");
- cfg.setSiteConfig(siteConfig);
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60;
- }
-
- private String sessionIdle = null;
-
- @Before
- public void reduceSessionIdle() throws Exception {
- InstanceOperations ops = getConnector().instanceOperations();
- sessionIdle = ops.getSystemConfiguration().get(Property.TSERV_SESSION_MAXIDLE.getKey());
- ops.setProperty(Property.TSERV_SESSION_MAXIDLE.getKey(), "3");
- log.info("Waiting for existing session idle time to expire");
- Thread.sleep(AccumuloConfiguration.getTimeInMillis(sessionIdle));
- log.info("Finished waiting");
- }
-
- @After
- public void resetSessionIdle() throws Exception {
- if (null != sessionIdle) {
- getConnector().instanceOperations().setProperty(Property.TSERV_SESSION_MAXIDLE.getKey(), sessionIdle);
- }
- }
-
- @Test
- public void run() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
-
- BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
-
- for (int i = 0; i < 100000; i++) {
- Mutation m = new Mutation(new Text(String.format("%08d", i)));
- for (int j = 0; j < 3; j++)
- m.put(new Text("cf1"), new Text("cq" + j), new Value((i + "_" + j).getBytes(UTF_8)));
-
- bw.addMutation(m);
- }
-
- bw.close();
-
- Scanner scanner = c.createScanner(tableName, new Authorizations());
- scanner.setBatchSize(1000);
-
- Iterator<Entry<Key,Value>> iter = scanner.iterator();
-
- verify(iter, 0, 200);
-
- // sleep three times the session timeout
- UtilWaitThread.sleep(9000);
-
- verify(iter, 200, 100000);
-
- }
-
- private void verify(Iterator<Entry<Key,Value>> iter, int start, int stop) throws Exception {
- for (int i = start; i < stop; i++) {
-
- Text er = new Text(String.format("%08d", i));
-
- for (int j = 0; j < 3; j++) {
- Entry<Key,Value> entry = iter.next();
-
- if (!entry.getKey().getRow().equals(er)) {
- throw new Exception("row " + entry.getKey().getRow() + " != " + er);
- }
-
- if (!entry.getKey().getColumnFamily().equals(new Text("cf1"))) {
- throw new Exception("cf " + entry.getKey().getColumnFamily() + " != cf1");
- }
-
- if (!entry.getKey().getColumnQualifier().equals(new Text("cq" + j))) {
- throw new Exception("cq " + entry.getKey().getColumnQualifier() + " != cq" + j);
- }
-
- if (!entry.getValue().toString().equals("" + i + "_" + j)) {
- throw new Exception("value " + entry.getValue() + " != " + i + "_" + j);
- }
-
- }
- }
-
- }
-
-}
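The test hinges on shrinking the tserver's scan-session idle timeout and then pausing mid-scan. A minimal sketch of that knob, assuming conn is a Connector with admin rights:

    InstanceOperations ops = conn.instanceOperations();
    String previous = ops.getSystemConfiguration().get(Property.TSERV_SESSION_MAXIDLE.getKey());
    ops.setProperty(Property.TSERV_SESSION_MAXIDLE.getKey(), "3"); // 3 second idle timeout
    // ... scan a bit, sleep past the timeout, then keep iterating ...
    ops.setProperty(Property.TSERV_SESSION_MAXIDLE.getKey(), previous); // restore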
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ScannerIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ScannerIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ScannerIT.java
deleted file mode 100644
index 340a58e..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ScannerIT.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.fate.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.junit.Assert;
-import org.junit.Test;
-
-import com.google.common.base.Stopwatch;
-
-/**
- *
- */
-public class ScannerIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Test
- public void testScannerReadaheadConfiguration() throws Exception {
- final String table = getUniqueNames(1)[0];
- Connector c = getConnector();
- c.tableOperations().create(table);
-
- BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
-
- Mutation m = new Mutation("a");
- for (int i = 0; i < 10; i++) {
- m.put(Integer.toString(i), "", "");
- }
-
- bw.addMutation(m);
- bw.close();
-
- Scanner s = c.createScanner(table, new Authorizations());
-
- IteratorSetting cfg = new IteratorSetting(100, SlowIterator.class);
- // A batch size of one will end up calling seek() for each element with no calls to next()
- SlowIterator.setSeekSleepTime(cfg, 100l);
-
- s.addScanIterator(cfg);
- // Never start readahead
- s.setReadaheadThreshold(Long.MAX_VALUE);
- s.setBatchSize(1);
- s.setRange(new Range());
-
- Stopwatch sw = new Stopwatch();
- Iterator<Entry<Key,Value>> iterator = s.iterator();
-
- sw.start();
- while (iterator.hasNext()) {
- sw.stop();
-
- // While we "do work" in the client, we should be fetching the next result
- UtilWaitThread.sleep(100l);
- iterator.next();
- sw.start();
- }
- sw.stop();
-
- long millisWithWait = sw.elapsed(TimeUnit.MILLISECONDS);
-
- s = c.createScanner(table, new Authorizations());
- s.addScanIterator(cfg);
- s.setRange(new Range());
- s.setBatchSize(1);
- s.setReadaheadThreshold(0l);
-
- sw = new Stopwatch();
- iterator = s.iterator();
-
- sw.start();
- while (iterator.hasNext()) {
- sw.stop();
-
- // While we "do work" in the client, we should be fetching the next result
- UtilWaitThread.sleep(100l);
- iterator.next();
- sw.start();
- }
- sw.stop();
-
- long millisWithNoWait = sw.elapsed(TimeUnit.MILLISECONDS);
-
- // The "no-wait" time should be much less than the "wait-time"
- Assert.assertTrue("Expected less time to be taken with immediate readahead (" + millisWithNoWait + ") than without immediate readahead (" + millisWithWait
- + ")", millisWithNoWait < millisWithWait);
- }
-
-}
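The readahead comparison above comes down to two Scanner settings. A minimal sketch, with an illustrative table name:

    Scanner s = conn.createScanner("mytable", Authorizations.EMPTY);
    s.setBatchSize(1);
    s.setReadaheadThreshold(Long.MAX_VALUE); // never start background readahead
    // ...versus...
    s.setReadaheadThreshold(0L);             // prefetch the next batch immediately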
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
deleted file mode 100644
index 02b65f4..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Collections;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.Combiner;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class ServerSideErrorIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Test
- public void run() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- IteratorSetting is = new IteratorSetting(5, "Bad Aggregator", BadCombiner.class);
- Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("acf")));
- c.tableOperations().attachIterator(tableName, is);
-
- BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
-
- Mutation m = new Mutation(new Text("r1"));
- m.put(new Text("acf"), new Text("foo"), new Value(new byte[] {'1'}));
-
- bw.addMutation(m);
-
- bw.close();
-
- // try to scan table
- Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
-
- boolean caught = false;
- try {
- for (Entry<Key,Value> entry : scanner) {
- entry.getKey();
- }
- } catch (Exception e) {
- caught = true;
- }
-
- if (!caught)
- throw new Exception("Scan did not fail");
-
- // try to batch scan the table
- BatchScanner bs = c.createBatchScanner(tableName, Authorizations.EMPTY, 2);
- bs.setRanges(Collections.singleton(new Range()));
-
- caught = false;
- try {
- for (Entry<Key,Value> entry : bs) {
- entry.getKey();
- }
- } catch (Exception e) {
- caught = true;
- } finally {
- bs.close();
- }
-
- if (!caught)
- throw new Exception("batch scan did not fail");
-
- // remove the bad aggregator so accumulo can shut down
- TableOperations to = c.tableOperations();
- for (Entry<String,String> e : to.getProperties(tableName)) {
- to.removeProperty(tableName, e.getKey());
- }
-
- UtilWaitThread.sleep(500);
-
- // should be able to scan now
- scanner = c.createScanner(tableName, Authorizations.EMPTY);
- for (Entry<Key,Value> entry : scanner) {
- entry.getKey();
- }
-
- // set a non-existent iterator, which should cause the scan to fail on the server side
- scanner.addScanIterator(new IteratorSetting(100, "bogus", "com.bogus.iterator"));
-
- caught = false;
- try {
- for (Entry<Key,Value> entry : scanner) {
- // should error
- entry.getKey();
- }
- } catch (Exception e) {
- caught = true;
- }
-
- if (!caught)
- throw new Exception("Scan did not fail");
- }
-}
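The recovery step in the test - stripping all table properties to detach the bad combiner - is a useful pattern on its own. A minimal sketch, with an illustrative table name:

    // Remove every table-level property, including the attached iterator.
    TableOperations to = conn.tableOperations();
    for (Entry<String,String> e : to.getProperties("mytable")) {
      to.removeProperty("mytable", e.getKey());
    }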
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/SessionDurabilityIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SessionDurabilityIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SessionDurabilityIT.java
deleted file mode 100644
index 36bdd7a..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/SessionDurabilityIT.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.ConditionalWriter;
-import org.apache.accumulo.core.client.ConditionalWriter.Status;
-import org.apache.accumulo.core.client.ConditionalWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Durability;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Condition;
-import org.apache.accumulo.core.data.ConditionalMutation;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.minicluster.impl.ProcessReference;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class SessionDurabilityIT extends ConfigurableMacBase {
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setNumTservers(1);
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
- }
-
- @Test(timeout = 3 * 60 * 1000)
- public void nondurableTableHasDurableWrites() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- // table default has no durability
- c.tableOperations().create(tableName);
- c.tableOperations().setProperty(tableName, Property.TABLE_DURABILITY.getKey(), "none");
- // send durable writes
- BatchWriterConfig cfg = new BatchWriterConfig();
- cfg.setDurability(Durability.SYNC);
- writeSome(tableName, 10, cfg);
- assertEquals(10, count(tableName));
- // verify writes survive restart
- restartTServer();
- assertEquals(10, count(tableName));
- }
-
- @Test(timeout = 3 * 60 * 1000)
- public void durableTableLosesNonDurableWrites() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- // table default is durable writes
- c.tableOperations().create(tableName);
- c.tableOperations().setProperty(tableName, Property.TABLE_DURABILITY.getKey(), "sync");
- // write with no durability
- BatchWriterConfig cfg = new BatchWriterConfig();
- cfg.setDurability(Durability.NONE);
- writeSome(tableName, 10, cfg);
- // verify writes are lost on restart
- restartTServer();
- assertTrue(10 > count(tableName));
- }
-
- private int count(String tableName) throws Exception {
- return Iterators.size(getConnector().createScanner(tableName, Authorizations.EMPTY).iterator());
- }
-
- private void writeSome(String tableName, int n, BatchWriterConfig cfg) throws Exception {
- Connector c = getConnector();
- BatchWriter bw = c.createBatchWriter(tableName, cfg);
- for (int i = 0; i < n; i++) {
- Mutation m = new Mutation(i + "");
- m.put("", "", "");
- bw.addMutation(m);
- }
- bw.close();
- }
-
- @Test(timeout = 3 * 60 * 1000)
- public void testConditionDurability() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- // table default is durable writes
- c.tableOperations().create(tableName);
- c.tableOperations().setProperty(tableName, Property.TABLE_DURABILITY.getKey(), "sync");
- // write without durability
- ConditionalWriterConfig cfg = new ConditionalWriterConfig();
- cfg.setDurability(Durability.NONE);
- conditionWriteSome(tableName, 10, cfg);
- // everything in there?
- assertEquals(10, count(tableName));
- // restart the server and verify the updates are lost
- restartTServer();
- assertEquals(0, count(tableName));
- }
-
- @Test(timeout = 3 * 60 * 1000)
- public void testConditionDurability2() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
-    // table default has no durability
- c.tableOperations().create(tableName);
- c.tableOperations().setProperty(tableName, Property.TABLE_DURABILITY.getKey(), "none");
- // write with durability
- ConditionalWriterConfig cfg = new ConditionalWriterConfig();
- cfg.setDurability(Durability.SYNC);
- conditionWriteSome(tableName, 10, cfg);
- // everything in there?
- assertEquals(10, count(tableName));
- // restart the server and verify the updates are still there
- restartTServer();
- assertEquals(10, count(tableName));
- }
-
- private void conditionWriteSome(String tableName, int n, ConditionalWriterConfig cfg) throws Exception {
- Connector c = getConnector();
- ConditionalWriter cw = c.createConditionalWriter(tableName, cfg);
- for (int i = 0; i < n; i++) {
- ConditionalMutation m = new ConditionalMutation((CharSequence) (i + ""), new Condition("", ""));
- m.put("", "", "X");
- assertEquals(Status.ACCEPTED, cw.write(m).getStatus());
- }
- }
-
- private void restartTServer() throws Exception {
- for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
- cluster.killProcess(ServerType.TABLET_SERVER, proc);
- }
- cluster.start();
- }
-
-}
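
The tests above turn on a single API detail: a durability set on the writer overrides the table's table.durability property. A minimal sketch of that override, assuming a live Connector named conn and a pre-created table "t" (both hypothetical names, used here only for illustration):

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Durability;
    import org.apache.accumulo.core.data.Mutation;

    public class DurabilityOverrideSketch {
      // Writer-level durability wins over the table.durability property, which is
      // exactly what nondurableTableHasDurableWrites relies on.
      static void writeDurably(Connector conn) throws Exception {
        BatchWriterConfig config = new BatchWriterConfig();
        config.setDurability(Durability.SYNC); // sync the write-ahead log per batch
        BatchWriter writer = conn.createBatchWriter("t", config); // "t" is hypothetical
        Mutation m = new Mutation("row1");
        m.put("cf", "cq", "value");
        writer.addMutation(m);
        writer.close(); // flushes; with SYNC the data survives a tserver kill
      }
    }
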
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ShutdownIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ShutdownIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ShutdownIT.java
deleted file mode 100644
index f27ee02..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ShutdownIT.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.server.util.Admin;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.TestRandomDeletes;
-import org.apache.accumulo.test.VerifyIngest;
-import org.junit.Test;
-
-public class ShutdownIT extends ConfigurableMacBase {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Test
- public void shutdownDuringIngest() throws Exception {
- Process ingest = cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD,
- "--createTable");
- UtilWaitThread.sleep(100);
- assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
- ingest.destroy();
- }
-
- @Test
- public void shutdownDuringQuery() throws Exception {
- assertEquals(0,
- cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "--createTable")
- .waitFor());
- Process verify = cluster.exec(VerifyIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD);
- UtilWaitThread.sleep(100);
- assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
- verify.destroy();
- }
-
- @Test
- public void shutdownDuringDelete() throws Exception {
- assertEquals(0,
- cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "--createTable")
- .waitFor());
- Process deleter = cluster.exec(TestRandomDeletes.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD);
- UtilWaitThread.sleep(100);
- assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
- deleter.destroy();
- }
-
- @Test
- public void shutdownDuringDeleteTable() throws Exception {
- final Connector c = getConnector();
- for (int i = 0; i < 10; i++) {
- c.tableOperations().create("table" + i);
- }
- final AtomicReference<Exception> ref = new AtomicReference<Exception>();
- Thread async = new Thread() {
- @Override
- public void run() {
- try {
- for (int i = 0; i < 10; i++)
- c.tableOperations().delete("table" + i);
- } catch (Exception ex) {
- ref.set(ex);
- }
- }
- };
- async.start();
- UtilWaitThread.sleep(100);
- assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
- if (ref.get() != null)
- throw ref.get();
- }
-
- @Test
- public void stopDuringStart() throws Exception {
- assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
- }
-
- @Test
- public void adminStop() throws Exception {
- runAdminStopTest(getConnector(), cluster);
- }
-
- static void runAdminStopTest(Connector c, MiniAccumuloClusterImpl cluster) throws InterruptedException, IOException {
- assertEquals(0,
- cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "--createTable")
- .waitFor());
- List<String> tabletServers = c.instanceOperations().getTabletServers();
- assertEquals(2, tabletServers.size());
- String doomed = tabletServers.get(0);
- assertEquals(0, cluster.exec(Admin.class, "stop", doomed).waitFor());
- tabletServers = c.instanceOperations().getTabletServers();
- assertEquals(1, tabletServers.size());
- assertFalse(tabletServers.get(0).equals(doomed));
- }
-
-}
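
ShutdownIT drives shutdown through the Admin utility, the programmatic equivalent of `accumulo admin stopAll` and `accumulo admin stop <host>`. The post-stop check in runAdminStopTest reduces to the live tablet-server list shrinking; a small sketch of that check, assuming a live Connector (names hypothetical):

    import java.util.List;
    import org.apache.accumulo.core.client.Connector;

    public class StopCheckSketch {
      // After `accumulo admin stop <host>` returns, the stopped server should have
      // dropped out of the instance's live tablet-server list.
      static boolean serverGone(Connector conn, String doomedHost) {
        List<String> servers = conn.instanceOperations().getTabletServers();
        return !servers.contains(doomedHost);
      }
    }
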
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java b/test/src/main/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
new file mode 100644
index 0000000..2251d4b
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.fail;
+
+import java.util.Map;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.impl.ClientContext;
+import org.apache.accumulo.core.client.impl.Credentials;
+import org.apache.accumulo.core.client.impl.MasterClient;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.master.thrift.MasterClientService;
+import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
+import org.apache.accumulo.core.master.thrift.TableInfo;
+import org.apache.accumulo.core.master.thrift.TabletServerStatus;
+import org.apache.accumulo.core.trace.Tracer;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class DynamicThreadPoolsIT extends AccumuloClusterHarness {
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setNumTservers(1);
+ Map<String,String> siteConfig = cfg.getSiteConfig();
+ siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "100ms");
+ cfg.setSiteConfig(siteConfig);
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 4 * 60;
+ }
+
+ private String majcDelay;
+
+ @Before
+ public void updateMajcDelay() throws Exception {
+ Connector c = getConnector();
+ majcDelay = c.instanceOperations().getSystemConfiguration().get(Property.TSERV_MAJC_DELAY.getKey());
+ c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), "100ms");
+ if (getClusterType() == ClusterType.STANDALONE) {
+ Thread.sleep(AccumuloConfiguration.getTimeInMillis(majcDelay));
+ }
+ }
+
+ @After
+ public void resetMajcDelay() throws Exception {
+ Connector c = getConnector();
+ c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
+ }
+
+ @Test
+ public void test() throws Exception {
+ final String[] tables = getUniqueNames(15);
+ String firstTable = tables[0];
+ Connector c = getConnector();
+ c.instanceOperations().setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "5");
+ TestIngest.Opts opts = new TestIngest.Opts();
+ opts.rows = 500 * 1000;
+ opts.createTable = true;
+ opts.setTableName(firstTable);
+ ClientConfiguration clientConf = cluster.getClientConfig();
+ if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ opts.updateKerberosCredentials(clientConf);
+ } else {
+ opts.setPrincipal(getAdminPrincipal());
+ }
+ TestIngest.ingest(c, opts, new BatchWriterOpts());
+ c.tableOperations().flush(firstTable, null, null, true);
+ for (int i = 1; i < tables.length; i++)
+ c.tableOperations().clone(firstTable, tables[i], true, null, null);
+ UtilWaitThread.sleep(11 * 1000); // time between checks of the thread pool sizes
+ Credentials creds = new Credentials(getAdminPrincipal(), getAdminToken());
+ for (int i = 1; i < tables.length; i++)
+ c.tableOperations().compact(tables[i], null, null, true, false);
+ for (int i = 0; i < 30; i++) {
+ int count = 0;
+ MasterClientService.Iface client = null;
+ MasterMonitorInfo stats = null;
+ try {
+ client = MasterClient.getConnectionWithRetry(new ClientContext(c.getInstance(), creds, clientConf));
+ stats = client.getMasterStats(Tracer.traceInfo(), creds.toThrift(c.getInstance()));
+ } finally {
+ if (client != null)
+ MasterClient.close(client);
+ }
+ for (TabletServerStatus server : stats.tServerInfo) {
+ for (TableInfo table : server.tableMap.values()) {
+ count += table.majors.running;
+ }
+ }
+ System.out.println("count " + count);
+ if (count > 3)
+ return;
+ UtilWaitThread.sleep(500);
+ }
+ fail("Could not observe higher number of threads after changing the config");
+ }
+}
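
The assertion in this test depends on tablet servers resizing their major-compaction thread pool when Property.TSERV_MAJC_MAXCONCURRENT changes at runtime. A sketch of that live change, assuming a Connector named conn (hypothetical):

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.conf.Property;

    public class MajcPoolSketch {
      // Setting the property through instanceOperations stores it in ZooKeeper;
      // tablet servers pick up the new pool size without a restart.
      static void raiseCompactionThreads(Connector conn) throws Exception {
        conn.instanceOperations().setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "5");
      }
    }
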
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/ExamplesIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ExamplesIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ExamplesIT.java
new file mode 100644
index 0000000..9d0ce86
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ExamplesIT.java
@@ -0,0 +1,660 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static com.google.common.base.Charsets.UTF_8;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster;
+import org.apache.accumulo.cluster.standalone.StandaloneClusterControl;
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.KerberosToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.user.AgeOffFilter;
+import org.apache.accumulo.core.iterators.user.SummingCombiner;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.examples.simple.client.Flush;
+import org.apache.accumulo.examples.simple.client.RandomBatchScanner;
+import org.apache.accumulo.examples.simple.client.RandomBatchWriter;
+import org.apache.accumulo.examples.simple.client.ReadWriteExample;
+import org.apache.accumulo.examples.simple.client.RowOperations;
+import org.apache.accumulo.examples.simple.client.SequentialBatchWriter;
+import org.apache.accumulo.examples.simple.client.TraceDumpExample;
+import org.apache.accumulo.examples.simple.client.TracingExample;
+import org.apache.accumulo.examples.simple.combiner.StatsCombiner;
+import org.apache.accumulo.examples.simple.constraints.MaxMutationSize;
+import org.apache.accumulo.examples.simple.dirlist.Ingest;
+import org.apache.accumulo.examples.simple.dirlist.QueryUtil;
+import org.apache.accumulo.examples.simple.helloworld.InsertWithBatchWriter;
+import org.apache.accumulo.examples.simple.helloworld.ReadData;
+import org.apache.accumulo.examples.simple.isolation.InterferenceTest;
+import org.apache.accumulo.examples.simple.mapreduce.RegexExample;
+import org.apache.accumulo.examples.simple.mapreduce.RowHash;
+import org.apache.accumulo.examples.simple.mapreduce.TableToFile;
+import org.apache.accumulo.examples.simple.mapreduce.TeraSortIngest;
+import org.apache.accumulo.examples.simple.mapreduce.WordCount;
+import org.apache.accumulo.examples.simple.mapreduce.bulk.BulkIngestExample;
+import org.apache.accumulo.examples.simple.mapreduce.bulk.GenerateTestData;
+import org.apache.accumulo.examples.simple.mapreduce.bulk.SetupTable;
+import org.apache.accumulo.examples.simple.mapreduce.bulk.VerifyIngest;
+import org.apache.accumulo.examples.simple.shard.ContinuousQuery;
+import org.apache.accumulo.examples.simple.shard.Index;
+import org.apache.accumulo.examples.simple.shard.Query;
+import org.apache.accumulo.examples.simple.shard.Reverse;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.MemoryUnit;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl.LogWriter;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.start.Main;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.tracer.TraceServer;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.util.Tool;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Iterators;
+
+public class ExamplesIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(ExamplesIT.class);
+ private static final BatchWriterOpts bwOpts = new BatchWriterOpts();
+ private static final BatchWriterConfig bwc = new BatchWriterConfig();
+ private static final String visibility = "A|B";
+ private static final String auths = "A,B";
+
+ Connector c;
+ String instance;
+ String keepers;
+ String user;
+ String passwd;
+ String keytab;
+ BatchWriter bw;
+ IteratorSetting is;
+ String dir;
+ FileSystem fs;
+ Authorizations origAuths;
+ boolean saslEnabled;
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopConf) {
+ // 128MB * 3
+ cfg.setDefaultMemory(cfg.getDefaultMemory() * 3, MemoryUnit.BYTE);
+ }
+
+ @Before
+ public void getClusterInfo() throws Exception {
+ c = getConnector();
+ user = getAdminPrincipal();
+ AuthenticationToken token = getAdminToken();
+ if (token instanceof KerberosToken) {
+ keytab = getAdminUser().getKeytab().getAbsolutePath();
+ saslEnabled = true;
+ } else if (token instanceof PasswordToken) {
+ passwd = new String(((PasswordToken) getAdminToken()).getPassword(), UTF_8);
+ saslEnabled = false;
+ } else {
+ Assert.fail("Unknown token type: " + token);
+ }
+ fs = getCluster().getFileSystem();
+ instance = c.getInstance().getInstanceName();
+ keepers = c.getInstance().getZooKeepers();
+ dir = new Path(cluster.getTemporaryPath(), getClass().getName()).toString();
+
+ origAuths = c.securityOperations().getUserAuthorizations(user);
+ c.securityOperations().changeUserAuthorizations(user, new Authorizations(auths.split(",")));
+ }
+
+ @After
+ public void resetAuths() throws Exception {
+ if (null != origAuths) {
+ getConnector().securityOperations().changeUserAuthorizations(getAdminPrincipal(), origAuths);
+ }
+ }
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 6 * 60;
+ }
+
+ @Test
+ public void testTrace() throws Exception {
+ Process trace = null;
+ if (ClusterType.MINI == getClusterType()) {
+ MiniAccumuloClusterImpl impl = (MiniAccumuloClusterImpl) cluster;
+ trace = impl.exec(TraceServer.class);
+ while (!c.tableOperations().exists("trace"))
+ UtilWaitThread.sleep(500);
+ }
+ String[] args;
+ if (saslEnabled) {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-C", "-D", "-c"};
+ } else {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-C", "-D", "-c"};
+ }
+ Entry<Integer,String> pair = cluster.getClusterControl().execWithStdout(TracingExample.class, args);
+ Assert.assertEquals("Expected return code of zero. STDOUT=" + pair.getValue(), 0, pair.getKey().intValue());
+ String result = pair.getValue();
+ Pattern pattern = Pattern.compile("TraceID: ([0-9a-f]+)");
+ Matcher matcher = pattern.matcher(result);
+ int count = 0;
+ while (matcher.find()) {
+ if (saslEnabled) {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--traceid", matcher.group(1)};
+ } else {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--traceid", matcher.group(1)};
+ }
+ pair = cluster.getClusterControl().execWithStdout(TraceDumpExample.class, args);
+ count++;
+ }
+ assertTrue(count > 0);
+ assertTrue("Output did not contain myApp@myHost", pair.getValue().contains("myApp@myHost"));
+ if (ClusterType.MINI == getClusterType() && null != trace) {
+ trace.destroy();
+ }
+ }
+
+ @Test
+ public void testClasspath() throws Exception {
+ Entry<Integer,String> entry = getCluster().getClusterControl().execWithStdout(Main.class, new String[] {"classpath"});
+ assertEquals(0, entry.getKey().intValue());
+ String result = entry.getValue();
+ int level1 = result.indexOf("Level 1");
+ int level2 = result.indexOf("Level 2");
+ int level3 = result.indexOf("Level 3");
+ int level4 = result.indexOf("Level 4");
+ assertTrue("Level 1 classloader not present.", level1 >= 0);
+ assertTrue("Level 2 classloader not present.", level2 > 0);
+ assertTrue("Level 3 classloader not present.", level3 > 0);
+ assertTrue("Level 4 classloader not present.", level4 > 0);
+ assertTrue(level1 < level2);
+ assertTrue(level2 < level3);
+ assertTrue(level3 < level4);
+ }
+
+ @Test
+ public void testDirList() throws Exception {
+ String[] names = getUniqueNames(3);
+ String dirTable = names[0], indexTable = names[1], dataTable = names[2];
+ String[] args;
+ String dirListDirectory;
+ switch (getClusterType()) {
+ case MINI:
+ dirListDirectory = ((MiniAccumuloClusterImpl) getCluster()).getConfig().getDir().getAbsolutePath();
+ break;
+ case STANDALONE:
+ dirListDirectory = ((StandaloneAccumuloCluster) getCluster()).getAccumuloHome();
+ break;
+ default:
+ throw new RuntimeException("Unknown cluster type");
+ }
+ // Index a directory listing on /tmp. If this is running against a standalone cluster, we can't guarantee Accumulo source will be there.
+ if (saslEnabled) {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--dirTable", dirTable, "--indexTable", indexTable, "--dataTable",
+ dataTable, "--vis", visibility, "--chunkSize", Integer.toString(10000), dirListDirectory};
+ } else {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--dirTable", dirTable, "--indexTable", indexTable, "--dataTable",
+ dataTable, "--vis", visibility, "--chunkSize", Integer.toString(10000), dirListDirectory};
+ }
+ Entry<Integer,String> entry = getClusterControl().execWithStdout(Ingest.class, args);
+ assertEquals("Got non-zero return code. Stdout=" + entry.getValue(), 0, entry.getKey().intValue());
+
+ String expectedFile;
+ switch (getClusterType()) {
+ case MINI:
+ // Should be present in a minicluster dir
+ expectedFile = "accumulo-site.xml";
+ break;
+ case STANDALONE:
+      // Should be in place on standalone installs (not having to follow symlinks)
+ expectedFile = "LICENSE";
+ break;
+ default:
+ throw new RuntimeException("Unknown cluster type");
+ }
+ if (saslEnabled) {
+ args = new String[] {"-i", instance, "-z", keepers, "--keytab", keytab, "-u", user, "-t", indexTable, "--auths", auths, "--search", "--path",
+ expectedFile};
+ } else {
+ args = new String[] {"-i", instance, "-z", keepers, "-p", passwd, "-u", user, "-t", indexTable, "--auths", auths, "--search", "--path", expectedFile};
+ }
+ entry = getClusterControl().execWithStdout(QueryUtil.class, args);
+ if (ClusterType.MINI == getClusterType()) {
+ MiniAccumuloClusterImpl impl = (MiniAccumuloClusterImpl) cluster;
+ for (LogWriter writer : impl.getLogWriters()) {
+ writer.flush();
+ }
+ }
+
+ log.info("result " + entry.getValue());
+ assertEquals(0, entry.getKey().intValue());
+ assertTrue(entry.getValue().contains(expectedFile));
+ }
+
+ @Test
+ public void testAgeoffFilter() throws Exception {
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ is = new IteratorSetting(10, AgeOffFilter.class);
+ AgeOffFilter.setTTL(is, 1000L);
+ c.tableOperations().attachIterator(tableName, is);
+ UtilWaitThread.sleep(500); // let zookeeper updates propagate.
+ bw = c.createBatchWriter(tableName, bwc);
+ Mutation m = new Mutation("foo");
+ m.put("a", "b", "c");
+ bw.addMutation(m);
+ bw.close();
+ UtilWaitThread.sleep(1000);
+ assertEquals(0, Iterators.size(c.createScanner(tableName, Authorizations.EMPTY).iterator()));
+ }
+
+ @Test
+ public void testStatsCombiner() throws Exception {
+ String table = getUniqueNames(1)[0];
+ c.tableOperations().create(table);
+ is = new IteratorSetting(10, StatsCombiner.class);
+ StatsCombiner.setCombineAllColumns(is, true);
+
+ c.tableOperations().attachIterator(table, is);
+ bw = c.createBatchWriter(table, bwc);
+ // Write two mutations otherwise the NativeMap would dedupe them into a single update
+ Mutation m = new Mutation("foo");
+ m.put("a", "b", "1");
+ bw.addMutation(m);
+ m = new Mutation("foo");
+ m.put("a", "b", "3");
+ bw.addMutation(m);
+ bw.flush();
+
+ Iterator<Entry<Key,Value>> iter = c.createScanner(table, Authorizations.EMPTY).iterator();
+ assertTrue("Iterator had no results", iter.hasNext());
+ Entry<Key,Value> e = iter.next();
+ assertEquals("Results ", "1,3,4,2", e.getValue().toString());
+ assertFalse("Iterator had additional results", iter.hasNext());
+
+ m = new Mutation("foo");
+ m.put("a", "b", "0,20,20,2");
+ bw.addMutation(m);
+ bw.close();
+
+ iter = c.createScanner(table, Authorizations.EMPTY).iterator();
+ assertTrue("Iterator had no results", iter.hasNext());
+ e = iter.next();
+ assertEquals("Results ", "0,20,24,4", e.getValue().toString());
+ assertFalse("Iterator had additional results", iter.hasNext());
+ }
+
+ @Test
+ public void testBloomFilters() throws Exception {
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ c.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ENABLED.getKey(), "true");
+ String[] args;
+ if (saslEnabled) {
+ args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--num", "100000", "--min", "0", "--max",
+ "1000000000", "--size", "50", "--batchMemory", "2M", "--batchLatency", "60s", "--batchThreads", "3", "-t", tableName};
+ } else {
+ args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--num", "100000", "--min", "0", "--max", "1000000000",
+ "--size", "50", "--batchMemory", "2M", "--batchLatency", "60s", "--batchThreads", "3", "-t", tableName};
+ }
+ goodExec(RandomBatchWriter.class, args);
+ c.tableOperations().flush(tableName, null, null, true);
+ long diff = 0, diff2 = 0;
+ // try the speed test a couple times in case the system is loaded with other tests
+ for (int i = 0; i < 2; i++) {
+ long now = System.currentTimeMillis();
+ if (saslEnabled) {
+ args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--num", "10000", "--min", "0", "--max",
+ "1000000000", "--size", "50", "--scanThreads", "4", "-t", tableName};
+ } else {
+ args = new String[] {"--seed", "7", "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--num", "10000", "--min", "0", "--max", "1000000000",
+ "--size", "50", "--scanThreads", "4", "-t", tableName};
+ }
+ goodExec(RandomBatchScanner.class, args);
+ diff = System.currentTimeMillis() - now;
+ now = System.currentTimeMillis();
+ if (saslEnabled) {
+ args = new String[] {"--seed", "8", "-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--num", "10000", "--min", "0", "--max",
+ "1000000000", "--size", "50", "--scanThreads", "4", "-t", tableName};
+ } else {
+ args = new String[] {"--seed", "8", "-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--num", "10000", "--min", "0", "--max", "1000000000",
+ "--size", "50", "--scanThreads", "4", "-t", tableName};
+ }
+ int retCode = getClusterControl().exec(RandomBatchScanner.class, args);
+ assertEquals(1, retCode);
+ diff2 = System.currentTimeMillis() - now;
+ if (diff2 < diff)
+ break;
+ }
+ assertTrue(diff2 < diff);
+ }
+
+ @Test
+ public void testShardedIndex() throws Exception {
+ String[] names = getUniqueNames(3);
+ final String shard = names[0], index = names[1];
+ c.tableOperations().create(shard);
+ c.tableOperations().create(index);
+ bw = c.createBatchWriter(shard, bwc);
+ Index.index(30, new File(System.getProperty("user.dir") + "/src"), "\\W+", bw);
+ bw.close();
+ BatchScanner bs = c.createBatchScanner(shard, Authorizations.EMPTY, 4);
+ List<String> found = Query.query(bs, Arrays.asList("foo", "bar"));
+ bs.close();
+ // should find ourselves
+ boolean thisFile = false;
+ for (String file : found) {
+ if (file.endsWith("/ExamplesIT.java"))
+ thisFile = true;
+ }
+ assertTrue(thisFile);
+
+ String[] args;
+ if (saslEnabled) {
+ args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", user, "--keytab", keytab};
+ } else {
+ args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", getAdminPrincipal(), "-p", passwd};
+ }
+ // create a reverse index
+ goodExec(Reverse.class, args);
+
+ if (saslEnabled) {
+ args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", user, "--keytab", keytab, "--terms", "5",
+ "--count", "1000"};
+ } else {
+ args = new String[] {"-i", instance, "-z", keepers, "--shardTable", shard, "--doc2Term", index, "-u", user, "-p", passwd, "--terms", "5", "--count",
+ "1000"};
+ }
+ // run some queries
+ goodExec(ContinuousQuery.class, args);
+ }
+
+ @Test
+ public void testMaxMutationConstraint() throws Exception {
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ c.tableOperations().addConstraint(tableName, MaxMutationSize.class.getName());
+ TestIngest.Opts opts = new TestIngest.Opts();
+ opts.rows = 1;
+ opts.cols = 1000;
+ opts.setTableName(tableName);
+ if (saslEnabled) {
+ opts.updateKerberosCredentials(cluster.getClientConfig());
+ } else {
+ opts.setPrincipal(getAdminPrincipal());
+ }
+ try {
+ TestIngest.ingest(c, opts, bwOpts);
+ } catch (MutationsRejectedException ex) {
+ assertEquals(1, ex.getConstraintViolationSummaries().size());
+ }
+ }
+
+ @Test
+ public void testBulkIngest() throws Exception {
+ // TODO Figure out a way to run M/R with Kerberos
+ Assume.assumeTrue(getAdminToken() instanceof PasswordToken);
+ String tableName = getUniqueNames(1)[0];
+ FileSystem fs = getFileSystem();
+ Path p = new Path(dir, "tmp");
+ if (fs.exists(p)) {
+ fs.delete(p, true);
+ }
+ goodExec(GenerateTestData.class, "--start-row", "0", "--count", "10000", "--output", dir + "/tmp/input/data");
+
+ List<String> commonArgs = new ArrayList<>(Arrays.asList(new String[] {"-i", instance, "-z", keepers, "-u", user, "--table", tableName}));
+ if (saslEnabled) {
+ commonArgs.add("--keytab");
+ commonArgs.add(keytab);
+ } else {
+ commonArgs.add("-p");
+ commonArgs.add(passwd);
+ }
+
+ List<String> args = new ArrayList<>(commonArgs);
+ goodExec(SetupTable.class, args.toArray(new String[0]));
+
+ args = new ArrayList<>(commonArgs);
+ args.addAll(Arrays.asList(new String[] {"--inputDir", dir + "/tmp/input", "--workDir", dir + "/tmp"}));
+ goodExec(BulkIngestExample.class, args.toArray(new String[0]));
+
+ args = new ArrayList<>(commonArgs);
+ args.addAll(Arrays.asList(new String[] {"--start-row", "0", "--count", "10000"}));
+ goodExec(VerifyIngest.class, args.toArray(new String[0]));
+ }
+
+ @Test
+ public void testTeraSortAndRead() throws Exception {
+ // TODO Figure out a way to run M/R with Kerberos
+ Assume.assumeTrue(getAdminToken() instanceof PasswordToken);
+ String tableName = getUniqueNames(1)[0];
+ String[] args;
+ if (saslEnabled) {
+ args = new String[] {"--count", (1000 * 1000) + "", "-nk", "10", "-xk", "10", "-nv", "10", "-xv", "10", "-t", tableName, "-i", instance, "-z", keepers,
+ "-u", user, "--keytab", keytab, "--splits", "4"};
+ } else {
+ args = new String[] {"--count", (1000 * 1000) + "", "-nk", "10", "-xk", "10", "-nv", "10", "-xv", "10", "-t", tableName, "-i", instance, "-z", keepers,
+ "-u", user, "-p", passwd, "--splits", "4"};
+ }
+ goodExec(TeraSortIngest.class, args);
+ Path output = new Path(dir, "tmp/nines");
+ if (fs.exists(output)) {
+ fs.delete(output, true);
+ }
+ if (saslEnabled) {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--rowRegex", ".*999.*", "--output",
+ output.toString()};
+ } else {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--rowRegex", ".*999.*", "--output", output.toString()};
+ }
+ goodExec(RegexExample.class, args);
+ if (saslEnabled) {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--column", "c:"};
+ } else {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--column", "c:"};
+ }
+ goodExec(RowHash.class, args);
+ output = new Path(dir, "tmp/tableFile");
+ if (fs.exists(output)) {
+ fs.delete(output, true);
+ }
+ if (saslEnabled) {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--output", output.toString()};
+ } else {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--output", output.toString()};
+ }
+ goodExec(TableToFile.class, args);
+ }
+
+ @Test
+ public void testWordCount() throws Exception {
+ // TODO Figure out a way to run M/R with Kerberos
+ Assume.assumeTrue(getAdminToken() instanceof PasswordToken);
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ is = new IteratorSetting(10, SummingCombiner.class);
+ SummingCombiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column(new Text("count"))));
+ SummingCombiner.setEncodingType(is, SummingCombiner.Type.STRING);
+ c.tableOperations().attachIterator(tableName, is);
+ fs.copyFromLocalFile(new Path(new Path(System.getProperty("user.dir")).getParent(), "README.md"), new Path(dir + "/tmp/wc/README.md"));
+ String[] args;
+ if (saslEnabled) {
+ args = new String[] {"-i", instance, "-u", user, "--keytab", keytab, "-z", keepers, "--input", dir + "/tmp/wc", "-t", tableName};
+ } else {
+ args = new String[] {"-i", instance, "-u", user, "-p", passwd, "-z", keepers, "--input", dir + "/tmp/wc", "-t", tableName};
+ }
+ goodExec(WordCount.class, args);
+ }
+
+ @Test
+ public void testInsertWithBatchWriterAndReadData() throws Exception {
+ String tableName = getUniqueNames(1)[0];
+ String[] args;
+ if (saslEnabled) {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName};
+ } else {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName};
+ }
+ goodExec(InsertWithBatchWriter.class, args);
+ goodExec(ReadData.class, args);
+ }
+
+ @Test
+ public void testIsolatedScansWithInterference() throws Exception {
+ String[] args;
+ if (saslEnabled) {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", getUniqueNames(1)[0], "--iterations", "100000", "--isolated"};
+ } else {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", getUniqueNames(1)[0], "--iterations", "100000", "--isolated"};
+ }
+ goodExec(InterferenceTest.class, args);
+ }
+
+ @Test
+ public void testScansWithInterference() throws Exception {
+ String[] args;
+ if (saslEnabled) {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", getUniqueNames(1)[0], "--iterations", "100000"};
+ } else {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", getUniqueNames(1)[0], "--iterations", "100000"};
+ }
+ goodExec(InterferenceTest.class, args);
+ }
+
+ @Test
+ public void testRowOperations() throws Exception {
+ String[] args;
+ if (saslEnabled) {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab};
+ } else {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd};
+ }
+ goodExec(RowOperations.class, args);
+ }
+
+ @Test
+ public void testBatchWriter() throws Exception {
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ String[] args;
+ if (saslEnabled) {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "-t", tableName, "--start", "0", "--num", "100000", "--size", "50",
+ "--batchMemory", "10000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
+ } else {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "-t", tableName, "--start", "0", "--num", "100000", "--size", "50",
+ "--batchMemory", "10000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
+ }
+ goodExec(SequentialBatchWriter.class, args);
+
+ }
+
+ @Test
+ public void testReadWriteAndDelete() throws Exception {
+ String tableName = getUniqueNames(1)[0];
+ String[] args;
+ if (saslEnabled) {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--auths", auths, "--table", tableName, "--createtable", "-c",
+ "--debug"};
+ } else {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--auths", auths, "--table", tableName, "--createtable", "-c", "--debug"};
+ }
+ goodExec(ReadWriteExample.class, args);
+ if (saslEnabled) {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--auths", auths, "--table", tableName, "-d", "--debug"};
+ } else {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--auths", auths, "--table", tableName, "-d", "--debug"};
+ }
+ goodExec(ReadWriteExample.class, args);
+
+ }
+
+ @Test
+ public void testRandomBatchesAndFlush() throws Exception {
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ String[] args;
+ if (saslEnabled) {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--table", tableName, "--num", "100000", "--min", "0", "--max",
+ "100000", "--size", "100", "--batchMemory", "1000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
+ } else {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", tableName, "--num", "100000", "--min", "0", "--max", "100000",
+ "--size", "100", "--batchMemory", "1000000", "--batchLatency", "1000", "--batchThreads", "4", "--vis", visibility};
+ }
+ goodExec(RandomBatchWriter.class, args);
+
+ if (saslEnabled) {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--table", tableName, "--num", "10000", "--min", "0", "--max",
+ "100000", "--size", "100", "--scanThreads", "4", "--auths", auths};
+ } else {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", tableName, "--num", "10000", "--min", "0", "--max", "100000",
+ "--size", "100", "--scanThreads", "4", "--auths", auths};
+ }
+ goodExec(RandomBatchScanner.class, args);
+
+ if (saslEnabled) {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "--keytab", keytab, "--table", tableName};
+ } else {
+ args = new String[] {"-i", instance, "-z", keepers, "-u", user, "-p", passwd, "--table", tableName};
+ }
+ goodExec(Flush.class, args);
+ }
+
+ private void goodExec(Class<?> theClass, String... args) throws InterruptedException, IOException {
+ Entry<Integer,String> pair;
+ if (Tool.class.isAssignableFrom(theClass) && ClusterType.STANDALONE == getClusterType()) {
+ StandaloneClusterControl control = (StandaloneClusterControl) getClusterControl();
+ pair = control.execMapreduceWithStdout(theClass, args);
+ } else {
+ // We're already slurping stdout into memory (not redirecting to file). Might as well add it to error message.
+ pair = getClusterControl().execWithStdout(theClass, args);
+ }
+ Assert.assertEquals("stdout=" + pair.getValue(), 0, pair.getKey().intValue());
+ }
+}
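
Most of ExamplesIT shells out to the example mains, but testAgeoffFilter exercises the iterator API directly. A condensed sketch of that setup, assuming a Connector named conn and an existing table "t" (both hypothetical):

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.iterators.user.AgeOffFilter;

    public class AgeOffSketch {
      // Entries older than ttlMillis are suppressed at scan and compaction time
      // once the filter is attached at priority 10.
      static void attach(Connector conn, long ttlMillis) throws Exception {
        IteratorSetting setting = new IteratorSetting(10, AgeOffFilter.class);
        AgeOffFilter.setTTL(setting, ttlMillis);
        conn.tableOperations().attachIterator("t", setting);
      }
    }
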
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/FateStarvationIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/FateStarvationIT.java b/test/src/main/java/org/apache/accumulo/test/functional/FateStarvationIT.java
new file mode 100644
index 0000000..b75a74e
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/FateStarvationIT.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+/**
+ * See ACCUMULO-779
+ */
+public class FateStarvationIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Test
+ public void run() throws Exception {
+ String tableName = getUniqueNames(1)[0];
+ Connector c = getConnector();
+ c.tableOperations().create(tableName);
+
+ c.tableOperations().addSplits(tableName, TestIngest.getSplitPoints(0, 100000, 50));
+
+ TestIngest.Opts opts = new TestIngest.Opts();
+ opts.random = 89;
+ opts.timestamp = 7;
+ opts.dataSize = 50;
+ opts.rows = 100000;
+ opts.cols = 1;
+ opts.setTableName(tableName);
+ ClientConfiguration clientConf = cluster.getClientConfig();
+ if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ opts.updateKerberosCredentials(clientConf);
+ } else {
+ opts.setPrincipal(getAdminPrincipal());
+ }
+ TestIngest.ingest(c, opts, new BatchWriterOpts());
+
+ c.tableOperations().flush(tableName, null, null, true);
+
+ List<Text> splits = new ArrayList<Text>(TestIngest.getSplitPoints(0, 100000, 67));
+ Random rand = new Random();
+
+ for (int i = 0; i < 100; i++) {
+ int idx1 = rand.nextInt(splits.size() - 1);
+ int idx2 = rand.nextInt(splits.size() - (idx1 + 1)) + idx1 + 1;
+
+ c.tableOperations().compact(tableName, splits.get(idx1), splits.get(idx2), false, false);
+ }
+
+ c.tableOperations().offline(tableName);
+ }
+
+}
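
The starvation scenario hinges on compact(table, start, end, flush, wait) with wait=false: each call only queues a FATE operation and returns. A sketch of one such queued compaction, assuming a Connector named conn and a table "t" (hypothetical):

    import org.apache.accumulo.core.client.Connector;
    import org.apache.hadoop.io.Text;

    public class QueuedCompactionSketch {
      // flush=false skips the pre-compaction flush; wait=false returns as soon as
      // the FATE op is queued, so 100 of these pile up quickly (see ACCUMULO-779).
      static void queueCompaction(Connector conn, Text start, Text end) throws Exception {
        conn.tableOperations().compact("t", start, end, false, false);
      }
    }
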
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java b/test/src/main/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
new file mode 100644
index 0000000..05d0562
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
@@ -0,0 +1,192 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertFalse;
+
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl.LogWriter;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+
+import com.google.common.collect.Iterators;
+
+public class FunctionalTestUtils {
+
+ public static int countRFiles(Connector c, String tableName) throws Exception {
+ Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ String tableId = c.tableOperations().tableIdMap().get(tableName);
+ scanner.setRange(MetadataSchema.TabletsSection.getRange(tableId));
+ scanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+
+ return Iterators.size(scanner.iterator());
+ }
+
+ static void checkRFiles(Connector c, String tableName, int minTablets, int maxTablets, int minRFiles, int maxRFiles) throws Exception {
+ Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ String tableId = c.tableOperations().tableIdMap().get(tableName);
+ scanner.setRange(new Range(new Text(tableId + ";"), true, new Text(tableId + "<"), true));
+ scanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+ MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
+
+ HashMap<Text,Integer> tabletFileCounts = new HashMap<Text,Integer>();
+
+ for (Entry<Key,Value> entry : scanner) {
+
+ Text row = entry.getKey().getRow();
+
+ Integer count = tabletFileCounts.get(row);
+ if (count == null)
+ count = 0;
+ if (entry.getKey().getColumnFamily().equals(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME)) {
+ count = count + 1;
+ }
+
+ tabletFileCounts.put(row, count);
+ }
+
+ if (tabletFileCounts.size() < minTablets || tabletFileCounts.size() > maxTablets) {
+ throw new Exception("Did not find expected number of tablets " + tabletFileCounts.size());
+ }
+
+ Set<Entry<Text,Integer>> es = tabletFileCounts.entrySet();
+ for (Entry<Text,Integer> entry : es) {
+ if (entry.getValue() > maxRFiles || entry.getValue() < minRFiles) {
+ throw new Exception("tablet " + entry.getKey() + " has " + entry.getValue() + " map files");
+ }
+ }
+ }
+
+ static public void bulkImport(Connector c, FileSystem fs, String table, String dir) throws Exception {
+ String failDir = dir + "_failures";
+ Path failPath = new Path(failDir);
+ fs.delete(failPath, true);
+ fs.mkdirs(failPath);
+
+ // Ensure server can read/modify files
+ FsShell fsShell = new FsShell(fs.getConf());
+ Assert.assertEquals("Failed to chmod " + dir, 0, fsShell.run(new String[] {"-chmod", "-R", "777", dir}));
+ Assert.assertEquals("Failed to chmod " + failDir, 0, fsShell.run(new String[] {"-chmod", "-R", "777", failDir}));
+
+ c.tableOperations().importDirectory(table, dir, failDir, false);
+
+ if (fs.listStatus(failPath).length > 0) {
+ throw new Exception("Some files failed to bulk import");
+ }
+
+ }
+
+ static public void checkSplits(Connector c, String table, int min, int max) throws Exception {
+ Collection<Text> splits = c.tableOperations().listSplits(table);
+ if (splits.size() < min || splits.size() > max) {
+ throw new Exception("# of table splits points out of range, #splits=" + splits.size() + " table=" + table + " min=" + min + " max=" + max);
+ }
+ }
+
+ static public void createRFiles(final Connector c, FileSystem fs, String path, int rows, int splits, int threads) throws Exception {
+ fs.delete(new Path(path), true);
+ ExecutorService threadPool = Executors.newFixedThreadPool(threads);
+ final AtomicBoolean fail = new AtomicBoolean(false);
+ for (int i = 0; i < rows; i += rows / splits) {
+ final TestIngest.Opts opts = new TestIngest.Opts();
+ opts.outputFile = String.format("%s/mf%s", path, i);
+ opts.random = 56;
+ opts.timestamp = 1;
+ opts.dataSize = 50;
+ opts.rows = rows / splits;
+ opts.startRow = i;
+ opts.cols = 1;
+ threadPool.execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ TestIngest.ingest(c, opts, new BatchWriterOpts());
+ } catch (Exception e) {
+ fail.set(true);
+ }
+ }
+ });
+ }
+ threadPool.shutdown();
+ threadPool.awaitTermination(1, TimeUnit.HOURS);
+ assertFalse(fail.get());
+ }
+
+ static public String readAll(InputStream is) throws IOException {
+ byte[] buffer = new byte[4096];
+ StringBuffer result = new StringBuffer();
+ while (true) {
+ int n = is.read(buffer);
+ if (n <= 0)
+ break;
+ result.append(new String(buffer, 0, n));
+ }
+ return result.toString();
+ }
+
+ public static String readAll(MiniAccumuloClusterImpl c, Class<?> klass, Process p) throws Exception {
+ for (LogWriter writer : c.getLogWriters())
+ writer.flush();
+ return readAll(new FileInputStream(c.getConfig().getLogDir() + "/" + klass.getSimpleName() + "_" + p.hashCode() + ".out"));
+ }
+
+ static Mutation nm(String row, String cf, String cq, Value value) {
+ Mutation m = new Mutation(new Text(row));
+ m.put(new Text(cf), new Text(cq), value);
+ return m;
+ }
+
+ static Mutation nm(String row, String cf, String cq, String value) {
+ return nm(row, cf, cq, new Value(value.getBytes()));
+ }
+
+ public static SortedSet<Text> splits(String[] splits) {
+ SortedSet<Text> result = new TreeSet<Text>();
+ for (String split : splits)
+ result.add(new Text(split));
+ return result;
+ }
+
+}
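
The splits helper at the bottom pairs naturally with TableOperations.addSplits; a usage sketch, assuming a Connector named conn and an existing table "t" (hypothetical):

    import java.util.SortedSet;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.test.functional.FunctionalTestUtils;
    import org.apache.hadoop.io.Text;

    public class PresplitSketch {
      // splits() turns plain strings into the SortedSet<Text> that addSplits expects.
      static void presplit(Connector conn) throws Exception {
        SortedSet<Text> points = FunctionalTestUtils.splits(new String[] {"g", "n", "t"});
        conn.tableOperations().addSplits("t", points);
      }
    }
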
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java b/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
new file mode 100644
index 0000000..a73f239
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
@@ -0,0 +1,301 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.core.util.ServerServices;
+import org.apache.accumulo.core.util.ServerServices.Service;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.fate.zookeeper.ZooLock;
+import org.apache.accumulo.gc.SimpleGarbageCollector;
+import org.apache.accumulo.minicluster.MemoryUnit;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.minicluster.impl.ProcessNotFoundException;
+import org.apache.accumulo.minicluster.impl.ProcessReference;
+import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.io.Text;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.KeeperException.NoNodeException;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class GarbageCollectorIT extends ConfigurableMacBase {
+ private static final String OUR_SECRET = "itsreallysecret";
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 5 * 60;
+ }
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
+ cfg.setProperty(Property.INSTANCE_SECRET, OUR_SECRET);
+ cfg.setProperty(Property.GC_CYCLE_START, "1");
+ cfg.setProperty(Property.GC_CYCLE_DELAY, "1");
+ cfg.setProperty(Property.GC_PORT, "0");
+ cfg.setProperty(Property.TSERV_MAXMEM, "5K");
+ cfg.setProperty(Property.TSERV_MAJC_DELAY, "1");
+
+ // use raw local file system so walogs sync and flush will work
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ private void killMacGc() throws ProcessNotFoundException, InterruptedException, KeeperException {
+ // kill gc started by MAC
+ getCluster().killProcess(ServerType.GARBAGE_COLLECTOR, getCluster().getProcesses().get(ServerType.GARBAGE_COLLECTOR).iterator().next());
+ // delete the lock in ZooKeeper if present, so the next GC can start quickly
+ String path = ZooUtil.getRoot(new ZooKeeperInstance(getCluster().getClientConfig())) + Constants.ZGC_LOCK;
+ ZooReaderWriter zk = new ZooReaderWriter(cluster.getZooKeepers(), 30000, OUR_SECRET);
+ try {
+ ZooLock.deleteLock(zk, path);
+ } catch (IllegalStateException e) {
+ // ignored: the lock may already be gone
+ }
+
+ assertNull(getCluster().getProcesses().get(ServerType.GARBAGE_COLLECTOR));
+ }
+
+ @Test
+ public void gcTest() throws Exception {
+ killMacGc();
+ Connector c = getConnector();
+ c.tableOperations().create("test_ingest");
+ c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "5K");
+ TestIngest.Opts opts = new TestIngest.Opts();
+ VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+ vopts.rows = opts.rows = 10000;
+ vopts.cols = opts.cols = 1;
+ opts.setPrincipal("root");
+ vopts.setPrincipal("root");
+ TestIngest.ingest(c, opts, new BatchWriterOpts());
+ c.tableOperations().compact("test_ingest", null, null, true, true);
+ int before = countFiles();
+ while (true) {
+ UtilWaitThread.sleep(1000);
+ int more = countFiles();
+ if (more <= before)
+ break;
+ before = more;
+ }
+
+ // restart GC
+ getCluster().start();
+ UtilWaitThread.sleep(15 * 1000);
+ int after = countFiles();
+ VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+ assertTrue(after < before);
+ }
+
+ @Test
+ public void gcLotsOfCandidatesIT() throws Exception {
+ killMacGc();
+
+ log.info("Filling metadata table with bogus delete flags");
+ Connector c = getConnector();
+ addEntries(c, new BatchWriterOpts());
+ cluster.getConfig().setDefaultMemory(10, MemoryUnit.MEGABYTE);
+ Process gc = cluster.exec(SimpleGarbageCollector.class);
+ UtilWaitThread.sleep(20 * 1000);
+ String output = FunctionalTestUtils.readAll(cluster, SimpleGarbageCollector.class, gc);
+ gc.destroy();
+ assertTrue(output.contains("delete candidates has exceeded"));
+ }
+
+ @Test
+ public void dontGCRootLog() throws Exception {
+ killMacGc();
+ // dirty metadata
+ Connector c = getConnector();
+ String table = getUniqueNames(1)[0];
+ c.tableOperations().create(table);
+ // let gc run for a bit
+ cluster.start();
+ UtilWaitThread.sleep(20 * 1000);
+ killMacGc();
+ // kill tservers
+ for (ProcessReference ref : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
+ cluster.killProcess(ServerType.TABLET_SERVER, ref);
+ }
+ // run recovery
+ cluster.start();
+ // did it recover?
+ Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ Iterators.size(scanner.iterator());
+ }
+
+ private Mutation createDelMutation(String path, String cf, String cq, String val) {
+ Text row = new Text(MetadataSchema.DeletesSection.getRowPrefix() + path);
+ Mutation delFlag = new Mutation(row);
+ delFlag.put(cf, cq, val);
+ return delFlag;
+ }
+
+ @Test
+ public void testInvalidDelete() throws Exception {
+ killMacGc();
+
+ String table = getUniqueNames(1)[0];
+ getConnector().tableOperations().create(table);
+
+ BatchWriter bw2 = getConnector().createBatchWriter(table, new BatchWriterConfig());
+ Mutation m1 = new Mutation("r1");
+ m1.put("cf1", "cq1", "v1");
+ bw2.addMutation(m1);
+ bw2.close();
+
+ getConnector().tableOperations().flush(table, null, null, true);
+
+ // ensure an invalid delete entry does not cause the GC to go berserk (ACCUMULO-2520)
+ getConnector().securityOperations().grantTablePermission(getConnector().whoami(), MetadataTable.NAME, TablePermission.WRITE);
+ BatchWriter bw3 = getConnector().createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+
+ bw3.addMutation(createDelMutation("", "", "", ""));
+ bw3.addMutation(createDelMutation("", "testDel", "test", "valueTest"));
+ bw3.addMutation(createDelMutation("/", "", "", ""));
+ bw3.close();
+
+ Process gc = cluster.exec(SimpleGarbageCollector.class);
+ try {
+ String output = "";
+ while (!output.contains("Ingoring invalid deletion candidate")) {
+ UtilWaitThread.sleep(250);
+ try {
+ output = FunctionalTestUtils.readAll(cluster, SimpleGarbageCollector.class, gc);
+ } catch (IOException ioe) {
+ log.error("Could not read all from cluster.", ioe);
+ }
+ }
+ } finally {
+ gc.destroy();
+ }
+
+ Scanner scanner = getConnector().createScanner(table, Authorizations.EMPTY);
+ Iterator<Entry<Key,Value>> iter = scanner.iterator();
+ assertTrue(iter.hasNext());
+ Entry<Key,Value> entry = iter.next();
+ Assert.assertEquals("r1", entry.getKey().getRow().toString());
+ Assert.assertEquals("cf1", entry.getKey().getColumnFamily().toString());
+ Assert.assertEquals("cq1", entry.getKey().getColumnQualifier().toString());
+ Assert.assertEquals("v1", entry.getValue().toString());
+ Assert.assertFalse(iter.hasNext());
+ }
+
+ @Test
+ public void testProperPortAdvertisement() throws Exception {
+
+ Connector conn = getConnector();
+ Instance instance = conn.getInstance();
+
+ ZooReaderWriter zk = new ZooReaderWriter(cluster.getZooKeepers(), 30000, OUR_SECRET);
+ String path = ZooUtil.getRoot(instance) + Constants.ZGC_LOCK;
+ for (int i = 0; i < 5; i++) {
+ List<String> locks;
+ try {
+ locks = zk.getChildren(path, null);
+ } catch (NoNodeException e) {
+ Thread.sleep(5000);
+ continue;
+ }
+
+ if (locks != null && locks.size() > 0) {
+ Collections.sort(locks);
+
+ String lockPath = path + "/" + locks.get(0);
+
+ String gcLoc = new String(zk.getData(lockPath, null));
+
+ Assert.assertTrue("Found unexpected data in zookeeper for GC location: " + gcLoc, gcLoc.startsWith(Service.GC_CLIENT.name()));
+ int loc = gcLoc.indexOf(ServerServices.SEPARATOR_CHAR);
+ Assert.assertNotEquals("Could not find split point of GC location for: " + gcLoc, -1, loc);
+ String addr = gcLoc.substring(loc + 1);
+
+ int addrSplit = addr.indexOf(':');
+ Assert.assertNotEquals("Could not find split of GC host:port for: " + addr, -1, addrSplit);
+
+ String host = addr.substring(0, addrSplit), port = addr.substring(addrSplit + 1);
+ // We shouldn't have the "bindall" address in zk
+ Assert.assertNotEquals("0.0.0.0", host);
+ // Nor should we have the "random port" in zk
+ Assert.assertNotEquals(0, Integer.parseInt(port));
+ return;
+ }
+
+ Thread.sleep(5000);
+ }
+
+ Assert.fail("Could not find advertised GC address");
+ }
+
+ private int countFiles() throws Exception {
+ FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
+ Path path = new Path(cluster.getConfig().getDir() + "/accumulo/tables/1/*/*.rf");
+ return Iterators.size(Arrays.asList(fs.globStatus(path)).iterator());
+ }
+
+ public static void addEntries(Connector conn, BatchWriterOpts bwOpts) throws Exception {
+ conn.securityOperations().grantTablePermission(conn.whoami(), MetadataTable.NAME, TablePermission.WRITE);
+ BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, bwOpts.getBatchWriterConfig());
+
+ for (int i = 0; i < 100000; ++i) {
+ final Text emptyText = new Text("");
+ Text row = new Text(String.format("%s/%020d/%s", MetadataSchema.DeletesSection.getRowPrefix(), i,
+ "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjj"));
+ Mutation delFlag = new Mutation(row);
+ delFlag.put(emptyText, emptyText, new Value(new byte[] {}));
+ bw.addMutation(delFlag);
+ }
+ bw.close();
+ }
+}
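The delete candidates these tests manipulate are ordinary rows in the deletes section of the metadata table, so they can be inspected directly. A minimal sketch (not part of this patch; it reuses the imports in the file above plus org.apache.accumulo.core.data.Range and org.apache.accumulo.core.client.TableNotFoundException, and assumes Range.prefix is available in this client API):

    // Hedged sketch: list pending GC delete candidates by scanning the
    // deletes section of the metadata table.
    static void printDeleteCandidates(Connector conn) throws TableNotFoundException {
      String prefix = MetadataSchema.DeletesSection.getRowPrefix();
      Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
      s.setRange(Range.prefix(prefix)); // restrict the scan to the deletes section
      for (Entry<Key,Value> e : s)
        System.out.println(e.getKey().getRow().toString().substring(prefix.length()));
    }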
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java b/test/src/main/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
new file mode 100644
index 0000000..59d8259
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Map;
+
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.util.Daemon;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.start.Main;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.accumulo.tserver.TabletServer;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+public class HalfDeadTServerIT extends ConfigurableMacBase {
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setNumTservers(1);
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "15s");
+ cfg.setProperty(Property.GENERAL_RPC_TIMEOUT, "5s");
+ cfg.useMiniDFS(true);
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 4 * 60;
+ }
+
+ class DumpOutput extends Daemon {
+
+ private final BufferedReader rdr;
+ private final StringBuilder output;
+
+ DumpOutput(InputStream is) {
+ rdr = new BufferedReader(new InputStreamReader(is));
+ output = new StringBuilder();
+ }
+
+ @Override
+ public void run() {
+ try {
+ while (true) {
+ String line = rdr.readLine();
+ if (line == null)
+ break;
+ System.out.println(line);
+ output.append(line);
+ output.append("\n");
+ }
+ } catch (IOException ex) {
+ log.error("IOException", ex);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return output.toString();
+ }
+ }
+
+ @Test
+ public void testRecover() throws Exception {
+ test(10);
+ }
+
+ @Test
+ public void testTimeout() throws Exception {
+ String results = test(20, true);
+ if (results != null) {
+ if (!results.contains("Session expired")) {
+ log.info("Failed to find 'Session expired' in output, but TServer did die which is expected");
+ }
+ }
+ }
+
+ public String test(int seconds) throws Exception {
+ return test(seconds, false);
+ }
+
+ public String test(int seconds, boolean expectTserverDied) throws Exception {
+ if (!makeDiskFailureLibrary())
+ return null;
+ Connector c = getConnector();
+ assertEquals(1, c.instanceOperations().getTabletServers().size());
+
+ // create our own tablet server with the special test library
+ String javaHome = System.getProperty("java.home");
+ String javaBin = javaHome + File.separator + "bin" + File.separator + "java";
+ String classpath = System.getProperty("java.class.path");
+ classpath = new File(cluster.getConfig().getDir(), "conf") + File.pathSeparator + classpath;
+ String className = TabletServer.class.getName();
+ ArrayList<String> argList = new ArrayList<String>();
+ argList.addAll(Arrays.asList(javaBin, "-cp", classpath));
+ argList.addAll(Arrays.asList(Main.class.getName(), className));
+ ProcessBuilder builder = new ProcessBuilder(argList);
+ Map<String,String> env = builder.environment();
+ env.put("ACCUMULO_HOME", cluster.getConfig().getDir().getAbsolutePath());
+ env.put("ACCUMULO_LOG_DIR", cluster.getConfig().getLogDir().getAbsolutePath());
+ String trickFilename = cluster.getConfig().getLogDir().getAbsolutePath() + "/TRICK_FILE";
+ env.put("TRICK_FILE", trickFilename);
+ String libPath = System.getProperty("user.dir") + "/target/fake_disk_failure.so";
+ env.put("LD_PRELOAD", libPath);
+ env.put("DYLD_INSERT_LIBRARIES", libPath);
+ env.put("DYLD_FORCE_FLAT_NAMESPACE", "true");
+ Process ingest = null;
+ Process tserver = builder.start();
+ DumpOutput t = new DumpOutput(tserver.getInputStream());
+ try {
+ t.start();
+ UtilWaitThread.sleep(1000);
+ // don't need the regular tablet server
+ cluster.killProcess(ServerType.TABLET_SERVER, cluster.getProcesses().get(ServerType.TABLET_SERVER).iterator().next());
+ UtilWaitThread.sleep(1000);
+ c.tableOperations().create("test_ingest");
+ assertEquals(1, c.instanceOperations().getTabletServers().size());
+ int rows = 100 * 1000;
+ ingest = cluster.exec(TestIngest.class, "-u", "root", "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-p", ROOT_PASSWORD, "--rows", rows
+ + "");
+ UtilWaitThread.sleep(500);
+
+ // block I/O with some side-channel trickiness
+ File trickFile = new File(trickFilename);
+ try {
+ assertTrue(trickFile.createNewFile());
+ UtilWaitThread.sleep(seconds * 1000);
+ } finally {
+ if (!trickFile.delete()) {
+ log.error("Couldn't delete " + trickFile);
+ }
+ }
+
+ if (seconds <= 10) {
+ assertEquals(0, ingest.waitFor());
+ VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+ vopts.rows = rows;
+ vopts.setPrincipal("root");
+ VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+ } else {
+ UtilWaitThread.sleep(5 * 1000);
+ tserver.waitFor();
+ t.join();
+ tserver = null;
+ }
+ // verify the process was blocked
+ String results = t.toString();
+ assertTrue(results.contains("sleeping\nsleeping\nsleeping\n"));
+ return results;
+ } finally {
+ if (ingest != null) {
+ ingest.destroy();
+ ingest.waitFor();
+ }
+ if (tserver != null) {
+ try {
+ if (expectTserverDied) {
+ try {
+ tserver.exitValue();
+ } catch (IllegalThreadStateException e) {
+ fail("Expected TServer to kill itself, but it is still running");
+ }
+ }
+ } finally {
+ tserver.destroy();
+ tserver.waitFor();
+ t.join();
+ }
+ }
+ }
+ }
+
+ private boolean makeDiskFailureLibrary() throws Exception {
+ String root = System.getProperty("user.dir");
+ String source = root + "/src/test/c/fake_disk_failure.c";
+ String lib = root + "/target/fake_disk_failure.so";
+ String platform = System.getProperty("os.name");
+ String cmd[];
+ if (platform.equals("Darwin")) {
+ cmd = new String[] {"gcc", "-arch", "x86_64", "-arch", "i386", "-dynamiclib", "-O3", "-fPIC", source, "-o", lib};
+ } else {
+ cmd = new String[] {"gcc", "-D_GNU_SOURCE", "-Wall", "-fPIC", source, "-shared", "-o", lib, "-ldl"};
+ }
+ Process gcc = Runtime.getRuntime().exec(cmd);
+ return gcc.waitFor() == 0;
+ }
+
+}
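The half-dead behavior above comes from interposing on libc I/O: a shared object built at test time is injected into the child tablet server through the dynamic loader's preload mechanism and toggled by creating the TRICK_FILE marker. A generic sketch of only the injection step (paths and main class are illustrative, not from this patch):

    // Hedged sketch: preload an interposing library into a child JVM.
    ProcessBuilder pb = new ProcessBuilder("java", "-cp", "/path/to/classes", "com.example.SomeMain");
    Map<String,String> env = pb.environment();
    env.put("LD_PRELOAD", "/path/to/fake_disk_failure.so");            // Linux dynamic loader
    env.put("DYLD_INSERT_LIBRARIES", "/path/to/fake_disk_failure.so"); // OS X dynamic loader
    env.put("DYLD_FORCE_FLAT_NAMESPACE", "true");                      // OS X needs a flat namespace to interpose
    Process child = pb.start();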
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ConfigurableMacBase.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ConfigurableMacBase.java b/test/src/test/java/org/apache/accumulo/test/functional/ConfigurableMacBase.java
deleted file mode 100644
index b86fcfe..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ConfigurableMacBase.java
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.BufferedOutputStream;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Map;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.util.MonitorUtil;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.harness.AccumuloITBase;
-import org.apache.accumulo.minicluster.MiniAccumuloCluster;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.minicluster.impl.ZooKeeperBindException;
-import org.apache.accumulo.test.util.CertUtils;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.zookeeper.KeeperException;
-import org.junit.After;
-import org.junit.Before;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * General Integration-Test base class that provides access to a {@link MiniAccumuloCluster} for testing. Tests using these typically do very disruptive things
- * to the instance, and require specific configuration. Most tests don't need this level of control and should extend {@link AccumuloClusterHarness} instead.
- */
-public class ConfigurableMacBase extends AccumuloITBase {
- public static final Logger log = LoggerFactory.getLogger(ConfigurableMacBase.class);
-
- protected MiniAccumuloClusterImpl cluster;
-
- protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {}
-
- protected void beforeClusterStart(MiniAccumuloConfigImpl cfg) throws Exception {}
-
- protected static final String ROOT_PASSWORD = "testRootPassword1";
-
- public static void configureForEnvironment(MiniAccumuloConfigImpl cfg, Class<?> testClass, File folder) {
- if ("true".equals(System.getProperty("org.apache.accumulo.test.functional.useSslForIT"))) {
- configureForSsl(cfg, folder);
- }
- if ("true".equals(System.getProperty("org.apache.accumulo.test.functional.useCredProviderForIT"))) {
- cfg.setUseCredentialProvider(true);
- }
- }
-
- protected static void configureForSsl(MiniAccumuloConfigImpl cfg, File sslDir) {
- Map<String,String> siteConfig = cfg.getSiteConfig();
- if ("true".equals(siteConfig.get(Property.INSTANCE_RPC_SSL_ENABLED.getKey()))) {
- // already enabled; don't mess with it
- return;
- }
-
- // create parent directories, and ensure sslDir is empty
- assertTrue(sslDir.mkdirs() || sslDir.isDirectory());
- FileUtils.deleteQuietly(sslDir);
- assertTrue(sslDir.mkdir());
-
- File rootKeystoreFile = new File(sslDir, "root-" + cfg.getInstanceName() + ".jks");
- File localKeystoreFile = new File(sslDir, "local-" + cfg.getInstanceName() + ".jks");
- File publicTruststoreFile = new File(sslDir, "public-" + cfg.getInstanceName() + ".jks");
- final String rootKeystorePassword = "root_keystore_password", truststorePassword = "truststore_password";
- try {
- new CertUtils(Property.RPC_SSL_KEYSTORE_TYPE.getDefaultValue(), "o=Apache Accumulo,cn=MiniAccumuloCluster", "RSA", 2048, "sha1WithRSAEncryption")
- .createAll(rootKeystoreFile, localKeystoreFile, publicTruststoreFile, cfg.getInstanceName(), rootKeystorePassword, cfg.getRootPassword(),
- truststorePassword);
- } catch (Exception e) {
- throw new RuntimeException("error creating MAC keystore", e);
- }
-
- siteConfig.put(Property.INSTANCE_RPC_SSL_ENABLED.getKey(), "true");
- siteConfig.put(Property.RPC_SSL_KEYSTORE_PATH.getKey(), localKeystoreFile.getAbsolutePath());
- siteConfig.put(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey(), cfg.getRootPassword());
- siteConfig.put(Property.RPC_SSL_TRUSTSTORE_PATH.getKey(), publicTruststoreFile.getAbsolutePath());
- siteConfig.put(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey(), truststorePassword);
- cfg.setSiteConfig(siteConfig);
- }
-
- @Before
- public void setUp() throws Exception {
- createMiniAccumulo();
- Exception lastException = null;
- for (int i = 0; i < 3; i++) {
- try {
- cluster.start();
- return;
- } catch (ZooKeeperBindException e) {
- lastException = e;
- log.warn("Failed to start MiniAccumuloCluster, assumably due to ZooKeeper issues", lastException);
- Thread.sleep(3000);
- createMiniAccumulo();
- }
- }
- throw new RuntimeException("Failed to start MiniAccumuloCluster after three attempts", lastException);
- }
-
- private void createMiniAccumulo() throws Exception {
- // createTestDir will give us an empty directory; we don't need to clean it up ourselves
- File baseDir = createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName());
- MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(baseDir, ROOT_PASSWORD);
- String nativePathInDevTree = NativeMapIT.nativeMapLocation().getAbsolutePath();
- String nativePathInMapReduce = new File(System.getProperty("user.dir")).toString();
- cfg.setNativeLibPaths(nativePathInDevTree, nativePathInMapReduce);
- cfg.setProperty(Property.GC_FILE_ARCHIVE, Boolean.TRUE.toString());
- Configuration coreSite = new Configuration(false);
- configure(cfg, coreSite);
- cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, Boolean.TRUE.toString());
- configureForEnvironment(cfg, getClass(), getSslDir(baseDir));
- cluster = new MiniAccumuloClusterImpl(cfg);
- if (coreSite.size() > 0) {
- File csFile = new File(cluster.getConfig().getConfDir(), "core-site.xml");
- if (csFile.exists())
- throw new RuntimeException(csFile + " already exists");
-
- OutputStream out = new BufferedOutputStream(new FileOutputStream(new File(cluster.getConfig().getConfDir(), "core-site.xml")));
- coreSite.writeXml(out);
- out.close();
- }
- beforeClusterStart(cfg);
- }
-
- @After
- public void tearDown() throws Exception {
- if (cluster != null)
- try {
- cluster.stop();
- } catch (Exception e) {
- // ignored
- }
- }
-
- protected MiniAccumuloClusterImpl getCluster() {
- return cluster;
- }
-
- protected Connector getConnector() throws AccumuloException, AccumuloSecurityException {
- return getCluster().getConnector("root", new PasswordToken(ROOT_PASSWORD));
- }
-
- protected Process exec(Class<?> clazz, String... args) throws IOException {
- return getCluster().exec(clazz, args);
- }
-
- protected String getMonitor() throws KeeperException, InterruptedException {
- Instance instance = new ZooKeeperInstance(getCluster().getClientConfig());
- return MonitorUtil.getLocation(instance);
- }
-
- protected ClientConfiguration getClientConfig() throws Exception {
- return new ClientConfiguration(getCluster().getConfig().getClientConfFile());
- }
-
-}
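This file is only leaving its old location; the tests earlier in this patch (GarbageCollectorIT, HalfDeadTServerIT) still extend it from the new test/src/main tree. The subclass contract is unchanged: override configure to tune the mini cluster before it starts, then use getConnector inside tests. A minimal, hypothetical subclass (class name and property choice are illustrative):

    // Hedged sketch: the minimal shape of a test built on ConfigurableMacBase.
    public class ExampleIT extends ConfigurableMacBase {
      @Override
      protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
        cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s"); // runs before the MAC starts
      }

      @Test
      public void example() throws Exception {
        Connector c = getConnector(); // root connector to the per-test MAC
        c.tableOperations().create(getUniqueNames(1)[0]);
      }
    }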
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ConstraintIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ConstraintIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ConstraintIT.java
deleted file mode 100644
index 4ef4a61..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ConstraintIT.java
+++ /dev/null
@@ -1,335 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.ConstraintViolationSummary;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint;
-import org.apache.accumulo.examples.simple.constraints.NumericValueConstraint;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class ConstraintIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(ConstraintIT.class);
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 30;
- }
-
- @Test
- public void run() throws Exception {
- String[] tableNames = getUniqueNames(3);
- Connector c = getConnector();
- for (String table : tableNames) {
- c.tableOperations().create(table);
- c.tableOperations().addConstraint(table, NumericValueConstraint.class.getName());
- c.tableOperations().addConstraint(table, AlphaNumKeyConstraint.class.getName());
- }
-
- // A static sleep to just let ZK do its thing
- Thread.sleep(10 * 1000);
-
- // Then check that the client has at least gotten the updates
- for (String table : tableNames) {
- log.debug("Checking constraints on {}", table);
- Map<String,Integer> constraints = c.tableOperations().listConstraints(table);
- while (!constraints.containsKey(NumericValueConstraint.class.getName()) || !constraints.containsKey(AlphaNumKeyConstraint.class.getName())) {
- log.debug("Failed to verify constraints. Sleeping and retrying");
- Thread.sleep(2000);
- constraints = c.tableOperations().listConstraints(table);
- }
- log.debug("Verified all constraints on {}", table);
- }
-
- log.debug("Verified constraints on all tables. Running tests");
-
- test1(tableNames[0]);
-
- test2(tableNames[1], false);
- test2(tableNames[2], true);
- }
-
- private void test1(String tableName) throws Exception {
- BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
-
- Mutation mut1 = new Mutation(new Text("r1"));
- mut1.put(new Text("cf1"), new Text("cq1"), new Value("123".getBytes(UTF_8)));
-
- bw.addMutation(mut1);
-
- // should not throw any exceptions
- bw.close();
-
- bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
-
- // create a mutation with a non numeric value
- Mutation mut2 = new Mutation(new Text("r1"));
- mut2.put(new Text("cf1"), new Text("cq1"), new Value("123a".getBytes(UTF_8)));
-
- bw.addMutation(mut2);
-
- boolean sawMRE = false;
-
- try {
- bw.close();
- // should not get here
- throw new Exception("Test failed, constraint did not catch bad mutation");
- } catch (MutationsRejectedException mre) {
- sawMRE = true;
-
- // verify constraint violation summary
- List<ConstraintViolationSummary> cvsl = mre.getConstraintViolationSummaries();
-
- if (cvsl.size() != 1) {
- throw new Exception("Unexpected constraints");
- }
-
- for (ConstraintViolationSummary cvs : cvsl) {
- if (!cvs.constrainClass.equals(NumericValueConstraint.class.getName())) {
- throw new Exception("Unexpected constraint class " + cvs.constrainClass);
- }
-
- if (cvs.numberOfViolatingMutations != 1) {
- throw new Exception("Unexpected # violating mutations " + cvs.numberOfViolatingMutations);
- }
- }
- }
-
- if (!sawMRE) {
- throw new Exception("Did not see MutationsRejectedException");
- }
-
- // verify mutation did not go through
- Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
- scanner.setRange(new Range(new Text("r1")));
-
- Iterator<Entry<Key,Value>> iter = scanner.iterator();
- Entry<Key,Value> entry = iter.next();
-
- if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
- || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123".getBytes(UTF_8)))) {
- throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
- }
-
- if (iter.hasNext()) {
- entry = iter.next();
- throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
- }
-
- // remove the numeric value constraint
- getConnector().tableOperations().removeConstraint(tableName, 2);
- UtilWaitThread.sleep(1000);
-
- // now should be able to add a non numeric value
- bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
- bw.addMutation(mut2);
- bw.close();
-
- // verify mutation went through
- iter = scanner.iterator();
- entry = iter.next();
-
- if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
- || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123a".getBytes(UTF_8)))) {
- throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
- }
-
- if (iter.hasNext()) {
- entry = iter.next();
- throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
- }
-
- // add a constraint that references a non-existent class
- getConnector().tableOperations().setProperty(tableName, Property.TABLE_CONSTRAINT_PREFIX + "1", "com.foobar.nonExistantClass");
- UtilWaitThread.sleep(1000);
-
- // add a mutation
- bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
-
- Mutation mut3 = new Mutation(new Text("r1"));
- mut3.put(new Text("cf1"), new Text("cq1"), new Value("foo".getBytes(UTF_8)));
-
- bw.addMutation(mut3);
-
- sawMRE = false;
-
- try {
- bw.close();
- // should not get here
- throw new Exception("Test failed, mutation went through when table had bad constraints");
- } catch (MutationsRejectedException mre) {
- sawMRE = true;
- }
-
- if (!sawMRE) {
- throw new Exception("Did not see MutationsRejectedException");
- }
-
- // verify the mutation did not go through
- iter = scanner.iterator();
- entry = iter.next();
-
- if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
- || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123a".getBytes(UTF_8)))) {
- throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
- }
-
- if (iter.hasNext()) {
- entry = iter.next();
- throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
- }
-
- // remove the bad constraint
- getConnector().tableOperations().removeConstraint(tableName, 1);
- UtilWaitThread.sleep(1000);
-
- // try the mutation again
- bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
- bw.addMutation(mut3);
- bw.close();
-
- // verify it went through
- iter = scanner.iterator();
- entry = iter.next();
-
- if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
- || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("foo".getBytes(UTF_8)))) {
- throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
- }
-
- if (iter.hasNext()) {
- entry = iter.next();
- throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
- }
- }
-
- private Mutation newMut(String row, String cf, String cq, String val) {
- Mutation mut1 = new Mutation(new Text(row));
- mut1.put(new Text(cf), new Text(cq), new Value(val.getBytes(UTF_8)));
- return mut1;
- }
-
- private void test2(String table, boolean doFlush) throws Exception {
- // test sending multiple mutations with multiple constraint violations... all of the non-violating mutations
- // should go through
- int numericErrors = 2;
-
- BatchWriter bw = getConnector().createBatchWriter(table, new BatchWriterConfig());
- bw.addMutation(newMut("r1", "cf1", "cq1", "123"));
- bw.addMutation(newMut("r1", "cf1", "cq2", "I'm a bad value"));
- if (doFlush) {
- try {
- bw.flush();
- throw new Exception("Didn't find a bad mutation");
- } catch (MutationsRejectedException mre) {
- // ignored
- try {
- bw.close();
- } catch (MutationsRejectedException ex) {
- // ignored
- }
- bw = getConnector().createBatchWriter(table, new BatchWriterConfig());
- numericErrors = 1;
- }
- }
- bw.addMutation(newMut("r1", "cf1", "cq3", "I'm a naughty value"));
- bw.addMutation(newMut("@bad row@", "cf1", "cq2", "456"));
- bw.addMutation(newMut("r1", "cf1", "cq4", "789"));
-
- boolean sawMRE = false;
-
- try {
- bw.close();
- // should not get here
- throw new Exception("Test failed, constraint did not catch bad mutation");
- } catch (MutationsRejectedException mre) {
- System.out.println(mre);
-
- sawMRE = true;
-
- // verify constraint violation summary
- List<ConstraintViolationSummary> cvsl = mre.getConstraintViolationSummaries();
-
- if (cvsl.size() != 2) {
- throw new Exception("Unexpected constraints");
- }
-
- HashMap<String,Integer> expected = new HashMap<String,Integer>();
-
- expected.put("org.apache.accumulo.examples.simple.constraints.NumericValueConstraint", numericErrors);
- expected.put("org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint", 1);
-
- for (ConstraintViolationSummary cvs : cvsl) {
- if (expected.get(cvs.constrainClass) != cvs.numberOfViolatingMutations) {
- throw new Exception("Unexpected " + cvs.constrainClass + " " + cvs.numberOfViolatingMutations);
- }
- }
- }
-
- if (!sawMRE) {
- throw new Exception("Did not see MutationsRejectedException");
- }
-
- Scanner scanner = getConnector().createScanner(table, Authorizations.EMPTY);
-
- Iterator<Entry<Key,Value>> iter = scanner.iterator();
-
- Entry<Key,Value> entry = iter.next();
-
- if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
- || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123".getBytes(UTF_8)))) {
- throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
- }
-
- entry = iter.next();
-
- if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
- || !entry.getKey().getColumnQualifier().equals(new Text("cq4")) || !entry.getValue().equals(new Value("789".getBytes(UTF_8)))) {
- throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
- }
-
- if (iter.hasNext()) {
- entry = iter.next();
- throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
- }
-
- }
-
-}
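The pattern this test exercises, recovering per-constraint counts after a rejected batch, is worth isolating. A minimal sketch, assuming a BatchWriter bw holding violating mutations (the field names constrainClass and numberOfViolatingMutations are as in the API used above):

    // Hedged sketch: inspect constraint violations after a rejected batch.
    try {
      bw.close(); // violations surface on flush/close
    } catch (MutationsRejectedException mre) {
      for (ConstraintViolationSummary cvs : mre.getConstraintViolationSummaries())
        System.err.println(cvs.constrainClass + " rejected " + cvs.numberOfViolatingMutations + " mutation(s)");
    }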
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/CreateAndUseIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CreateAndUseIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CreateAndUseIT.java
deleted file mode 100644
index b2373e6..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/CreateAndUseIT.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class CreateAndUseIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 4 * 60;
- }
-
- private static SortedSet<Text> splits;
-
- @BeforeClass
- public static void createData() throws Exception {
- splits = new TreeSet<Text>();
-
- for (int i = 1; i < 256; i++) {
- splits.add(new Text(String.format("%08x", i << 8)));
- }
- }
-
- @Test
- public void verifyDataIsPresent() throws Exception {
- Text cf = new Text("cf1");
- Text cq = new Text("cq1");
-
- String tableName = getUniqueNames(1)[0];
- getConnector().tableOperations().create(tableName);
- getConnector().tableOperations().addSplits(tableName, splits);
- BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
-
- for (int i = 1; i < 257; i++) {
- Mutation m = new Mutation(new Text(String.format("%08x", (i << 8) - 16)));
- m.put(cf, cq, new Value(Integer.toString(i).getBytes(UTF_8)));
-
- bw.addMutation(m);
- }
-
- bw.close();
- Scanner scanner1 = getConnector().createScanner(tableName, Authorizations.EMPTY);
-
- int ei = 1;
-
- for (Entry<Key,Value> entry : scanner1) {
- Assert.assertEquals(String.format("%08x", (ei << 8) - 16), entry.getKey().getRow().toString());
- Assert.assertEquals(Integer.toString(ei), entry.getValue().toString());
-
- ei++;
- }
-
- Assert.assertEquals("Did not see expected number of rows", 257, ei);
- }
-
- @Test
- public void createTableAndScan() throws Exception {
- String table2 = getUniqueNames(1)[0];
- getConnector().tableOperations().create(table2);
- getConnector().tableOperations().addSplits(table2, splits);
- Scanner scanner2 = getConnector().createScanner(table2, Authorizations.EMPTY);
- int count = 0;
- for (Entry<Key,Value> entry : scanner2) {
- if (entry != null)
- count++;
- }
-
- if (count != 0) {
- throw new Exception("Did not see expected number of entries, count = " + count);
- }
- }
-
- @Test
- public void createTableAndBatchScan() throws Exception {
- ArrayList<Range> ranges = new ArrayList<Range>();
- for (int i = 1; i < 257; i++) {
- ranges.add(new Range(new Text(String.format("%08x", (i << 8) - 16))));
- }
-
- String table3 = getUniqueNames(1)[0];
- getConnector().tableOperations().create(table3);
- getConnector().tableOperations().addSplits(table3, splits);
- BatchScanner bs = getConnector().createBatchScanner(table3, Authorizations.EMPTY, 3);
- bs.setRanges(ranges);
- Iterator<Entry<Key,Value>> iter = bs.iterator();
- int count = Iterators.size(iter);
- bs.close();
-
- Assert.assertEquals("Did not expect to find any entries", 0, count);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java
deleted file mode 100644
index 79151ee..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.junit.Test;
-
-public class CreateManyScannersIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Test
- public void run() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- for (int i = 0; i < 100000; i++) {
- c.createScanner(tableName, Authorizations.EMPTY);
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java
deleted file mode 100644
index b383d0a..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/CredentialsIT.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.security.SecurityErrorCode;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class CredentialsIT extends AccumuloClusterHarness {
-
- private boolean saslEnabled;
- private String username;
- private String password;
- private Instance inst;
-
- @Override
- public int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Before
- public void createLocalUser() throws AccumuloException, AccumuloSecurityException {
- Connector conn = getConnector();
- inst = conn.getInstance();
-
- ClientConfiguration clientConf = cluster.getClientConfig();
- ClusterUser user = getUser(0);
- username = user.getPrincipal();
- saslEnabled = clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false);
- // Create the user if it doesn't exist
- Set<String> users = conn.securityOperations().listLocalUsers();
- if (!users.contains(username)) {
- PasswordToken passwdToken = null;
- if (!saslEnabled) {
- password = user.getPassword();
- passwdToken = new PasswordToken(password);
- }
- conn.securityOperations().createLocalUser(username, passwdToken);
- }
- }
-
- @After
- public void deleteLocalUser() throws Exception {
- if (saslEnabled) {
- ClusterUser root = getAdminUser();
- UserGroupInformation.loginUserFromKeytab(root.getPrincipal(), root.getKeytab().getAbsolutePath());
- }
- getConnector().securityOperations().dropLocalUser(username);
- }
-
- @Test
- public void testConnectorWithDestroyedToken() throws Exception {
- AuthenticationToken token = getUser(0).getToken();
- assertFalse(token.isDestroyed());
- token.destroy();
- assertTrue(token.isDestroyed());
- try {
- inst.getConnector("non_existent_user", token);
- fail();
- } catch (AccumuloSecurityException e) {
- assertTrue(e.getSecurityErrorCode().equals(SecurityErrorCode.TOKEN_EXPIRED));
- }
- }
-
- @Test
- public void testDestroyTokenBeforeRPC() throws Exception {
- AuthenticationToken token = getUser(0).getToken();
- Connector userConnector = inst.getConnector(username, token);
- Scanner scanner = userConnector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- assertFalse(token.isDestroyed());
- token.destroy();
- assertTrue(token.isDestroyed());
- try {
- Iterator<Entry<Key,Value>> iter = scanner.iterator();
- while (iter.hasNext())
- fail();
- fail();
- } catch (Exception e) {
- assertTrue(e instanceof RuntimeException);
- assertTrue(e.getCause() instanceof AccumuloSecurityException);
- assertTrue(AccumuloSecurityException.class.cast(e.getCause()).getSecurityErrorCode().equals(SecurityErrorCode.TOKEN_EXPIRED));
- }
- }
-
-}
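The behavior under test, a destroyed token being refused with TOKEN_EXPIRED, implies a simple client-side hygiene pattern. A minimal sketch, with an illustrative user and password and the same imports as the file above:

    // Hedged sketch: a destroyed token is rejected on its next use.
    static void demoDestroyedToken(Instance inst) throws Exception {
      PasswordToken token = new PasswordToken("secret"); // illustrative credential
      token.destroy(); // wipes the in-memory secret
      try {
        inst.getConnector("someuser", token); // any use after destroy
      } catch (AccumuloSecurityException e) {
        assert SecurityErrorCode.TOKEN_EXPIRED == e.getSecurityErrorCode();
      }
    }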
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/DeleteEverythingIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DeleteEverythingIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DeleteEverythingIT.java
deleted file mode 100644
index 2650c89..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeleteEverythingIT.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-
-import java.util.Map;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Iterators;
-
-public class DeleteEverythingIT extends AccumuloClusterHarness {
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- Map<String,String> siteConfig = cfg.getSiteConfig();
- siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1s");
- cfg.setSiteConfig(siteConfig);
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60;
- }
-
- private String majcDelay;
-
- @Before
- public void updateMajcDelay() throws Exception {
- Connector c = getConnector();
- majcDelay = c.instanceOperations().getSystemConfiguration().get(Property.TSERV_MAJC_DELAY.getKey());
- c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), "1s");
- if (getClusterType() == ClusterType.STANDALONE) {
- // On standalone clusters, wait out the previously configured compaction delay first
- Thread.sleep(AccumuloConfiguration.getTimeInMillis(majcDelay));
- }
- }
-
- @After
- public void resetMajcDelay() throws Exception {
- Connector c = getConnector();
- c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
- }
-
- @Test
- public void run() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
- Mutation m = new Mutation(new Text("foo"));
- m.put(new Text("bar"), new Text("1910"), new Value("5".getBytes(UTF_8)));
- bw.addMutation(m);
- bw.flush();
-
- getConnector().tableOperations().flush(tableName, null, null, true);
-
- FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 1, 1);
-
- m = new Mutation(new Text("foo"));
- m.putDelete(new Text("bar"), new Text("1910"));
- bw.addMutation(m);
- bw.flush();
-
- Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
- scanner.setRange(new Range());
- int count = Iterators.size(scanner.iterator());
- assertEquals("count == " + count, 0, count);
- getConnector().tableOperations().flush(tableName, null, null, true);
-
- getConnector().tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1.0");
- UtilWaitThread.sleep(4000);
-
- FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 0, 0);
-
- bw.close();
-
- count = Iterables.size(scanner);
-
- if (count != 0)
- throw new Exception("count == " + count);
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
deleted file mode 100644
index 79c4e60..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import org.apache.accumulo.cluster.AccumuloCluster;
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ClientOpts.Password;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.TestRandomDeletes;
-import org.apache.accumulo.test.VerifyIngest;
-import org.junit.Test;
-
-import com.google.common.base.Charsets;
-
-public class DeleteIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Test
- public void test() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- AuthenticationToken token = getAdminToken();
- if (token instanceof KerberosToken) {
- deleteTest(c, getCluster(), getAdminPrincipal(), null, tableName, getAdminUser().getKeytab().getAbsolutePath());
- } else if (token instanceof PasswordToken) {
- PasswordToken passwdToken = (PasswordToken) token;
- deleteTest(c, getCluster(), getAdminPrincipal(), new String(passwdToken.getPassword(), Charsets.UTF_8), tableName, null);
- }
- }
-
- public static void deleteTest(Connector c, AccumuloCluster cluster, String user, String password, String tableName, String keytab) throws Exception {
- VerifyIngest.Opts vopts = new VerifyIngest.Opts();
- TestIngest.Opts opts = new TestIngest.Opts();
- vopts.setTableName(tableName);
- opts.setTableName(tableName);
- vopts.rows = opts.rows = 1000;
- vopts.cols = opts.cols = 1;
- vopts.random = opts.random = 56;
-
- assertTrue("Expected one of password or keytab", null != password || null != keytab);
- if (null != password) {
- assertNull("Given password, expected null keytab", keytab);
- Password passwd = new Password(password);
- opts.setPassword(passwd);
- opts.setPrincipal(user);
- vopts.setPassword(passwd);
- vopts.setPrincipal(user);
- }
- if (null != keytab) {
- assertNull("Given keytab, expect null password", password);
- ClientConfiguration clientConfig = cluster.getClientConfig();
- opts.updateKerberosCredentials(clientConfig);
- vopts.updateKerberosCredentials(clientConfig);
- }
-
- BatchWriterOpts BWOPTS = new BatchWriterOpts();
- TestIngest.ingest(c, opts, BWOPTS);
-
- String[] args = null;
-
- assertTrue("Expected one of password or keytab", null != password || null != keytab);
- if (null != password) {
- assertNull("Given password, expected null keytab", keytab);
- args = new String[] {"-u", user, "-p", password, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "--table", tableName};
- }
- if (null != keytab) {
- assertNull("Given keytab, expect null password", password);
- args = new String[] {"-u", user, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "--table", tableName, "--keytab", keytab};
- }
-
- assertEquals(0, cluster.getClusterControl().exec(TestRandomDeletes.class, args));
- TestIngest.ingest(c, opts, BWOPTS);
- VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
- }
-
-}
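For context, the deleted test's flow is: ingest a deterministic data set, run TestRandomDeletes as an external process, re-ingest, then verify. A minimal sketch of the password-based option wiring, using only calls that appear above; the connector `c`, principal, password, and table name are placeholders:

    import org.apache.accumulo.core.cli.BatchWriterOpts;
    import org.apache.accumulo.core.cli.ClientOpts.Password;
    import org.apache.accumulo.core.cli.ScannerOpts;
    import org.apache.accumulo.test.TestIngest;
    import org.apache.accumulo.test.VerifyIngest;

    // Mirror the ingest and verify options: same table, row count, and seed,
    // so the verifier can recompute exactly what the ingester wrote.
    TestIngest.Opts opts = new TestIngest.Opts();
    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
    opts.setTableName("demo");
    vopts.setTableName("demo");
    vopts.rows = opts.rows = 1000;
    vopts.cols = opts.cols = 1;
    vopts.random = opts.random = 56;        // fixed seed
    opts.setPrincipal("user");
    opts.setPassword(new Password("secret"));
    vopts.setPrincipal("user");
    vopts.setPassword(new Password("secret"));
    TestIngest.ingest(c, opts, new BatchWriterOpts());
    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());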
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsIT.java
deleted file mode 100644
index e4a8451..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsIT.java
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterators;
-
-public class DeleteRowsIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 10 * 60;
- }
-
- private static final Logger log = LoggerFactory.getLogger(DeleteRowsIT.class);
-
- private static final int ROWS_PER_TABLET = 10;
- private static final String[] LETTERS = new String[] {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t",
- "u", "v", "w", "x", "y", "z"};
- static final SortedSet<Text> SPLITS = new TreeSet<Text>();
- static {
- for (String alpha : LETTERS) {
- SPLITS.add(new Text(alpha));
- }
- }
- static final List<String> ROWS = new ArrayList<String>(Arrays.asList(LETTERS));
- static {
- // put data on first and last tablet
- ROWS.add("A");
- ROWS.add("{");
- }
-
- @Test(timeout = 5 * 60 * 1000)
- public void testDeleteAllRows() throws Exception {
- Connector c = getConnector();
- String[] tableNames = this.getUniqueNames(20);
- for (String tableName : tableNames) {
- c.tableOperations().create(tableName);
- c.tableOperations().deleteRows(tableName, null, null);
- Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
- assertEquals(0, Iterators.size(scanner.iterator()));
- }
- }
-
- @Test
- public void testManyRows() throws Exception {
- // Delete ranges of rows, and verify the tablets are removed.
- int i = 0;
- // Eliminate whole tablets
- String tableName = getUniqueNames(1)[0];
- testSplit(tableName + i++, "f", "h", "abcdefijklmnopqrstuvwxyz", 260);
- // Eliminate whole tablets, partial first tablet
- testSplit(tableName + i++, "f1", "h", "abcdeff1ijklmnopqrstuvwxyz", 262);
- // Eliminate whole tablets, partial last tablet
- testSplit(tableName + i++, "f", "h1", "abcdefijklmnopqrstuvwxyz", 258);
- // Eliminate whole tablets, partial first and last tablet
- testSplit(tableName + i++, "f1", "h1", "abcdeff1ijklmnopqrstuvwxyz", 260);
- // Eliminate one tablet
- testSplit(tableName + i++, "f", "g", "abcdefhijklmnopqrstuvwxyz", 270);
- // Eliminate partial tablet, matches start split
- testSplit(tableName + i++, "f", "f1", "abcdefghijklmnopqrstuvwxyz", 278);
- // Eliminate partial tablet, matches end split
- testSplit(tableName + i++, "f1", "g", "abcdeff1hijklmnopqrstuvwxyz", 272);
- // Eliminate tablets starting at -inf
- testSplit(tableName + i++, null, "h", "ijklmnopqrstuvwxyz", 200);
- // Eliminate tablets ending at +inf
- testSplit(tableName + i++, "t", null, "abcdefghijklmnopqrst", 200);
- // Eliminate some rows inside one tablet
- testSplit(tableName + i++, "t0", "t2", "abcdefghijklmnopqrstt0uvwxyz", 278);
- // Eliminate some rows in the first tablet
- testSplit(tableName + i++, null, "A1", "abcdefghijklmnopqrstuvwxyz", 278);
- // Eliminate some rows in the last tablet
- testSplit(tableName + i++, "{1", null, "abcdefghijklmnopqrstuvwxyz{1", 272);
- // Delete everything
- testSplit(tableName + i++, null, null, "", 0);
- }
-
- private void testSplit(String table, String start, String end, String result, int entries) throws Exception {
- // Put a bunch of rows on each tablet
- Connector c = getConnector();
- c.tableOperations().create(table);
- BatchWriter bw = c.createBatchWriter(table, null);
- for (String row : ROWS) {
- for (int j = 0; j < ROWS_PER_TABLET; j++) {
- Mutation m = new Mutation(row + j);
- m.put("cf", "cq", "value");
- bw.addMutation(m);
- }
- }
- bw.flush();
- bw.close();
- // Split the table
- c.tableOperations().addSplits(table, SPLITS);
-
- Text startText = start == null ? null : new Text(start);
- Text endText = end == null ? null : new Text(end);
- c.tableOperations().deleteRows(table, startText, endText);
- Collection<Text> remainingSplits = c.tableOperations().listSplits(table);
- StringBuilder sb = new StringBuilder();
- // See that whole tablets are removed
- for (Text split : remainingSplits)
- sb.append(split.toString());
- assertEquals(result, sb.toString());
- // See that the rows are really deleted
- Scanner scanner = c.createScanner(table, Authorizations.EMPTY);
- int count = 0;
- for (Entry<Key,Value> entry : scanner) {
- Text row = entry.getKey().getRow();
- assertTrue((startText == null || row.compareTo(startText) <= 0) || (endText == null || row.compareTo(endText) > 0));
- assertTrue(startText != null || endText != null);
- count++;
- }
- log.info("Finished table " + table);
- assertEquals(entries, count);
- }
-
-}
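A note on the semantics the expected-splits strings above encode: TableOperations.deleteRows removes the rows in the range (start, end], exclusive of start and inclusive of end, and merges away tablets that fall entirely inside the range. A minimal sketch, with hypothetical bounds:

    import org.apache.accumulo.core.client.Connector;
    import org.apache.hadoop.io.Text;

    // Deletes every row r with "f" < r <= "h"; a null bound means -inf or +inf.
    // Tablets wholly inside the range disappear, which is what the
    // remaining-splits assertion above checks via listSplits().
    static void deleteMiddleRows(Connector c, String table) throws Exception {
      c.tableOperations().deleteRows(table, new Text("f"), new Text("h"));
    }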
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
deleted file mode 100644
index dcc3124..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-// attempt to reproduce ACCUMULO-315
-public class DeleteRowsSplitIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 4 * 60;
- }
-
- private static final Logger log = LoggerFactory.getLogger(DeleteRowsSplitIT.class);
-
- private static final String LETTERS = "abcdefghijklmnopqrstuvwxyz";
- static final SortedSet<Text> SPLITS = new TreeSet<Text>();
- static final List<String> ROWS = new ArrayList<String>();
- static {
- for (byte b : LETTERS.getBytes(UTF_8)) {
- SPLITS.add(new Text(new byte[] {b}));
- ROWS.add(new String(new byte[] {b}, UTF_8));
- }
- }
-
- @Test
- public void run() throws Exception {
- // Delete ranges of rows, and verify they are removed
- // Do this while adding many splits
- final String tableName = getUniqueNames(1)[0];
- final Connector conn = getConnector();
-
- // Eliminate whole tablets
- for (int test = 0; test < 10; test++) {
- // create a table
- log.info("Test " + test);
- conn.tableOperations().create(tableName);
-
- // put some data in it
- fillTable(conn, tableName);
-
- // generate a random delete range
- final Text start = new Text();
- final Text end = new Text();
- generateRandomRange(start, end);
-
- // initiate the delete range
- final boolean fail[] = {false};
- Thread t = new Thread() {
- @Override
- public void run() {
- try {
- // split the table
- final SortedSet<Text> afterEnd = SPLITS.tailSet(new Text(end.toString() + "\0"));
- conn.tableOperations().addSplits(tableName, afterEnd);
- } catch (Exception ex) {
- log.error("Exception", ex);
- synchronized (fail) {
- fail[0] = true;
- }
- }
- }
- };
- t.start();
-
- UtilWaitThread.sleep(test * 2);
-
- conn.tableOperations().deleteRows(tableName, start, end);
-
- t.join();
- synchronized (fail) {
- assertTrue(!fail[0]);
- }
-
- // scan the table
- Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
- for (Entry<Key,Value> entry : scanner) {
- Text row = entry.getKey().getRow();
- assertTrue(row.compareTo(start) <= 0 || row.compareTo(end) > 0);
- }
-
- // delete the table
- conn.tableOperations().delete(tableName);
- }
- }
-
- private void generateRandomRange(Text start, Text end) {
- List<String> bunch = new ArrayList<String>(ROWS);
- Collections.shuffle(bunch);
- if (bunch.get(0).compareTo(bunch.get(1)) < 0) {
- start.set(bunch.get(0));
- end.set(bunch.get(1));
- } else {
- start.set(bunch.get(1));
- end.set(bunch.get(0));
- }
-
- }
-
- private void fillTable(Connector conn, String table) throws Exception {
- BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
- for (String row : ROWS) {
- Mutation m = new Mutation(row);
- m.put("cf", "cq", "value");
- bw.addMutation(m);
- }
- bw.close();
- }
-}
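One detail worth calling out in the splitter thread above: appending a NUL byte to `end` yields the smallest Text that sorts strictly after it, so tailSet() returns exactly the splits beyond the deletion range. A standalone sketch of the idiom:

    import java.util.SortedSet;
    import java.util.TreeSet;
    import org.apache.hadoop.io.Text;

    SortedSet<Text> splits = new TreeSet<Text>();
    splits.add(new Text("a"));
    splits.add(new Text("m"));
    splits.add(new Text("z"));
    Text end = new Text("m");
    // "m\0" sorts immediately after "m", so the tail set here is {"z"}:
    // only the splits strictly after the end of the deleted row range.
    SortedSet<Text> afterEnd = splits.tailSet(new Text(end.toString() + "\0"));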
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/DeleteTableDuringSplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DeleteTableDuringSplitIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DeleteTableDuringSplitIT.java
deleted file mode 100644
index 7c94163..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeleteTableDuringSplitIT.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertFalse;
-
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.SortedSet;
-import java.util.TreeSet;
-import java.util.concurrent.Future;
-
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.util.SimpleThreadPool;
-import org.apache.accumulo.fate.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Test;
-
-// ACCUMULO-2361
-public class DeleteTableDuringSplitIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 15 * 60;
- }
-
- @Test
- public void test() throws Exception {
- // 96 tables; each gets an addSplits task and a delete task, submitted 8 tables at a time
- int batches = 12, batchSize = 8;
- String[] tableNames = getUniqueNames(batches * batchSize);
- // make a bunch of tables
- for (String tableName : tableNames) {
- getConnector().tableOperations().create(tableName);
- }
- final SortedSet<Text> splits = new TreeSet<Text>();
- for (byte i = 0; i < 100; i++) {
- splits.add(new Text(new byte[] {0, 0, i}));
- }
-
- List<Future<?>> results = new ArrayList<Future<?>>();
- List<Runnable> tasks = new ArrayList<Runnable>();
- SimpleThreadPool es = new SimpleThreadPool(batchSize * 2, "concurrent-api-requests");
- for (String tableName : tableNames) {
- final String finalName = tableName;
- tasks.add(new Runnable() {
- @Override
- public void run() {
- try {
- getConnector().tableOperations().addSplits(finalName, splits);
- } catch (TableNotFoundException ex) {
- // expected, ignore
- } catch (Exception ex) {
- throw new RuntimeException(finalName, ex);
- }
- }
- });
- tasks.add(new Runnable() {
- @Override
- public void run() {
- try {
- UtilWaitThread.sleep(500);
- getConnector().tableOperations().delete(finalName);
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
- }
- });
- }
- Iterator<Runnable> itr = tasks.iterator();
- for (int batch = 0; batch < batches; batch++) {
- for (int i = 0; i < batchSize; i++) {
- Future<?> f = es.submit(itr.next());
- results.add(f);
- f = es.submit(itr.next());
- results.add(f);
- }
- for (Future<?> f : results) {
- f.get();
- }
- results.clear();
- }
- // Shut down the ES
- List<Runnable> queued = es.shutdownNow();
- Assert.assertTrue("Had more tasks to run", queued.isEmpty());
- Assert.assertFalse("Had more tasks that needed to be submitted", itr.hasNext());
- for (String tableName : tableNames) {
- assertFalse(getConnector().tableOperations().exists(tableName));
- }
- }
-
-}
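The batching above pairs an addSplits task with a delayed delete per table and drains the futures between batches, so at most one batch of tables is in flight. A sketch of the same submit-then-drain pattern with a stock executor; the test itself uses Accumulo's SimpleThreadPool, so this java.util.concurrent rendering is an equivalent, not the test's exact code:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    static void runAndDrain(List<Runnable> tasks) throws Exception {
      ExecutorService es = Executors.newFixedThreadPool(16);
      List<Future<?>> results = new ArrayList<Future<?>>();
      for (Runnable task : tasks) {
        results.add(es.submit(task));
      }
      for (Future<?> f : results) {
        f.get();   // rethrows task failures, wrapped in ExecutionException
      }
      es.shutdownNow();
    }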
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/DeletedTablesDontFlushIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DeletedTablesDontFlushIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DeletedTablesDontFlushIT.java
deleted file mode 100644
index ca8003a..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeletedTablesDontFlushIT.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.EnumSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
-import org.apache.accumulo.fate.util.UtilWaitThread;
-import org.apache.accumulo.harness.SharedMiniClusterBase;
-import org.junit.Test;
-
-// ACCUMULO-2880
-public class DeletedTablesDontFlushIT extends SharedMiniClusterBase {
-
- @Override
- public int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Test
- public void test() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- IteratorSetting setting = new IteratorSetting(100, SlowIterator.class);
- SlowIterator.setSleepTime(setting, 1000);
- c.tableOperations().attachIterator(tableName, setting, EnumSet.of(IteratorScope.minc));
- // let the configuration change propagate through zookeeper
- UtilWaitThread.sleep(1000);
-
- Mutation m = new Mutation("xyzzy");
- for (int i = 0; i < 100; i++) {
- m.put("cf", "" + i, new Value(new byte[] {}));
- }
- BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
- bw.addMutation(m);
- bw.close();
- // should go fast
- c.tableOperations().delete(tableName);
- }
-
-}
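The key setup above is an iterator that throttles minor compactions, making any flush of the pending mutations slow; the point of the test is that deleting the table still returns quickly because deleted tables skip the flush. The attach idiom, using only calls from the test (`c` and `tableName` are placeholders):

    import java.util.EnumSet;
    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;

    // Priority-100 iterator that sleeps 1s per key-value, scoped to minor
    // compactions only, so scans stay fast while flushes crawl.
    IteratorSetting setting = new IteratorSetting(100, SlowIterator.class);
    SlowIterator.setSleepTime(setting, 1000);
    c.tableOperations().attachIterator(tableName, setting, EnumSet.of(IteratorScope.minc));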
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/DurabilityIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DurabilityIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DurabilityIT.java
deleted file mode 100644
index 49e004f..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/DurabilityIT.java
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.minicluster.impl.ProcessReference;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterators;
-
-public class DurabilityIT extends ConfigurableMacBase {
- private static final Logger log = LoggerFactory.getLogger(DurabilityIT.class);
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
- cfg.setNumTservers(1);
- }
-
- static final long N = 100000;
-
- private String[] init() throws Exception {
- String[] tableNames = getUniqueNames(4);
- Connector c = getConnector();
- TableOperations tableOps = c.tableOperations();
- createTable(tableNames[0]);
- createTable(tableNames[1]);
- createTable(tableNames[2]);
- createTable(tableNames[3]);
- // default is sync
- tableOps.setProperty(tableNames[1], Property.TABLE_DURABILITY.getKey(), "flush");
- tableOps.setProperty(tableNames[2], Property.TABLE_DURABILITY.getKey(), "log");
- tableOps.setProperty(tableNames[3], Property.TABLE_DURABILITY.getKey(), "none");
- return tableNames;
- }
-
- private void cleanup(String[] tableNames) throws Exception {
- Connector c = getConnector();
- for (String tableName : tableNames) {
- c.tableOperations().delete(tableName);
- }
- }
-
- private void createTable(String tableName) throws Exception {
- TableOperations tableOps = getConnector().tableOperations();
- tableOps.create(tableName);
- }
-
- @Test(timeout = 2 * 60 * 1000)
- public void testWriteSpeed() throws Exception {
- TableOperations tableOps = getConnector().tableOperations();
- String tableNames[] = init();
- // write some data, then delete the table so it cannot skew the performance numbers of successive calls
- // sync
- long t0 = writeSome(tableNames[0], N);
- tableOps.delete(tableNames[0]);
- // flush
- long t1 = writeSome(tableNames[1], N);
- tableOps.delete(tableNames[1]);
- // log
- long t2 = writeSome(tableNames[2], N);
- tableOps.delete(tableNames[2]);
- // none
- long t3 = writeSome(tableNames[3], N);
- tableOps.delete(tableNames[3]);
- System.out.println(String.format("sync %d flush %d log %d none %d", t0, t1, t2, t3));
- assertTrue("flush should be faster than sync", t0 > t1);
- assertTrue("log should be faster than flush", t1 > t2);
- assertTrue("no durability should be faster than log", t2 > t3);
- }
-
- @Test(timeout = 4 * 60 * 1000)
- public void testSync() throws Exception {
- String tableNames[] = init();
- // sync table should lose nothing
- writeSome(tableNames[0], N);
- restartTServer();
- assertEquals(N, readSome(tableNames[0]));
- cleanup(tableNames);
- }
-
- @Test(timeout = 4 * 60 * 1000)
- public void testFlush() throws Exception {
- String tableNames[] = init();
- // the flush table won't lose anything, since we kill only the tserver, not power or DFS
- writeSome(tableNames[1], N);
- restartTServer();
- assertEquals(N, readSome(tableNames[1]));
- cleanup(tableNames);
- }
-
- @Test(timeout = 4 * 60 * 1000)
- public void testLog() throws Exception {
- String tableNames[] = init();
- // we're probably going to lose something with the log setting
- writeSome(tableNames[2], N);
- restartTServer();
- long numResults = readSome(tableNames[2]);
- assertTrue("Expected " + N + " >= " + numResults, N >= numResults);
- cleanup(tableNames);
- }
-
- @Test(timeout = 4 * 60 * 1000)
- public void testNone() throws Exception {
- String tableNames[] = init();
- // probably won't get any data back without logging
- writeSome(tableNames[3], N);
- restartTServer();
- long numResults = readSome(tableNames[3]);
- assertTrue("Expected " + N + " >= " + numResults, N >= numResults);
- cleanup(tableNames);
- }
-
- @Test(timeout = 4 * 60 * 1000)
- public void testIncreaseDurability() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- c.tableOperations().setProperty(tableName, Property.TABLE_DURABILITY.getKey(), "none");
- writeSome(tableName, N);
- restartTServer();
- long numResults = readSome(tableName);
- assertTrue("Expected " + N + " >= " + numResults, N >= numResults);
- c.tableOperations().setProperty(tableName, Property.TABLE_DURABILITY.getKey(), "sync");
- writeSome(tableName, N);
- restartTServer();
- assertTrue(N == readSome(tableName));
- }
-
- private static Map<String,String> map(Iterable<Entry<String,String>> entries) {
- Map<String,String> result = new HashMap<String,String>();
- for (Entry<String,String> entry : entries) {
- result.put(entry.getKey(), entry.getValue());
- }
- return result;
- }
-
- @Test(timeout = 4 * 60 * 1000)
- public void testMetaDurability() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.instanceOperations().setProperty(Property.TABLE_DURABILITY.getKey(), "none");
- Map<String,String> props = map(c.tableOperations().getProperties(MetadataTable.NAME));
- assertEquals("sync", props.get(Property.TABLE_DURABILITY.getKey()));
- c.tableOperations().create(tableName);
- props = map(c.tableOperations().getProperties(tableName));
- assertEquals("none", props.get(Property.TABLE_DURABILITY.getKey()));
- restartTServer();
- assertTrue(c.tableOperations().exists(tableName));
- }
-
- private long readSome(String table) throws Exception {
- return Iterators.size(getConnector().createScanner(table, Authorizations.EMPTY).iterator());
- }
-
- private void restartTServer() throws Exception {
- for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
- cluster.killProcess(ServerType.TABLET_SERVER, proc);
- }
- cluster.start();
- }
-
- private long writeSome(String table, long count) throws Exception {
- int iterations = 5;
- long[] attempts = new long[iterations];
- for (int attempt = 0; attempt < iterations; attempt++) {
- long now = System.currentTimeMillis();
- Connector c = getConnector();
- BatchWriter bw = c.createBatchWriter(table, null);
- for (int i = 1; i < count + 1; i++) {
- Mutation m = new Mutation("" + i);
- m.put("", "", "");
- bw.addMutation(m);
- if (i % (Math.max(1, count / 100)) == 0) {
- bw.flush();
- }
- }
- bw.close();
- attempts[attempt] = System.currentTimeMillis() - now;
- }
- Arrays.sort(attempts);
- log.info("Attempt durations: {}", Arrays.toString(attempts));
- // Return the median duration
- return attempts[2];
- }
-
-}
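For reference, the four durability levels the tests above exercise trade safety for speed, slowest to fastest: sync (the default), flush, log, none; testWriteSpeed asserts exactly that ordering. Durability is an ordinary table property:

    import org.apache.accumulo.core.conf.Property;

    // Weaken write-ahead-log durability for one table; "none" skips the WAL
    // entirely, so data written at that level may vanish if a tserver dies.
    c.tableOperations().setProperty(table, Property.TABLE_DURABILITY.getKey(), "flush");

Here `c` and `table` are placeholders for a live Connector and table name.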
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
deleted file mode 100644
index 2251d4b..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.fail;
-
-import java.util.Map;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.impl.ClientContext;
-import org.apache.accumulo.core.client.impl.Credentials;
-import org.apache.accumulo.core.client.impl.MasterClient;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.master.thrift.MasterClientService;
-import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
-import org.apache.accumulo.core.master.thrift.TableInfo;
-import org.apache.accumulo.core.master.thrift.TabletServerStatus;
-import org.apache.accumulo.core.trace.Tracer;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-public class DynamicThreadPoolsIT extends AccumuloClusterHarness {
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setNumTservers(1);
- Map<String,String> siteConfig = cfg.getSiteConfig();
- siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "100ms");
- cfg.setSiteConfig(siteConfig);
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 4 * 60;
- }
-
- private String majcDelay;
-
- @Before
- public void updateMajcDelay() throws Exception {
- Connector c = getConnector();
- majcDelay = c.instanceOperations().getSystemConfiguration().get(Property.TSERV_MAJC_DELAY.getKey());
- c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), "100ms");
- if (getClusterType() == ClusterType.STANDALONE) {
- Thread.sleep(AccumuloConfiguration.getTimeInMillis(majcDelay));
- }
- }
-
- @After
- public void resetMajcDelay() throws Exception {
- Connector c = getConnector();
- c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
- }
-
- @Test
- public void test() throws Exception {
- final String[] tables = getUniqueNames(15);
- String firstTable = tables[0];
- Connector c = getConnector();
- c.instanceOperations().setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "5");
- TestIngest.Opts opts = new TestIngest.Opts();
- opts.rows = 500 * 1000;
- opts.createTable = true;
- opts.setTableName(firstTable);
- ClientConfiguration clientConf = cluster.getClientConfig();
- if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- opts.updateKerberosCredentials(clientConf);
- } else {
- opts.setPrincipal(getAdminPrincipal());
- }
- TestIngest.ingest(c, opts, new BatchWriterOpts());
- c.tableOperations().flush(firstTable, null, null, true);
- for (int i = 1; i < tables.length; i++)
- c.tableOperations().clone(firstTable, tables[i], true, null, null);
- UtilWaitThread.sleep(11 * 1000); // time between checks of the thread pool sizes
- Credentials creds = new Credentials(getAdminPrincipal(), getAdminToken());
- for (int i = 1; i < tables.length; i++)
- c.tableOperations().compact(tables[i], null, null, true, false);
- for (int i = 0; i < 30; i++) {
- int count = 0;
- MasterClientService.Iface client = null;
- MasterMonitorInfo stats = null;
- try {
- client = MasterClient.getConnectionWithRetry(new ClientContext(c.getInstance(), creds, clientConf));
- stats = client.getMasterStats(Tracer.traceInfo(), creds.toThrift(c.getInstance()));
- } finally {
- if (client != null)
- MasterClient.close(client);
- }
- for (TabletServerStatus server : stats.tServerInfo) {
- for (TableInfo table : server.tableMap.values()) {
- count += table.majors.running;
- }
- }
- System.out.println("count " + count);
- if (count > 3)
- return;
- UtilWaitThread.sleep(500);
- }
- fail("Could not observe higher number of threads after changing the config");
- }
-}
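The property change at the heart of the test above takes effect without restarting any server, which is what the polling loop verifies by watching for more than three concurrent major compactions. The one-liner, with `c` as a placeholder Connector:

    import org.apache.accumulo.core.conf.Property;

    // Resize the tserver major-compaction thread pool at runtime; tservers
    // pick up the new value from ZooKeeper on their own.
    c.instanceOperations().setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "5");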
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java b/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
deleted file mode 100644
index 9175379..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
+++ /dev/null
@@ -1,613 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.NoSuchElementException;
-import java.util.Random;
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
-import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.tserver.NativeMap;
-import org.apache.hadoop.io.Text;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class NativeMapIT {
-
- private Key nk(int r) {
- return new Key(new Text(String.format("r%09d", r)));
- }
-
- private Key nk(int r, int cf, int cq, int cv, int ts, boolean deleted) {
- Key k = new Key(new Text(String.format("r%09d", r)), new Text(String.format("cf%09d", cf)), new Text(String.format("cq%09d", cq)), new Text(String.format(
- "cv%09d", cv)), ts);
-
- k.setDeleted(deleted);
-
- return k;
- }
-
- private Value nv(int v) {
- return new Value(String.format("r%09d", v).getBytes(UTF_8));
- }
-
- public static File nativeMapLocation() {
- File projectDir = new File(System.getProperty("user.dir")).getParentFile();
- File nativeMapDir = new File(projectDir, "server/native/target/accumulo-native-" + Constants.VERSION + "/accumulo-native-" + Constants.VERSION);
- return nativeMapDir;
- }
-
- @BeforeClass
- public static void setUp() {
- NativeMap.loadNativeLib(Collections.singletonList(nativeMapLocation()));
- }
-
- private void verifyIterator(int start, int end, int valueOffset, Iterator<Entry<Key,Value>> iter) {
- for (int i = start; i <= end; i++) {
- assertTrue(iter.hasNext());
- Entry<Key,Value> entry = iter.next();
- assertEquals(nk(i), entry.getKey());
- assertEquals(nv(i + valueOffset), entry.getValue());
- }
-
- assertFalse(iter.hasNext());
- }
-
- private void insertAndVerify(NativeMap nm, int start, int end, int valueOffset) {
- for (int i = start; i <= end; i++) {
- nm.put(nk(i), nv(i + valueOffset));
- }
-
- for (int i = start; i <= end; i++) {
- Value v = nm.get(nk(i));
- assertNotNull(v);
- assertEquals(nv(i + valueOffset), v);
-
- Iterator<Entry<Key,Value>> iter2 = nm.iterator(nk(i));
- assertTrue(iter2.hasNext());
- Entry<Key,Value> entry = iter2.next();
- assertEquals(nk(i), entry.getKey());
- assertEquals(nv(i + valueOffset), entry.getValue());
- }
-
- assertNull(nm.get(nk(start - 1)));
-
- assertNull(nm.get(nk(end + 1)));
-
- Iterator<Entry<Key,Value>> iter = nm.iterator();
- verifyIterator(start, end, valueOffset, iter);
-
- for (int i = start; i <= end; i++) {
- iter = nm.iterator(nk(i));
- verifyIterator(i, end, valueOffset, iter);
-
- // lookup nonexistent key that falls after existing key
- iter = nm.iterator(nk(i, 1, 1, 1, 1, false));
- verifyIterator(i + 1, end, valueOffset, iter);
- }
-
- assertEquals(end - start + 1, nm.size());
- }
-
- private void insertAndVerifyExhaustive(NativeMap nm, int num, int run) {
- for (int i = 0; i < num; i++) {
- for (int j = 0; j < num; j++) {
- for (int k = 0; k < num; k++) {
- for (int l = 0; l < num; l++) {
- for (int ts = 0; ts < num; ts++) {
- Key key = nk(i, j, k, l, ts, true);
- Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes(UTF_8));
-
- nm.put(key, value);
-
- key = nk(i, j, k, l, ts, false);
- value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes(UTF_8));
-
- nm.put(key, value);
- }
- }
- }
- }
- }
-
- Iterator<Entry<Key,Value>> iter = nm.iterator();
-
- for (int i = 0; i < num; i++) {
- for (int j = 0; j < num; j++) {
- for (int k = 0; k < num; k++) {
- for (int l = 0; l < num; l++) {
- for (int ts = num - 1; ts >= 0; ts--) {
- Key key = nk(i, j, k, l, ts, true);
- Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes(UTF_8));
-
- assertTrue(iter.hasNext());
- Entry<Key,Value> entry = iter.next();
- assertEquals(key, entry.getKey());
- assertEquals(value, entry.getValue());
-
- key = nk(i, j, k, l, ts, false);
- value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes(UTF_8));
-
- assertTrue(iter.hasNext());
- entry = iter.next();
- assertEquals(key, entry.getKey());
- assertEquals(value, entry.getValue());
- }
- }
- }
- }
- }
-
- assertFalse(iter.hasNext());
-
- for (int i = 0; i < num; i++) {
- for (int j = 0; j < num; j++) {
- for (int k = 0; k < num; k++) {
- for (int l = 0; l < num; l++) {
- for (int ts = 0; ts < num; ts++) {
- Key key = nk(i, j, k, l, ts, true);
- Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes(UTF_8));
-
- assertEquals(value, nm.get(key));
-
- Iterator<Entry<Key,Value>> iter2 = nm.iterator(key);
- assertTrue(iter2.hasNext());
- Entry<Key,Value> entry = iter2.next();
- assertEquals(key, entry.getKey());
- assertEquals(value, entry.getValue());
-
- key = nk(i, j, k, l, ts, false);
- value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes(UTF_8));
-
- assertEquals(value, nm.get(key));
-
- Iterator<Entry<Key,Value>> iter3 = nm.iterator(key);
- assertTrue(iter3.hasNext());
- Entry<Key,Value> entry2 = iter3.next();
- assertEquals(key, entry2.getKey());
- assertEquals(value, entry2.getValue());
- }
- }
- }
- }
- }
-
- assertEquals(num * num * num * num * num * 2, nm.size());
- }
-
- @Test
- public void test1() {
- NativeMap nm = new NativeMap();
- Iterator<Entry<Key,Value>> iter = nm.iterator();
- assertFalse(iter.hasNext());
- nm.delete();
- }
-
- @Test
- public void test2() {
- NativeMap nm = new NativeMap();
-
- insertAndVerify(nm, 1, 10, 0);
- insertAndVerify(nm, 1, 10, 1);
- insertAndVerify(nm, 1, 10, 2);
-
- nm.delete();
- }
-
- @Test
- public void test4() {
- NativeMap nm = new NativeMap();
-
- insertAndVerifyExhaustive(nm, 3, 0);
- insertAndVerifyExhaustive(nm, 3, 1);
-
- nm.delete();
- }
-
- @Test
- public void test5() {
- NativeMap nm = new NativeMap();
-
- insertAndVerify(nm, 1, 10, 0);
-
- Iterator<Entry<Key,Value>> iter = nm.iterator();
- iter.next();
-
- nm.delete();
-
- try {
- nm.put(nk(1), nv(1));
- assertTrue(false);
- } catch (IllegalStateException e) {
-
- }
-
- try {
- nm.get(nk(1));
- assertTrue(false);
- } catch (IllegalStateException e) {
-
- }
-
- try {
- nm.iterator();
- assertTrue(false);
- } catch (IllegalStateException e) {
-
- }
-
- try {
- nm.iterator(nk(1));
- assertTrue(false);
- } catch (IllegalStateException e) {
-
- }
-
- try {
- nm.size();
- assertTrue(false);
- } catch (IllegalStateException e) {
-
- }
-
- try {
- iter.next();
- assertTrue(false);
- } catch (IllegalStateException e) {
-
- }
-
- }
-
- @Test
- public void test7() {
- NativeMap nm = new NativeMap();
-
- insertAndVerify(nm, 1, 10, 0);
-
- nm.delete();
-
- try {
- nm.delete();
- assertTrue(false);
- } catch (IllegalStateException e) {
-
- }
- }
-
- @Test
- public void test8() {
- // test verifies that native map sorts keys sharing some common prefix properly
-
- NativeMap nm = new NativeMap();
-
- TreeMap<Key,Value> tm = new TreeMap<Key,Value>();
-
- tm.put(new Key(new Text("fo")), new Value(new byte[] {'0'}));
- tm.put(new Key(new Text("foo")), new Value(new byte[] {'1'}));
- tm.put(new Key(new Text("foo1")), new Value(new byte[] {'2'}));
- tm.put(new Key(new Text("foo2")), new Value(new byte[] {'3'}));
-
- for (Entry<Key,Value> entry : tm.entrySet()) {
- nm.put(entry.getKey(), entry.getValue());
- }
-
- Iterator<Entry<Key,Value>> iter = nm.iterator();
-
- for (Entry<Key,Value> entry : tm.entrySet()) {
- assertTrue(iter.hasNext());
- Entry<Key,Value> entry2 = iter.next();
-
- assertEquals(entry.getKey(), entry2.getKey());
- assertEquals(entry.getValue(), entry2.getValue());
- }
-
- assertFalse(iter.hasNext());
-
- nm.delete();
- }
-
- @Test
- public void test9() {
- NativeMap nm = new NativeMap();
-
- Iterator<Entry<Key,Value>> iter = nm.iterator();
-
- try {
- iter.next();
- assertTrue(false);
- } catch (NoSuchElementException e) {
-
- }
-
- insertAndVerify(nm, 1, 1, 0);
-
- iter = nm.iterator();
- iter.next();
-
- try {
- iter.next();
- assertTrue(false);
- } catch (NoSuchElementException e) {
-
- }
-
- nm.delete();
- }
-
- @Test
- public void test10() {
- int start = 1;
- int end = 10000;
-
- NativeMap nm = new NativeMap();
- for (int i = start; i <= end; i++) {
- nm.put(nk(i), nv(i));
- }
-
- long mem1 = nm.getMemoryUsed();
-
- for (int i = start; i <= end; i++) {
- nm.put(nk(i), nv(i));
- }
-
- long mem2 = nm.getMemoryUsed();
-
- if (mem1 != mem2) {
- throw new RuntimeException("Memory changed after inserting duplicate data " + mem1 + " " + mem2);
- }
-
- for (int i = start; i <= end; i++) {
- nm.put(nk(i), nv(i));
- }
-
- long mem3 = nm.getMemoryUsed();
-
- if (mem1 != mem3) {
- throw new RuntimeException("Memory changed after inserting duplicate data " + mem1 + " " + mem3);
- }
-
- byte bigrow[] = new byte[1000000];
- byte bigvalue[] = new byte[bigrow.length];
-
- for (int i = 0; i < bigrow.length; i++) {
- bigrow[i] = (byte) (0xff & (i % 256));
- bigvalue[i] = bigrow[i];
- }
-
- nm.put(new Key(new Text(bigrow)), new Value(bigvalue));
-
- long mem4 = nm.getMemoryUsed();
-
- Value val = nm.get(new Key(new Text(bigrow)));
- if (val == null || !val.equals(new Value(bigvalue))) {
- throw new RuntimeException("Did not get expected big value");
- }
-
- nm.put(new Key(new Text(bigrow)), new Value(bigvalue));
-
- long mem5 = nm.getMemoryUsed();
-
- if (mem4 != mem5) {
- throw new RuntimeException("Memory changed after inserting duplicate data " + mem4 + " " + mem5);
- }
-
- val = nm.get(new Key(new Text(bigrow)));
- if (val == null || !val.equals(new Value(bigvalue))) {
- throw new RuntimeException("Did not get expected big value");
- }
-
- nm.delete();
- }
-
- // random length random field
- private static byte[] rlrf(Random r, int maxLen) {
- int len = r.nextInt(maxLen);
-
- byte f[] = new byte[len];
- r.nextBytes(f);
-
- return f;
- }
-
- @Test
- public void test11() {
- NativeMap nm = new NativeMap();
-
- // insert things with varying field sizes and value sizes
-
- // generate random data
- Random r = new Random(75);
-
- ArrayList<Pair<Key,Value>> testData = new ArrayList<Pair<Key,Value>>();
-
- for (int i = 0; i < 100000; i++) {
-
- Key k = new Key(rlrf(r, 97), rlrf(r, 13), rlrf(r, 31), rlrf(r, 11), (r.nextLong() & 0x7fffffffffffffffl), false, false);
- Value v = new Value(rlrf(r, 511));
-
- testData.add(new Pair<Key,Value>(k, v));
- }
-
- // insert unsorted data
- for (Pair<Key,Value> pair : testData) {
- nm.put(pair.getFirst(), pair.getSecond());
- }
-
- for (int i = 0; i < 2; i++) {
-
- // sort data
- Collections.sort(testData, new Comparator<Pair<Key,Value>>() {
- @Override
- public int compare(Pair<Key,Value> o1, Pair<Key,Value> o2) {
- return o1.getFirst().compareTo(o2.getFirst());
- }
- });
-
- // verify
- Iterator<Entry<Key,Value>> iter1 = nm.iterator();
- Iterator<Pair<Key,Value>> iter2 = testData.iterator();
-
- while (iter1.hasNext() && iter2.hasNext()) {
- Entry<Key,Value> e = iter1.next();
- Pair<Key,Value> p = iter2.next();
-
- if (!e.getKey().equals(p.getFirst()))
- throw new RuntimeException("Keys not equal");
-
- if (!e.getValue().equals(p.getSecond()))
- throw new RuntimeException("Values not equal");
- }
-
- if (iter1.hasNext())
- throw new RuntimeException("Not all of native map consumed");
-
- if (iter2.hasNext())
- throw new RuntimeException("Not all of test data consumed");
-
- System.out.println("test 11 nm mem " + nm.getMemoryUsed());
-
- // insert data again w/ different value
- Collections.shuffle(testData, r);
- // insert unsorted data
- for (Pair<Key,Value> pair : testData) {
- pair.getSecond().set(rlrf(r, 511));
- nm.put(pair.getFirst(), pair.getSecond());
- }
- }
-
- nm.delete();
- }
-
- @Test
- public void testBinary() {
- NativeMap nm = new NativeMap();
-
- byte emptyBytes[] = new byte[0];
-
- for (int i = 0; i < 256; i++) {
- for (int j = 0; j < 256; j++) {
- byte row[] = new byte[] {'r', (byte) (0xff & i), (byte) (0xff & j)};
- byte data[] = new byte[] {'v', (byte) (0xff & i), (byte) (0xff & j)};
-
- Key k = new Key(row, emptyBytes, emptyBytes, emptyBytes, 1);
- Value v = new Value(data);
-
- nm.put(k, v);
- }
- }
-
- Iterator<Entry<Key,Value>> iter = nm.iterator();
- for (int i = 0; i < 256; i++) {
- for (int j = 0; j < 256; j++) {
- byte row[] = new byte[] {'r', (byte) (0xff & i), (byte) (0xff & j)};
- byte data[] = new byte[] {'v', (byte) (0xff & i), (byte) (0xff & j)};
-
- Key k = new Key(row, emptyBytes, emptyBytes, emptyBytes, 1);
- Value v = new Value(data);
-
- assertTrue(iter.hasNext());
- Entry<Key,Value> entry = iter.next();
-
- assertEquals(k, entry.getKey());
- assertEquals(v, entry.getValue());
-
- }
- }
-
- assertFalse(iter.hasNext());
-
- for (int i = 0; i < 256; i++) {
- for (int j = 0; j < 256; j++) {
- byte row[] = new byte[] {'r', (byte) (0xff & i), (byte) (0xff & j)};
- byte data[] = new byte[] {'v', (byte) (0xff & i), (byte) (0xff & j)};
-
- Key k = new Key(row, emptyBytes, emptyBytes, emptyBytes, 1);
- Value v = new Value(data);
-
- Value v2 = nm.get(k);
-
- assertEquals(v, v2);
- }
- }
-
- nm.delete();
- }
-
- @Test
- public void testEmpty() {
- NativeMap nm = new NativeMap();
-
- assertTrue(nm.size() == 0);
- assertTrue(nm.getMemoryUsed() == 0);
-
- nm.delete();
- }
-
- @Test
- public void testConcurrentIter() throws IOException {
- NativeMap nm = new NativeMap();
-
- nm.put(nk(0), nv(0));
- nm.put(nk(1), nv(1));
- nm.put(nk(3), nv(3));
-
- SortedKeyValueIterator<Key,Value> iter = nm.skvIterator();
-
- // modify map after iter created
- nm.put(nk(2), nv(2));
-
- assertTrue(iter.hasTop());
- assertEquals(iter.getTopKey(), nk(0));
- iter.next();
-
- assertTrue(iter.hasTop());
- assertEquals(iter.getTopKey(), nk(1));
- iter.next();
-
- assertTrue(iter.hasTop());
- assertEquals(iter.getTopKey(), nk(2));
- iter.next();
-
- assertTrue(iter.hasTop());
- assertEquals(iter.getTopKey(), nk(3));
- iter.next();
-
- assertFalse(iter.hasTop());
-
- nm.delete();
- }
-
-}
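The deleted test drives the off-heap NativeMap directly rather than through a tserver. A minimal lifecycle sketch, assuming the native library was loaded as in setUp() above; the row and value bytes are arbitrary:

    import static java.nio.charset.StandardCharsets.UTF_8;

    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.tserver.NativeMap;
    import org.apache.hadoop.io.Text;

    // Entries live in native memory; delete() frees them, and any use of the
    // map or its iterators afterwards throws IllegalStateException (tests 5, 7).
    NativeMap nm = new NativeMap();
    nm.put(new Key(new Text("row1")), new Value("v1".getBytes(UTF_8)));
    assert nm.size() == 1;
    nm.delete();   // exactly once; a second delete() also throws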
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
deleted file mode 100644
index 8700891..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
+++ /dev/null
@@ -1,707 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.security.SecurityErrorCode;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.SystemPermission;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-// This test verifies the default permissions, so a clean instance must be used. A shared instance might
-// not be representative of a fresh installation.
-public class PermissionsIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(PermissionsIT.class);
-
- @Override
- public int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Before
- public void limitToMini() throws Exception {
- Assume.assumeTrue(ClusterType.MINI == getClusterType());
- Connector c = getConnector();
- Set<String> users = c.securityOperations().listLocalUsers();
- ClusterUser user = getUser(0);
- if (users.contains(user.getPrincipal())) {
- c.securityOperations().dropLocalUser(user.getPrincipal());
- }
- }
-
- private void loginAs(ClusterUser user) throws IOException {
- // Force a re-login as the provided user
- user.getToken();
- }
-
- @Test
- public void systemPermissionsTest() throws Exception {
- ClusterUser testUser = getUser(0), rootUser = getAdminUser();
-
- // verify that the test is being run by root
- Connector c = getConnector();
- verifyHasOnlyTheseSystemPermissions(c, c.whoami(), SystemPermission.values());
-
- // create the test user
- String principal = testUser.getPrincipal();
- AuthenticationToken token = testUser.getToken();
- PasswordToken passwordToken = null;
- if (token instanceof PasswordToken) {
- passwordToken = (PasswordToken) token;
- }
- loginAs(rootUser);
- c.securityOperations().createLocalUser(principal, passwordToken);
- loginAs(testUser);
- Connector test_user_conn = c.getInstance().getConnector(principal, token);
- loginAs(rootUser);
- verifyHasNoSystemPermissions(c, principal, SystemPermission.values());
-
- // test each permission
- for (SystemPermission perm : SystemPermission.values()) {
- log.debug("Verifying the " + perm + " permission");
-
- // test permission before and after granting it
- String tableNamePrefix = getUniqueNames(1)[0];
- testMissingSystemPermission(tableNamePrefix, c, rootUser, test_user_conn, testUser, perm);
- loginAs(rootUser);
- c.securityOperations().grantSystemPermission(principal, perm);
- verifyHasOnlyTheseSystemPermissions(c, principal, perm);
- testGrantedSystemPermission(tableNamePrefix, c, rootUser, test_user_conn, testUser, perm);
- loginAs(rootUser);
- c.securityOperations().revokeSystemPermission(principal, perm);
- verifyHasNoSystemPermissions(c, principal, perm);
- }
- }
-
- static Map<String,String> map(Iterable<Entry<String,String>> i) {
- Map<String,String> result = new HashMap<String,String>();
- for (Entry<String,String> e : i) {
- result.put(e.getKey(), e.getValue());
- }
- return result;
- }
-
- private void testMissingSystemPermission(String tableNamePrefix, Connector root_conn, ClusterUser rootUser, Connector test_user_conn, ClusterUser testUser,
- SystemPermission perm) throws Exception {
- String tableName, user, password = "password", namespace;
- boolean passwordBased = testUser.getPassword() != null;
- log.debug("Confirming that the lack of the " + perm + " permission properly restricts the user");
-
- // test permission prior to granting it
- switch (perm) {
- case CREATE_TABLE:
- tableName = tableNamePrefix + "__CREATE_TABLE_WITHOUT_PERM_TEST__";
- try {
- loginAs(testUser);
- test_user_conn.tableOperations().create(tableName);
- throw new IllegalStateException("Should NOT be able to create a table");
- } catch (AccumuloSecurityException e) {
- loginAs(rootUser);
- if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || root_conn.tableOperations().list().contains(tableName))
- throw e;
- }
- break;
- case DROP_TABLE:
- tableName = tableNamePrefix + "__DROP_TABLE_WITHOUT_PERM_TEST__";
- loginAs(rootUser);
- root_conn.tableOperations().create(tableName);
- try {
- loginAs(testUser);
- test_user_conn.tableOperations().delete(tableName);
- throw new IllegalStateException("Should NOT be able to delete a table");
- } catch (AccumuloSecurityException e) {
- loginAs(rootUser);
- if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.tableOperations().list().contains(tableName))
- throw e;
- }
- break;
- case ALTER_TABLE:
- tableName = tableNamePrefix + "__ALTER_TABLE_WITHOUT_PERM_TEST__";
- loginAs(rootUser);
- root_conn.tableOperations().create(tableName);
- try {
- loginAs(testUser);
- test_user_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
- throw new IllegalStateException("Should NOT be able to set a table property");
- } catch (AccumuloSecurityException e) {
- loginAs(rootUser);
- if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
- || map(root_conn.tableOperations().getProperties(tableName)).get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
- throw e;
- }
- loginAs(rootUser);
- root_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
- try {
- loginAs(testUser);
- test_user_conn.tableOperations().removeProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey());
- throw new IllegalStateException("Should NOT be able to remove a table property");
- } catch (AccumuloSecurityException e) {
- loginAs(rootUser);
- if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
- || !map(root_conn.tableOperations().getProperties(tableName)).get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
- throw e;
- }
- String table2 = tableName + "2";
- try {
- loginAs(testUser);
- test_user_conn.tableOperations().rename(tableName, table2);
- throw new IllegalStateException("Should NOT be able to rename a table");
- } catch (AccumuloSecurityException e) {
- loginAs(rootUser);
- if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.tableOperations().list().contains(tableName)
- || root_conn.tableOperations().list().contains(table2))
- throw e;
- }
- break;
- case CREATE_USER:
- user = "__CREATE_USER_WITHOUT_PERM_TEST__";
- try {
- loginAs(testUser);
- test_user_conn.securityOperations().createLocalUser(user, (passwordBased ? new PasswordToken(password) : null));
- throw new IllegalStateException("Should NOT be able to create a user");
- } catch (AccumuloSecurityException e) {
- AuthenticationToken userToken = testUser.getToken();
- loginAs(rootUser);
- if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
- || (userToken instanceof PasswordToken && root_conn.securityOperations().authenticateUser(user, userToken)))
- throw e;
- }
- break;
- case DROP_USER:
- user = "__DROP_USER_WITHOUT_PERM_TEST__";
- loginAs(rootUser);
- root_conn.securityOperations().createLocalUser(user, (passwordBased ? new PasswordToken(password) : null));
- try {
- loginAs(testUser);
- test_user_conn.securityOperations().dropLocalUser(user);
- throw new IllegalStateException("Should NOT be able to delete a user");
- } catch (AccumuloSecurityException e) {
- loginAs(rootUser);
- if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.securityOperations().listLocalUsers().contains(user)) {
- log.info("Failed to authenticate as " + user);
- throw e;
- }
- }
- break;
- case ALTER_USER:
- user = "__ALTER_USER_WITHOUT_PERM_TEST__";
- loginAs(rootUser);
- root_conn.securityOperations().createLocalUser(user, (passwordBased ? new PasswordToken(password) : null));
- try {
- loginAs(testUser);
- test_user_conn.securityOperations().changeUserAuthorizations(user, new Authorizations("A", "B"));
- throw new IllegalStateException("Should NOT be able to alter a user");
- } catch (AccumuloSecurityException e) {
- loginAs(rootUser);
- if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.securityOperations().getUserAuthorizations(user).isEmpty())
- throw e;
- }
- break;
- case SYSTEM:
- // test for system permission would go here
- break;
- case CREATE_NAMESPACE:
- namespace = "__CREATE_NAMESPACE_WITHOUT_PERM_TEST__";
- try {
- loginAs(testUser);
- test_user_conn.namespaceOperations().create(namespace);
- throw new IllegalStateException("Should NOT be able to create a namespace");
- } catch (AccumuloSecurityException e) {
- loginAs(rootUser);
- if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || root_conn.namespaceOperations().list().contains(namespace))
- throw e;
- }
- break;
- case DROP_NAMESPACE:
- namespace = "__DROP_NAMESPACE_WITHOUT_PERM_TEST__";
- loginAs(rootUser);
- root_conn.namespaceOperations().create(namespace);
- try {
- loginAs(testUser);
- test_user_conn.namespaceOperations().delete(namespace);
- throw new IllegalStateException("Should NOT be able to delete a namespace");
- } catch (AccumuloSecurityException e) {
- loginAs(rootUser);
- if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.namespaceOperations().list().contains(namespace))
- throw e;
- }
- break;
- case ALTER_NAMESPACE:
- namespace = "__ALTER_NAMESPACE_WITHOUT_PERM_TEST__";
- loginAs(rootUser);
- root_conn.namespaceOperations().create(namespace);
- try {
- loginAs(testUser);
- test_user_conn.namespaceOperations().setProperty(namespace, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
- throw new IllegalStateException("Should NOT be able to set a namespace property");
- } catch (AccumuloSecurityException e) {
- loginAs(rootUser);
- if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
- || map(root_conn.namespaceOperations().getProperties(namespace)).get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
- throw e;
- }
- loginAs(rootUser);
- root_conn.namespaceOperations().setProperty(namespace, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
- try {
- loginAs(testUser);
- test_user_conn.namespaceOperations().removeProperty(namespace, Property.TABLE_BLOOM_ERRORRATE.getKey());
- throw new IllegalStateException("Should NOT be able to remove a namespace property");
- } catch (AccumuloSecurityException e) {
- loginAs(rootUser);
- if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
- || !map(root_conn.namespaceOperations().getProperties(namespace)).get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
- throw e;
- }
- String namespace2 = namespace + "2";
- try {
- loginAs(testUser);
- test_user_conn.namespaceOperations().rename(namespace, namespace2);
- throw new IllegalStateException("Should NOT be able to rename a namespace");
- } catch (AccumuloSecurityException e) {
- loginAs(rootUser);
- if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.namespaceOperations().list().contains(namespace)
- || root_conn.namespaceOperations().list().contains(namespace2))
- throw e;
- }
- break;
- case OBTAIN_DELEGATION_TOKEN:
- ClientConfiguration clientConf = cluster.getClientConfig();
- if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- // TODO Try to obtain a delegation token without the permission
- }
- break;
- case GRANT:
- loginAs(testUser);
- try {
- test_user_conn.securityOperations().grantSystemPermission(testUser.getPrincipal(), SystemPermission.GRANT);
- throw new IllegalStateException("Should NOT be able to grant System.GRANT to yourself");
- } catch (AccumuloSecurityException e) {
- // Expected
- loginAs(rootUser);
- assertFalse(root_conn.securityOperations().hasSystemPermission(testUser.getPrincipal(), SystemPermission.GRANT));
- }
- break;
- default:
- throw new IllegalArgumentException("Unrecognized System Permission: " + perm);
- }
- }
-
- private void testGrantedSystemPermission(String tableNamePrefix, Connector root_conn, ClusterUser rootUser, Connector test_user_conn, ClusterUser testUser,
- SystemPermission perm) throws Exception {
- String tableName, user, password = "password", namespace;
- boolean passwordBased = testUser.getPassword() != null;
- log.debug("Confirming that the presence of the " + perm + " permission properly permits the user");
-
- // test permission after granting it
- switch (perm) {
- case CREATE_TABLE:
- tableName = tableNamePrefix + "__CREATE_TABLE_WITH_PERM_TEST__";
- loginAs(testUser);
- test_user_conn.tableOperations().create(tableName);
- loginAs(rootUser);
- if (!root_conn.tableOperations().list().contains(tableName))
- throw new IllegalStateException("Should be able to create a table");
- break;
- case DROP_TABLE:
- tableName = tableNamePrefix + "__DROP_TABLE_WITH_PERM_TEST__";
- loginAs(rootUser);
- root_conn.tableOperations().create(tableName);
- loginAs(testUser);
- test_user_conn.tableOperations().delete(tableName);
- loginAs(rootUser);
- if (root_conn.tableOperations().list().contains(tableName))
- throw new IllegalStateException("Should be able to delete a table");
- break;
- case ALTER_TABLE:
- tableName = tableNamePrefix + "__ALTER_TABLE_WITH_PERM_TEST__";
- String table2 = tableName + "2";
- loginAs(rootUser);
- root_conn.tableOperations().create(tableName);
- loginAs(testUser);
- test_user_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
- loginAs(rootUser);
- Map<String,String> properties = map(root_conn.tableOperations().getProperties(tableName));
- if (!properties.get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
- throw new IllegalStateException("Should be able to set a table property");
- loginAs(testUser);
- test_user_conn.tableOperations().removeProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey());
- loginAs(rootUser);
- properties = map(root_conn.tableOperations().getProperties(tableName));
- if (properties.get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
- throw new IllegalStateException("Should be able to remove a table property");
- loginAs(testUser);
- test_user_conn.tableOperations().rename(tableName, table2);
- loginAs(rootUser);
- if (root_conn.tableOperations().list().contains(tableName) || !root_conn.tableOperations().list().contains(table2))
- throw new IllegalStateException("Should be able to rename a table");
- break;
- case CREATE_USER:
- user = "__CREATE_USER_WITH_PERM_TEST__";
- loginAs(testUser);
- test_user_conn.securityOperations().createLocalUser(user, (passwordBased ? new PasswordToken(password) : null));
- loginAs(rootUser);
- if (passwordBased && !root_conn.securityOperations().authenticateUser(user, new PasswordToken(password)))
- throw new IllegalStateException("Should be able to create a user");
- break;
- case DROP_USER:
- user = "__DROP_USER_WITH_PERM_TEST__";
- loginAs(rootUser);
- root_conn.securityOperations().createLocalUser(user, (passwordBased ? new PasswordToken(password) : null));
- loginAs(testUser);
- test_user_conn.securityOperations().dropLocalUser(user);
- loginAs(rootUser);
- if (passwordBased && root_conn.securityOperations().authenticateUser(user, new PasswordToken(password)))
- throw new IllegalStateException("Should be able to delete a user");
- break;
- case ALTER_USER:
- user = "__ALTER_USER_WITH_PERM_TEST__";
- loginAs(rootUser);
- root_conn.securityOperations().createLocalUser(user, (passwordBased ? new PasswordToken(password) : null));
- loginAs(testUser);
- test_user_conn.securityOperations().changeUserAuthorizations(user, new Authorizations("A", "B"));
- loginAs(rootUser);
- if (root_conn.securityOperations().getUserAuthorizations(user).isEmpty())
- throw new IllegalStateException("Should be able to alter a user");
- break;
- case SYSTEM:
- // test for system permission would go here
- break;
- case CREATE_NAMESPACE:
- namespace = "__CREATE_NAMESPACE_WITH_PERM_TEST__";
- loginAs(testUser);
- test_user_conn.namespaceOperations().create(namespace);
- loginAs(rootUser);
- if (!root_conn.namespaceOperations().list().contains(namespace))
- throw new IllegalStateException("Should be able to create a namespace");
- break;
- case DROP_NAMESPACE:
- namespace = "__DROP_NAMESPACE_WITH_PERM_TEST__";
- loginAs(rootUser);
- root_conn.namespaceOperations().create(namespace);
- loginAs(testUser);
- test_user_conn.namespaceOperations().delete(namespace);
- loginAs(rootUser);
- if (root_conn.namespaceOperations().list().contains(namespace))
- throw new IllegalStateException("Should be able to delete a namespace");
- break;
- case ALTER_NAMESPACE:
- namespace = "__ALTER_NAMESPACE_WITH_PERM_TEST__";
- String namespace2 = namespace + "2";
- loginAs(rootUser);
- root_conn.namespaceOperations().create(namespace);
- loginAs(testUser);
- test_user_conn.namespaceOperations().setProperty(namespace, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
- loginAs(rootUser);
- Map<String,String> props = map(root_conn.namespaceOperations().getProperties(namespace));
- if (!props.get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
- throw new IllegalStateException("Should be able to set a namespace property");
- loginAs(testUser);
- test_user_conn.namespaceOperations().removeProperty(namespace, Property.TABLE_BLOOM_ERRORRATE.getKey());
- loginAs(rootUser);
- props = map(root_conn.namespaceOperations().getProperties(namespace));
- if (props.get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
- throw new IllegalStateException("Should be able to remove a namespace property");
- loginAs(testUser);
- test_user_conn.namespaceOperations().rename(namespace, namespace2);
- loginAs(rootUser);
- if (root_conn.namespaceOperations().list().contains(namespace) || !root_conn.namespaceOperations().list().contains(namespace2))
- throw new IllegalStateException("Should be able to rename a table");
- break;
- case OBTAIN_DELEGATION_TOKEN:
- ClientConfiguration clientConf = cluster.getClientConfig();
- if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- // TODO Try to obtain a delegation token with the permission
- }
- break;
- case GRANT:
- loginAs(rootUser);
- root_conn.securityOperations().grantSystemPermission(testUser.getPrincipal(), SystemPermission.GRANT);
- loginAs(testUser);
- test_user_conn.securityOperations().grantSystemPermission(testUser.getPrincipal(), SystemPermission.CREATE_TABLE);
- loginAs(rootUser);
- assertTrue("Test user should have CREATE_TABLE",
- root_conn.securityOperations().hasSystemPermission(testUser.getPrincipal(), SystemPermission.CREATE_TABLE));
- assertTrue("Test user should have GRANT", root_conn.securityOperations().hasSystemPermission(testUser.getPrincipal(), SystemPermission.GRANT));
- root_conn.securityOperations().revokeSystemPermission(testUser.getPrincipal(), SystemPermission.CREATE_TABLE);
- break;
- default:
- throw new IllegalArgumentException("Unrecognized System Permission: " + perm);
- }
- }
-
- private void verifyHasOnlyTheseSystemPermissions(Connector root_conn, String user, SystemPermission... perms) throws AccumuloException,
- AccumuloSecurityException {
- List<SystemPermission> permList = Arrays.asList(perms);
- for (SystemPermission p : SystemPermission.values()) {
- if (permList.contains(p)) {
- // should have these
- if (!root_conn.securityOperations().hasSystemPermission(user, p))
- throw new IllegalStateException(user + " SHOULD have system permission " + p);
- } else {
- // should not have these
- if (root_conn.securityOperations().hasSystemPermission(user, p))
- throw new IllegalStateException(user + " SHOULD NOT have system permission " + p);
- }
- }
- }
-
- private void verifyHasNoSystemPermissions(Connector root_conn, String user, SystemPermission... perms) throws AccumuloException, AccumuloSecurityException {
- for (SystemPermission p : perms)
- if (root_conn.securityOperations().hasSystemPermission(user, p))
- throw new IllegalStateException(user + " SHOULD NOT have system permission " + p);
- }
-
- @Test
- public void tablePermissionTest() throws Exception {
- // create the test user
- ClusterUser testUser = getUser(0), rootUser = getAdminUser();
-
- String principal = testUser.getPrincipal();
- AuthenticationToken token = testUser.getToken();
- PasswordToken passwordToken = null;
- if (token instanceof PasswordToken) {
- passwordToken = (PasswordToken) token;
- }
- loginAs(rootUser);
- Connector c = getConnector();
- c.securityOperations().createLocalUser(principal, passwordToken);
- loginAs(testUser);
- Connector test_user_conn = c.getInstance().getConnector(principal, token);
-
- // check for read-only access to metadata table
- loginAs(rootUser);
- verifyHasOnlyTheseTablePermissions(c, c.whoami(), MetadataTable.NAME, TablePermission.READ, TablePermission.ALTER_TABLE);
- verifyHasOnlyTheseTablePermissions(c, principal, MetadataTable.NAME, TablePermission.READ);
- String tableName = getUniqueNames(1)[0] + "__TABLE_PERMISSION_TEST__";
-
- // test each permission
- for (TablePermission perm : TablePermission.values()) {
- log.debug("Verifying the " + perm + " permission");
-
- // test permission before and after granting it
- createTestTable(c, principal, tableName);
- loginAs(testUser);
- testMissingTablePermission(test_user_conn, testUser, perm, tableName);
- loginAs(rootUser);
- c.securityOperations().grantTablePermission(principal, tableName, perm);
- verifyHasOnlyTheseTablePermissions(c, principal, tableName, perm);
- loginAs(testUser);
- testGrantedTablePermission(test_user_conn, testUser, perm, tableName);
-
- loginAs(rootUser);
- createTestTable(c, principal, tableName);
- c.securityOperations().revokeTablePermission(principal, tableName, perm);
- verifyHasNoTablePermissions(c, principal, tableName, perm);
- }
- }
-
- private void createTestTable(Connector c, String testUser, String tableName) throws Exception {
- if (!c.tableOperations().exists(tableName)) {
- // create the test table
- c.tableOperations().create(tableName);
- // put in some initial data
- BatchWriter writer = c.createBatchWriter(tableName, new BatchWriterConfig());
- Mutation m = new Mutation(new Text("row"));
- m.put(new Text("cf"), new Text("cq"), new Value("val".getBytes()));
- writer.addMutation(m);
- writer.close();
-
- // verify proper permissions for creator and test user
- verifyHasOnlyTheseTablePermissions(c, c.whoami(), tableName, TablePermission.values());
- verifyHasNoTablePermissions(c, testUser, tableName, TablePermission.values());
-
- }
- }
-
- private void testMissingTablePermission(Connector test_user_conn, ClusterUser testUser, TablePermission perm, String tableName) throws Exception {
- Scanner scanner;
- BatchWriter writer;
- Mutation m;
- log.debug("Confirming that the lack of the " + perm + " permission properly restricts the user");
-
- // test permission prior to granting it
- switch (perm) {
- case READ:
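- // scans fail lazily: the permission error surfaces during iteration as a RuntimeException wrapping an AccumuloSecurityException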
- try {
- scanner = test_user_conn.createScanner(tableName, Authorizations.EMPTY);
- int i = 0;
- for (Entry<Key,Value> entry : scanner)
- i += 1 + entry.getKey().getRowData().length();
- if (i != 0)
- throw new IllegalStateException("Should NOT be able to read from the table");
- } catch (RuntimeException e) {
- AccumuloSecurityException se = (AccumuloSecurityException) e.getCause();
- if (se.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
- throw se;
- }
- break;
- case WRITE:
- try {
- writer = test_user_conn.createBatchWriter(tableName, new BatchWriterConfig());
- m = new Mutation(new Text("row"));
- m.put(new Text("a"), new Text("b"), new Value("c".getBytes()));
- writer.addMutation(m);
- try {
- writer.close();
- } catch (MutationsRejectedException e1) {
- if (e1.getSecurityErrorCodes().size() > 0)
- throw new AccumuloSecurityException(test_user_conn.whoami(), org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode.PERMISSION_DENIED, e1);
- }
- throw new IllegalStateException("Should NOT be able to write to a table");
- } catch (AccumuloSecurityException e) {
- if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
- throw e;
- }
- break;
- case BULK_IMPORT:
- // test for bulk import permission would go here
- break;
- case ALTER_TABLE:
- Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
- groups.put("tgroup", new HashSet<Text>(Arrays.asList(new Text("t1"), new Text("t2"))));
- try {
- test_user_conn.tableOperations().setLocalityGroups(tableName, groups);
- throw new IllegalStateException("User should not be able to set locality groups");
- } catch (AccumuloSecurityException e) {
- if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
- throw e;
- }
- break;
- case DROP_TABLE:
- try {
- test_user_conn.tableOperations().delete(tableName);
- throw new IllegalStateException("User should not be able delete the table");
- } catch (AccumuloSecurityException e) {
- if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
- throw e;
- }
- break;
- case GRANT:
- try {
- test_user_conn.securityOperations().grantTablePermission(getAdminPrincipal(), tableName, TablePermission.GRANT);
- throw new IllegalStateException("User should not be able grant permissions");
- } catch (AccumuloSecurityException e) {
- if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
- throw e;
- }
- break;
- default:
- throw new IllegalArgumentException("Unrecognized table Permission: " + perm);
- }
- }
-
- private void testGrantedTablePermission(Connector test_user_conn, ClusterUser normalUser, TablePermission perm, String tableName) throws AccumuloException,
- TableExistsException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
- Scanner scanner;
- BatchWriter writer;
- Mutation m;
- log.debug("Confirming that the presence of the " + perm + " permission properly permits the user");
-
- // test permission after granting it
- switch (perm) {
- case READ:
- scanner = test_user_conn.createScanner(tableName, Authorizations.EMPTY);
- Iterator<Entry<Key,Value>> iter = scanner.iterator();
- while (iter.hasNext())
- iter.next();
- break;
- case WRITE:
- writer = test_user_conn.createBatchWriter(tableName, new BatchWriterConfig());
- m = new Mutation(new Text("row"));
- m.put(new Text("a"), new Text("b"), new Value("c".getBytes()));
- writer.addMutation(m);
- writer.close();
- break;
- case BULK_IMPORT:
- // test for bulk import permission would go here
- break;
- case ALTER_TABLE:
- Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
- groups.put("tgroup", new HashSet<Text>(Arrays.asList(new Text("t1"), new Text("t2"))));
- // actually exercise the granted permission by applying the locality groups
- test_user_conn.tableOperations().setLocalityGroups(tableName, groups);
- break;
- case DROP_TABLE:
- test_user_conn.tableOperations().delete(tableName);
- break;
- case GRANT:
- test_user_conn.securityOperations().grantTablePermission(getAdminPrincipal(), tableName, TablePermission.GRANT);
- break;
- default:
- throw new IllegalArgumentException("Unrecognized table Permission: " + perm);
- }
- }
-
- private void verifyHasOnlyTheseTablePermissions(Connector root_conn, String user, String table, TablePermission... perms) throws AccumuloException,
- AccumuloSecurityException {
- List<TablePermission> permList = Arrays.asList(perms);
- for (TablePermission p : TablePermission.values()) {
- if (permList.contains(p)) {
- // should have these
- if (!root_conn.securityOperations().hasTablePermission(user, table, p))
- throw new IllegalStateException(user + " SHOULD have table permission " + p + " for table " + table);
- } else {
- // should not have these
- if (root_conn.securityOperations().hasTablePermission(user, table, p))
- throw new IllegalStateException(user + " SHOULD NOT have table permission " + p + " for table " + table);
- }
- }
- }
-
- private void verifyHasNoTablePermissions(Connector root_conn, String user, String table, TablePermission... perms) throws AccumuloException,
- AccumuloSecurityException {
- for (TablePermission p : perms)
- if (root_conn.securityOperations().hasTablePermission(user, table, p))
- throw new IllegalStateException(user + " SHOULD NOT have table permission " + p + " for table " + table);
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
deleted file mode 100644
index 4ef2958..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
+++ /dev/null
@@ -1,456 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.accumulo.cluster.ClusterControl;
-import org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster;
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.file.rfile.PrintInfo;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MonitorUtil;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.ZooCache;
-import org.apache.accumulo.fate.zookeeper.ZooLock;
-import org.apache.accumulo.fate.zookeeper.ZooReader;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.TestMultiTableIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Charsets;
-import com.google.common.collect.Iterators;
-
-public class ReadWriteIT extends AccumuloClusterHarness {
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
- }
-
- private static final Logger log = LoggerFactory.getLogger(ReadWriteIT.class);
-
- static final int ROWS = 200000;
- static final int COLS = 1;
- static final String COLF = "colf";
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 6 * 60;
- }
-
- @Test(expected = RuntimeException.class)
- public void invalidInstanceName() throws Exception {
- final Connector conn = getConnector();
- new ZooKeeperInstance("fake_instance_name", conn.getInstance().getZooKeepers());
- }
-
- @Test
- public void sunnyDay() throws Exception {
- // Start accumulo, create a table, insert some data, verify we can read it out.
- // Shutdown cleanly.
- log.debug("Starting Monitor");
- cluster.getClusterControl().startAllServers(ServerType.MONITOR);
- Connector connector = getConnector();
- String tableName = getUniqueNames(1)[0];
- ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS, COLS, 50, 0, tableName);
- verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS, COLS, 50, 0, tableName);
- String monitorLocation = null;
- while (null == monitorLocation) {
- monitorLocation = MonitorUtil.getLocation(getConnector().getInstance());
- if (null == monitorLocation) {
- log.debug("Could not fetch monitor HTTP address from zookeeper");
- Thread.sleep(2000);
- }
- }
- URL url = new URL("http://" + monitorLocation);
- log.debug("Fetching web page " + url);
- String result = FunctionalTestUtils.readAll(url.openStream());
- assertTrue(result.length() > 100);
- log.debug("Stopping accumulo cluster");
- ClusterControl control = cluster.getClusterControl();
- control.adminStopAll();
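- // wait for the master to release its ZooKeeper lock, which signals that shutdown has completed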
- ZooReader zreader = new ZooReader(connector.getInstance().getZooKeepers(), connector.getInstance().getZooKeepersSessionTimeOut());
- ZooCache zcache = new ZooCache(zreader, null);
- byte[] masterLockData;
- do {
- masterLockData = ZooLock.getLockData(zcache, ZooUtil.getRoot(connector.getInstance()) + Constants.ZMASTER_LOCK, null);
- if (null != masterLockData) {
- log.info("Master lock is still held");
- Thread.sleep(1000);
- }
- } while (null != masterLockData);
-
- control.stopAllServers(ServerType.GARBAGE_COLLECTOR);
- control.stopAllServers(ServerType.MONITOR);
- control.stopAllServers(ServerType.TRACER);
- log.debug("success!");
- // Restarting everything
- cluster.start();
- }
-
- public static void ingest(Connector connector, ClientConfiguration clientConfig, String principal, int rows, int cols, int width, int offset, String tableName)
- throws Exception {
- ingest(connector, clientConfig, principal, rows, cols, width, offset, COLF, tableName);
- }
-
- public static void ingest(Connector connector, ClientConfiguration clientConfig, String principal, int rows, int cols, int width, int offset, String colf,
- String tableName) throws Exception {
- TestIngest.Opts opts = new TestIngest.Opts();
- opts.rows = rows;
- opts.cols = cols;
- opts.dataSize = width;
- opts.startRow = offset;
- opts.columnFamily = colf;
- opts.createTable = true;
- opts.setTableName(tableName);
- if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- opts.updateKerberosCredentials(clientConfig);
- } else {
- opts.setPrincipal(principal);
- }
-
- TestIngest.ingest(connector, opts, new BatchWriterOpts());
- }
-
- public static void verify(Connector connector, ClientConfiguration clientConfig, String principal, int rows, int cols, int width, int offset, String tableName)
- throws Exception {
- verify(connector, clientConfig, principal, rows, cols, width, offset, COLF, tableName);
- }
-
- private static void verify(Connector connector, ClientConfiguration clientConfig, String principal, int rows, int cols, int width, int offset, String colf,
- String tableName) throws Exception {
- ScannerOpts scannerOpts = new ScannerOpts();
- VerifyIngest.Opts opts = new VerifyIngest.Opts();
- opts.rows = rows;
- opts.cols = cols;
- opts.dataSize = width;
- opts.startRow = offset;
- opts.columnFamily = colf;
- opts.setTableName(tableName);
- if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- opts.updateKerberosCredentials(clientConfig);
- } else {
- opts.setPrincipal(principal);
- }
-
- VerifyIngest.verifyIngest(connector, opts, scannerOpts);
- }
-
- public static String[] args(String... args) {
- return args;
- }
-
- @Test
- public void multiTableTest() throws Exception {
- // Write to multiple tables
- final String instance = cluster.getInstanceName();
- final String keepers = cluster.getZooKeepers();
- final ClusterControl control = cluster.getClusterControl();
- final String prefix = getClass().getSimpleName() + "_" + testName.getMethodName();
- ExecutorService svc = Executors.newFixedThreadPool(2);
- Future<Integer> p1 = svc.submit(new Callable<Integer>() {
- @Override
- public Integer call() {
- try {
- ClientConfiguration clientConf = cluster.getClientConfig();
- // Invocation is different for SASL. We're only logged in via this process's memory (not via a credentials cache on disk),
- // so we need to pass along the keytab.
- if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- String principal = getAdminPrincipal();
- AuthenticationToken token = getAdminToken();
- assertTrue("Expected KerberosToken, but was " + token.getClass(), token instanceof KerberosToken);
- KerberosToken kt = (KerberosToken) token;
- assertNotNull("Expected keytab in token", kt.getKeytab());
- return control.exec(
- TestMultiTableIngest.class,
- args("--count", Integer.toString(ROWS), "-i", instance, "-z", keepers, "--tablePrefix", prefix, "--keytab", kt.getKeytab().getAbsolutePath(),
- "-u", principal));
- }
-
- return control.exec(
- TestMultiTableIngest.class,
- args("--count", Integer.toString(ROWS), "-u", getAdminPrincipal(), "-i", instance, "-z", keepers, "-p", new String(
- ((PasswordToken) getAdminToken()).getPassword(), Charsets.UTF_8), "--tablePrefix", prefix));
- } catch (IOException e) {
- log.error("Error running MultiTableIngest", e);
- return -1;
- }
- }
- });
- Future<Integer> p2 = svc.submit(new Callable<Integer>() {
- @Override
- public Integer call() {
- try {
- ClientConfiguration clientConf = cluster.getClientConfig();
- // Invocation is different for SASL. We're only logged in via this process's memory (not via a credentials cache on disk),
- // so we need to pass along the keytab.
- if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- String principal = getAdminPrincipal();
- AuthenticationToken token = getAdminToken();
- assertTrue("Expected KerberosToken, but was " + token.getClass(), token instanceof KerberosToken);
- KerberosToken kt = (KerberosToken) token;
- assertNotNull("Expected keytab in token", kt.getKeytab());
- return control.exec(
- TestMultiTableIngest.class,
- args("--count", Integer.toString(ROWS), "--readonly", "-i", instance, "-z", keepers, "--tablePrefix", prefix, "--keytab", kt.getKeytab()
- .getAbsolutePath(), "-u", principal));
- }
-
- return control.exec(
- TestMultiTableIngest.class,
- args("--count", Integer.toString(ROWS), "--readonly", "-u", getAdminPrincipal(), "-i", instance, "-z", keepers, "-p", new String(
- ((PasswordToken) getAdminToken()).getPassword(), Charsets.UTF_8), "--tablePrefix", prefix));
- } catch (IOException e) {
- log.error("Error running MultiTableIngest", e);
- return -1;
- }
- }
- });
- svc.shutdown();
- while (!svc.isTerminated()) {
- svc.awaitTermination(15, TimeUnit.SECONDS);
- }
- assertEquals(0, p1.get().intValue());
- assertEquals(0, p2.get().intValue());
- }
-
- @Test
- public void largeTest() throws Exception {
- // write a few large values
- Connector connector = getConnector();
- String table = getUniqueNames(1)[0];
- ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2, 1, 500000, 0, table);
- verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2, 1, 500000, 0, table);
- }
-
- @Test
- public void interleaved() throws Exception {
- // read and write concurrently
- final Connector connector = getConnector();
- final String tableName = getUniqueNames(1)[0];
- interleaveTest(connector, tableName);
- }
-
- static void interleaveTest(final Connector connector, final String tableName) throws Exception {
- final AtomicBoolean fail = new AtomicBoolean(false);
- final int CHUNKSIZE = ROWS / 10;
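- // verify the previously written chunk on a separate thread while the next chunk is being ingested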
- ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), CHUNKSIZE, 1, 50, 0, tableName);
- int i;
- for (i = 0; i < ROWS; i += CHUNKSIZE) {
- final int start = i;
- Thread verify = new Thread() {
- @Override
- public void run() {
- try {
- verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), CHUNKSIZE, 1, 50, start, tableName);
- } catch (Exception ex) {
- fail.set(true);
- }
- }
- };
- verify.start();
- ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), CHUNKSIZE, 1, 50, i + CHUNKSIZE, tableName);
- verify.join();
- assertFalse(fail.get());
- }
- verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), CHUNKSIZE, 1, 50, i, tableName);
- }
-
- public static Text t(String s) {
- return new Text(s);
- }
-
- public static Mutation m(String row, String cf, String cq, String value) {
- Mutation m = new Mutation(t(row));
- m.put(t(cf), t(cq), new Value(value.getBytes()));
- return m;
- }
-
- @Test
- public void localityGroupPerf() throws Exception {
- // verify that locality groups can make look-ups faster
- final Connector connector = getConnector();
- final String tableName = getUniqueNames(1)[0];
- connector.tableOperations().create(tableName);
- connector.tableOperations().setProperty(tableName, "table.group.g1", "colf");
- connector.tableOperations().setProperty(tableName, "table.groups.enabled", "g1");
- ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2000, 1, 50, 0, tableName);
- connector.tableOperations().compact(tableName, null, null, true, true);
- BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
- bw.addMutation(m("zzzzzzzzzzz", "colf2", "cq", "value"));
- bw.close();
- long now = System.currentTimeMillis();
- Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
- scanner.fetchColumnFamily(new Text("colf"));
- Iterators.size(scanner.iterator());
- long diff = System.currentTimeMillis() - now;
- now = System.currentTimeMillis();
- scanner = connector.createScanner(tableName, Authorizations.EMPTY);
- scanner.fetchColumnFamily(new Text("colf2"));
- Iterators.size(scanner.iterator());
- long diff2 = System.currentTimeMillis() - now;
- assertTrue(diff2 < diff);
- }
-
- @Test
- public void sunnyLG() throws Exception {
- // create a locality group, write to it and ensure it exists in the RFiles that result
- final Connector connector = getConnector();
- final String tableName = getUniqueNames(1)[0];
- connector.tableOperations().create(tableName);
- Map<String,Set<Text>> groups = new TreeMap<String,Set<Text>>();
- groups.put("g1", Collections.singleton(t("colf")));
- connector.tableOperations().setLocalityGroups(tableName, groups);
- ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2000, 1, 50, 0, tableName);
- verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), 2000, 1, 50, 0, tableName);
- connector.tableOperations().flush(tableName, null, null, true);
- BatchScanner bscanner = connector.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1);
- String tableId = connector.tableOperations().tableIdMap().get(tableName);
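- // tablet metadata rows for this table sort between "<tableId>;" (tablets with end rows) and "<tableId><" (the default tablet)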
- bscanner.setRanges(Collections.singletonList(new Range(new Text(tableId + ";"), new Text(tableId + "<"))));
- bscanner.fetchColumnFamily(DataFileColumnFamily.NAME);
- boolean foundFile = false;
- for (Entry<Key,Value> entry : bscanner) {
- foundFile = true;
- ByteArrayOutputStream baos = new ByteArrayOutputStream();
- PrintStream newOut = new PrintStream(baos);
- PrintStream oldOut = System.out;
- try {
- System.setOut(newOut);
- List<String> args = new ArrayList<>();
- args.add(entry.getKey().getColumnQualifier().toString());
- if (ClusterType.STANDALONE == getClusterType() && cluster.getClientConfig().getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- args.add("--config");
- StandaloneAccumuloCluster sac = (StandaloneAccumuloCluster) cluster;
- String hadoopConfDir = sac.getHadoopConfDir();
- args.add(new Path(hadoopConfDir, "core-site.xml").toString());
- args.add(new Path(hadoopConfDir, "hdfs-site.xml").toString());
- }
- log.info("Invoking PrintInfo with " + args);
- PrintInfo.main(args.toArray(new String[args.size()]));
- newOut.flush();
- String stdout = baos.toString();
- assertTrue(stdout.contains("Locality group : g1"));
- assertTrue(stdout.contains("families : [colf]"));
- } finally {
- newOut.close();
- System.setOut(oldOut);
- }
- }
- bscanner.close();
- assertTrue(foundFile);
- }
-
- @Test
- public void localityGroupChange() throws Exception {
- // Make changes to locality groups and ensure nothing is lost
- final Connector connector = getConnector();
- String table = getUniqueNames(1)[0];
- TableOperations to = connector.tableOperations();
- to.create(table);
- String[] config = new String[] {"lg1:colf", null, "lg1:colf,xyz", "lg1:colf,xyz;lg2:c1,c2"};
- int i = 0;
- for (String cfg : config) {
- to.setLocalityGroups(table, getGroups(cfg));
- ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS * (i + 1), 1, 50, ROWS * i, table);
- to.flush(table, null, null, true);
- verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), 0, 1, 50, ROWS * (i + 1), table);
- i++;
- }
- to.delete(table);
- to.create(table);
- config = new String[] {"lg1:colf", null, "lg1:colf,xyz", "lg1:colf;lg2:colf",};
- i = 1;
- for (String cfg : config) {
- ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS * i, 1, 50, 0, table);
- ingest(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS * i, 1, 50, 0, "xyz", table);
- to.setLocalityGroups(table, getGroups(cfg));
- to.flush(table, null, null, true);
- verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS * i, 1, 50, 0, table);
- verify(connector, getCluster().getClientConfig(), getAdminPrincipal(), ROWS * i, 1, 50, 0, "xyz", table);
- i++;
- }
- }
-
- private Map<String,Set<Text>> getGroups(String cfg) {
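- // cfg has the form "group:col1,col2;group2:col3"; a null cfg yields an empty map (no locality groups)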
- Map<String,Set<Text>> groups = new TreeMap<String,Set<Text>>();
- if (cfg != null) {
- for (String group : cfg.split(";")) {
- String[] parts = group.split(":");
- Set<Text> cols = new HashSet<Text>();
- for (String col : parts[1].split(",")) {
- cols.add(t(col));
- }
- groups.put(parts[1], cols);
- }
- }
- return groups;
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java b/test/src/test/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
deleted file mode 100644
index 0408aa0..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.file.rfile.CreateEmpty;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * XXX As part of verifying lossy recovery via inserting an empty rfile, this test deletes the files backing test table tablets. This requires write access to
- * the backing files of the test Accumulo mini cluster.
- *
- * This test should read the file location from the test harness, and that file should be on the local filesystem. If you want to take a paranoid approach, just
- * make sure the test user doesn't have write access to the HDFS files of any colocated live Accumulo instance or any important local filesystem files.
- */
-public class RecoveryWithEmptyRFileIT extends ConfigurableMacBase {
- private static final Logger log = LoggerFactory.getLogger(RecoveryWithEmptyRFileIT.class);
-
- static final int ROWS = 200000;
- static final int COLS = 1;
- static final String COLF = "colf";
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.useMiniDFS(true);
- }
-
- @Test
- public void replaceMissingRFile() throws Exception {
- log.info("Ingest some data, verify it was stored properly, replace an underlying rfile with an empty one and verify we can scan.");
- Connector connector = getConnector();
- String tableName = getUniqueNames(1)[0];
- ReadWriteIT.ingest(connector, cluster.getClientConfig(), "root", ROWS, COLS, 50, 0, tableName);
- ReadWriteIT.verify(connector, cluster.getClientConfig(), "root", ROWS, COLS, 50, 0, tableName);
-
- connector.tableOperations().flush(tableName, null, null, true);
- connector.tableOperations().offline(tableName, true);
-
- log.debug("Replacing rfile(s) with empty");
- Scanner meta = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- String tableId = connector.tableOperations().tableIdMap().get(tableName);
- meta.setRange(new Range(new Text(tableId + ";"), new Text(tableId + "<")));
- meta.fetchColumnFamily(DataFileColumnFamily.NAME);
- boolean foundFile = false;
- for (Entry<Key,Value> entry : meta) {
- foundFile = true;
- Path rfile = new Path(entry.getKey().getColumnQualifier().toString());
- log.debug("Removing rfile '" + rfile + "'");
- cluster.getFileSystem().delete(rfile, false);
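- // recreate the file as a valid but empty rfile so the tablet's metadata reference still resolves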
- Process info = cluster.exec(CreateEmpty.class, rfile.toString());
- assertEquals(0, info.waitFor());
- }
- meta.close();
- assertTrue(foundFile);
-
- log.trace("invalidate cached file handles by issuing a compaction");
- connector.tableOperations().online(tableName, true);
- connector.tableOperations().compact(tableName, null, null, false, true);
-
- log.debug("make sure we can still scan");
- Scanner scan = connector.createScanner(tableName, Authorizations.EMPTY);
- scan.setRange(new Range());
- long cells = 0L;
- for (Entry<Key,Value> entry : scan) {
- if (entry != null)
- cells++;
- }
- scan.close();
- assertEquals(0L, cells);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/RegexGroupBalanceIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/RegexGroupBalanceIT.java b/test/src/test/java/org/apache/accumulo/test/functional/RegexGroupBalanceIT.java
deleted file mode 100644
index a8c5bca..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/RegexGroupBalanceIT.java
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.accumulo.test.functional;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.master.balancer.RegexGroupBalancer;
-import org.apache.accumulo.server.master.state.TServerInstance;
-import org.apache.commons.lang3.mutable.MutableInt;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-import com.google.common.collect.HashBasedTable;
-import com.google.common.collect.Table;
-
-public class RegexGroupBalanceIT extends ConfigurableMacBase {
-
- @Override
- public void beforeClusterStart(MiniAccumuloConfigImpl cfg) throws Exception {
- cfg.setNumTservers(4);
- }
-
- @Test(timeout = 120000)
- public void testBalancing() throws Exception {
- Connector conn = getConnector();
- String tablename = getUniqueNames(1)[0];
- conn.tableOperations().create(tablename);
-
- SortedSet<Text> splits = new TreeSet<>();
- splits.add(new Text("01a"));
- splits.add(new Text("01m"));
- splits.add(new Text("01z"));
-
- splits.add(new Text("02a"));
- splits.add(new Text("02f"));
- splits.add(new Text("02r"));
- splits.add(new Text("02z"));
-
- splits.add(new Text("03a"));
- splits.add(new Text("03f"));
- splits.add(new Text("03m"));
- splits.add(new Text("03r"));
-
- conn.tableOperations().setProperty(tablename, RegexGroupBalancer.REGEX_PROPERTY, "(\\d\\d).*");
- conn.tableOperations().setProperty(tablename, RegexGroupBalancer.DEFAUT_GROUP_PROPERTY, "03");
- conn.tableOperations().setProperty(tablename, RegexGroupBalancer.WAIT_TIME_PROPERTY, "50ms");
- conn.tableOperations().setProperty(tablename, Property.TABLE_LOAD_BALANCER.getKey(), RegexGroupBalancer.class.getName());
-
- conn.tableOperations().addSplits(tablename, splits);
-
- while (true) {
- Thread.sleep(250);
-
- Table<String,String,MutableInt> groupLocationCounts = getCounts(conn, tablename);
-
- boolean allGood = true;
- allGood &= checkGroup(groupLocationCounts, "01", 1, 1, 3);
- allGood &= checkGroup(groupLocationCounts, "02", 1, 1, 4);
- allGood &= checkGroup(groupLocationCounts, "03", 1, 2, 4);
- allGood &= checkTabletsPerTserver(groupLocationCounts, 3, 3, 4);
-
- if (allGood) {
- break;
- }
- }
-
- splits.clear();
- splits.add(new Text("01b"));
- splits.add(new Text("01f"));
- splits.add(new Text("01l"));
- splits.add(new Text("01r"));
- conn.tableOperations().addSplits(tablename, splits);
-
- while (true) {
- Thread.sleep(250);
-
- Table<String,String,MutableInt> groupLocationCounts = getCounts(conn, tablename);
-
- boolean allGood = true;
- allGood &= checkGroup(groupLocationCounts, "01", 1, 2, 4);
- allGood &= checkGroup(groupLocationCounts, "02", 1, 1, 4);
- allGood &= checkGroup(groupLocationCounts, "03", 1, 2, 4);
- allGood &= checkTabletsPerTserver(groupLocationCounts, 4, 4, 4);
-
- if (allGood) {
- break;
- }
- }
-
- // merge group 01 down to one tablet
- conn.tableOperations().merge(tablename, null, new Text("01z"));
-
- while (true) {
- Thread.sleep(250);
-
- Table<String,String,MutableInt> groupLocationCounts = getCounts(conn, tablename);
-
- boolean allGood = true;
- allGood &= checkGroup(groupLocationCounts, "01", 1, 1, 1);
- allGood &= checkGroup(groupLocationCounts, "02", 1, 1, 4);
- allGood &= checkGroup(groupLocationCounts, "03", 1, 2, 4);
- allGood &= checkTabletsPerTserver(groupLocationCounts, 2, 3, 4);
-
- if (allGood) {
- break;
- }
- }
- }
-
- private boolean checkTabletsPerTserver(Table<String,String,MutableInt> groupLocationCounts, int minTabletPerTserver, int maxTabletsPerTserver,
- int totalTservers) {
- // check that each tserver has between min and max tablets
- for (Map<String,MutableInt> groups : groupLocationCounts.columnMap().values()) {
- int sum = 0;
- for (MutableInt mi : groups.values()) {
- sum += mi.intValue();
- }
-
- if (sum < minTabletPerTserver || sum > maxTabletsPerTserver) {
- return false;
- }
- }
-
- return groupLocationCounts.columnKeySet().size() == totalTservers;
- }
-
- private boolean checkGroup(Table<String,String,MutableInt> groupLocationCounts, String group, int min, int max, int tservers) {
- Collection<MutableInt> counts = groupLocationCounts.row(group).values();
- if (counts.size() == 0) {
- return min == 0 && max == 0 && tservers == 0;
- }
- return min == Collections.min(counts).intValue() && max == Collections.max(counts).intValue() && counts.size() == tservers;
- }
-
- private Table<String,String,MutableInt> getCounts(Connector conn, String tablename) throws TableNotFoundException {
- Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
- String tableId = conn.tableOperations().tableIdMap().get(tablename);
- s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
-
- Table<String,String,MutableInt> groupLocationCounts = HashBasedTable.create();
-
- for (Entry<Key,Value> entry : s) {
- String group = entry.getKey().getRow().toString();
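- // the default tablet's metadata row ends with '<' and belongs to the configured default group "03"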
- if (group.endsWith("<")) {
- group = "03";
- } else {
- group = group.substring(tableId.length() + 1).substring(0, 2);
- }
- String loc = new TServerInstance(entry.getValue(), entry.getKey().getColumnQualifier()).toString();
-
- MutableInt count = groupLocationCounts.get(group, loc);
- if (count == null) {
- count = new MutableInt(0);
- groupLocationCounts.put(group, loc, count);
- }
-
- count.increment();
- }
- return groupLocationCounts;
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/proxy/TBinaryProxyIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/proxy/TBinaryProxyIT.java b/test/src/test/java/org/apache/accumulo/test/proxy/TBinaryProxyIT.java
deleted file mode 100644
index 6359d1e..0000000
--- a/test/src/test/java/org/apache/accumulo/test/proxy/TBinaryProxyIT.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.proxy;
-
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.junit.BeforeClass;
-
-/**
- * Runs the SimpleProxyBase integration tests over the Thrift binary protocol.
- */
-public class TBinaryProxyIT extends SimpleProxyBase {
-
- @BeforeClass
- public static void setProtocol() throws Exception {
- SimpleProxyBase.factory = new TBinaryProtocol.Factory();
- setUpProxy();
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/proxy/TCompactProxyIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/proxy/TCompactProxyIT.java b/test/src/test/java/org/apache/accumulo/test/proxy/TCompactProxyIT.java
deleted file mode 100644
index a92414a..0000000
--- a/test/src/test/java/org/apache/accumulo/test/proxy/TCompactProxyIT.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.proxy;
-
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.junit.BeforeClass;
-
-/**
- * Runs the SimpleProxyBase integration tests over the Thrift compact protocol.
- */
-public class TCompactProxyIT extends SimpleProxyBase {
-
- @BeforeClass
- public static void setProtocol() throws Exception {
- SimpleProxyBase.factory = new TCompactProtocol.Factory();
- setUpProxy();
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/proxy/TJsonProtocolProxyIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/proxy/TJsonProtocolProxyIT.java b/test/src/test/java/org/apache/accumulo/test/proxy/TJsonProtocolProxyIT.java
deleted file mode 100644
index 5fcbf53..0000000
--- a/test/src/test/java/org/apache/accumulo/test/proxy/TJsonProtocolProxyIT.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.proxy;
-
-import org.apache.thrift.protocol.TJSONProtocol;
-import org.junit.BeforeClass;
-
-/**
- * Runs the SimpleProxyBase integration tests over the Thrift JSON protocol.
- */
-public class TJsonProtocolProxyIT extends SimpleProxyBase {
-
- @BeforeClass
- public static void setProtocol() throws Exception {
- SimpleProxyBase.factory = new TJSONProtocol.Factory();
- setUpProxy();
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/proxy/TTupleProxyIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/proxy/TTupleProxyIT.java b/test/src/test/java/org/apache/accumulo/test/proxy/TTupleProxyIT.java
deleted file mode 100644
index cdecf2c..0000000
--- a/test/src/test/java/org/apache/accumulo/test/proxy/TTupleProxyIT.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.proxy;
-
-import org.apache.thrift.protocol.TTupleProtocol;
-import org.junit.BeforeClass;
-
-/**
- * Runs the SimpleProxyBase integration tests over the Thrift tuple protocol.
- */
-public class TTupleProxyIT extends SimpleProxyBase {
-
- @BeforeClass
- public static void setProtocol() throws Exception {
- SimpleProxyBase.factory = new TTupleProtocol.Factory();
- setUpProxy();
- }
-
-}
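
The four ITs above differ only in the TProtocolFactory they hand to SimpleProxyBase; Thrift uses the factory to choose the wire encoding while the generated client code stays identical. A sketch of that parameterization (the pick() helper is illustrative, not part of the patch):

    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.thrift.protocol.TJSONProtocol;
    import org.apache.thrift.protocol.TProtocolFactory;
    import org.apache.thrift.protocol.TTupleProtocol;

    public class ProtocolChoice {
      // Same client, different encodings; mirrors the four IT subclasses above.
      static TProtocolFactory pick(String name) {
        if ("binary".equals(name))
          return new TBinaryProtocol.Factory();
        if ("compact".equals(name))
          return new TCompactProtocol.Factory();
        if ("json".equals(name))
          return new TJSONProtocol.Factory();
        if ("tuple".equals(name))
          return new TTupleProtocol.Factory();
        throw new IllegalArgumentException("unknown protocol: " + name);
      }
    }
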
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/proxy/TestProxyClient.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/proxy/TestProxyClient.java b/test/src/test/java/org/apache/accumulo/test/proxy/TestProxyClient.java
deleted file mode 100644
index ff92795..0000000
--- a/test/src/test/java/org/apache/accumulo/test/proxy/TestProxyClient.java
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.proxy;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.nio.ByteBuffer;
-import java.util.Collections;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-import javax.security.sasl.SaslException;
-
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.iterators.user.RegExFilter;
-import org.apache.accumulo.core.rpc.UGIAssumingTransport;
-import org.apache.accumulo.proxy.Util;
-import org.apache.accumulo.proxy.thrift.AccumuloProxy;
-import org.apache.accumulo.proxy.thrift.ColumnUpdate;
-import org.apache.accumulo.proxy.thrift.Key;
-import org.apache.accumulo.proxy.thrift.ScanResult;
-import org.apache.accumulo.proxy.thrift.TimeType;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.protocol.TProtocol;
-import org.apache.thrift.protocol.TProtocolFactory;
-import org.apache.thrift.transport.TFramedTransport;
-import org.apache.thrift.transport.TSaslClientTransport;
-import org.apache.thrift.transport.TSocket;
-import org.apache.thrift.transport.TTransport;
-import org.apache.thrift.transport.TTransportException;
-
-public class TestProxyClient {
-
- protected AccumuloProxy.Client proxy;
- protected TTransport transport;
-
- public TestProxyClient(String host, int port) throws TTransportException {
- this(host, port, new TCompactProtocol.Factory());
- }
-
- public TestProxyClient(String host, int port, TProtocolFactory protoFactory) throws TTransportException {
- final TSocket socket = new TSocket(host, port);
- socket.setTimeout(600000);
- transport = new TFramedTransport(socket);
- final TProtocol protocol = protoFactory.getProtocol(transport);
- proxy = new AccumuloProxy.Client(protocol);
- transport.open();
- }
-
- public TestProxyClient(String host, int port, TProtocolFactory protoFactory, String proxyPrimary, UserGroupInformation ugi) throws SaslException,
- TTransportException {
- TSocket socket = new TSocket(host, port);
- TSaslClientTransport saslTransport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, host, Collections.singletonMap("javax.security.sasl.qop",
- "auth"), null, socket);
-
- transport = new UGIAssumingTransport(saslTransport, ugi);
-
- // UGI transport will perform the doAs for us
- transport.open();
-
- AccumuloProxy.Client.Factory factory = new AccumuloProxy.Client.Factory();
- final TProtocol protocol = protoFactory.getProtocol(transport);
- proxy = factory.getClient(protocol);
- }
-
- public synchronized void close() {
- if (null != transport) {
- transport.close();
- transport = null;
- }
- }
-
- public AccumuloProxy.Client proxy() {
- return proxy;
- }
-
- public static void main(String[] args) throws Exception {
-
- TestProxyClient tpc = new TestProxyClient("localhost", 42424);
- String principal = "root";
- Map<String,String> props = new TreeMap<String,String>();
- props.put("password", "secret");
-
- System.out.println("Logging in");
- ByteBuffer login = tpc.proxy.login(principal, props);
-
- System.out.println("Creating user: ");
- if (!tpc.proxy().listLocalUsers(login).contains("testuser")) {
- tpc.proxy().createLocalUser(login, "testuser", ByteBuffer.wrap("testpass".getBytes(UTF_8)));
- }
- System.out.println("UserList: " + tpc.proxy().listLocalUsers(login));
-
- System.out.println("Listing: " + tpc.proxy().listTables(login));
-
- System.out.println("Deleting: ");
- String testTable = "testtableOMGOMGOMG";
-
- System.out.println("Creating: ");
-
- if (tpc.proxy().tableExists(login, testTable))
- tpc.proxy().deleteTable(login, testTable);
-
- tpc.proxy().createTable(login, testTable, true, TimeType.MILLIS);
-
- System.out.println("Listing: " + tpc.proxy().listTables(login));
-
- System.out.println("Writing: ");
- Date start = new Date();
- Date then = new Date();
- int maxInserts = 1000000;
- String format = "%1$05d";
- Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
- for (int i = 0; i < maxInserts; i++) {
- String result = String.format(format, i);
- ColumnUpdate update = new ColumnUpdate(ByteBuffer.wrap(("cf" + i).getBytes(UTF_8)), ByteBuffer.wrap(("cq" + i).getBytes(UTF_8)));
- update.setValue(Util.randStringBuffer(10));
- mutations.put(ByteBuffer.wrap(result.getBytes(UTF_8)), Collections.singletonList(update));
-
- if (i % 1000 == 0) {
- tpc.proxy().updateAndFlush(login, testTable, mutations);
- mutations.clear();
- }
- }
- tpc.proxy().updateAndFlush(login, testTable, mutations);
- Date end = new Date();
- System.out.println(" End of writing: " + (end.getTime() - start.getTime()));
-
- tpc.proxy().deleteTable(login, testTable);
- tpc.proxy().createTable(login, testTable, true, TimeType.MILLIS);
-
- // Thread.sleep(1000);
-
- System.out.println("Writing async: ");
- start = new Date();
- then = new Date();
- mutations.clear();
- String writer = tpc.proxy().createWriter(login, testTable, null);
- for (int i = 0; i < maxInserts; i++) {
- String result = String.format(format, i);
- Key pkey = new Key();
- pkey.setRow(result.getBytes(UTF_8));
- ColumnUpdate update = new ColumnUpdate(ByteBuffer.wrap(("cf" + i).getBytes(UTF_8)), ByteBuffer.wrap(("cq" + i).getBytes(UTF_8)));
- update.setValue(Util.randStringBuffer(10));
- mutations.put(ByteBuffer.wrap(result.getBytes(UTF_8)), Collections.singletonList(update));
- tpc.proxy().update(writer, mutations);
- mutations.clear();
- }
-
- end = new Date();
- System.out.println(" End of writing: " + (end.getTime() - start.getTime()));
- start = end;
- System.out.println("Closing...");
- tpc.proxy().closeWriter(writer);
- end = new Date();
- System.out.println(" End of closing: " + (end.getTime() - start.getTime()));
-
- System.out.println("Reading: ");
-
- String regex = "cf1.*";
-
- IteratorSetting is = new IteratorSetting(50, regex, RegExFilter.class);
- RegExFilter.setRegexs(is, null, regex, null, null, false);
-
- String cookie = tpc.proxy().createScanner(login, testTable, null);
-
- int i = 0;
- start = new Date();
- then = new Date();
- boolean hasNext = true;
-
- int k = 1000;
- while (hasNext) {
- ScanResult kvList = tpc.proxy().nextK(cookie, k);
-
- Date now = new Date();
- System.out.println(i + " " + (now.getTime() - then.getTime()));
- then = now;
-
- i += kvList.getResultsSize();
- // for (TKeyValue kv:kvList.getResults()) System.out.println(new Key(kv.getKey()));
- hasNext = kvList.isMore();
- }
- end = new Date();
- System.out.println("Total entries: " + i + " total time " + (end.getTime() - start.getTime()));
- }
-}
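
A condensed usage sketch of the client above, reusing the placeholder host, port, and credentials from its main() method (imports as in the file):

    TestProxyClient tpc = new TestProxyClient("localhost", 42424);
    ByteBuffer login = tpc.proxy().login("root", Collections.singletonMap("password", "secret"));
    tpc.proxy().createTable(login, "demo", true, TimeType.MILLIS);
    // write via updateAndFlush(), or a createWriter()/update()/closeWriter() cycle,
    // then read via createScanner()/nextK(), as main() demonstrates above
    tpc.close();
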
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/proxy/TestProxyInstanceOperations.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/proxy/TestProxyInstanceOperations.java b/test/src/test/java/org/apache/accumulo/test/proxy/TestProxyInstanceOperations.java
deleted file mode 100644
index ff94dd4..0000000
--- a/test/src/test/java/org/apache/accumulo/test/proxy/TestProxyInstanceOperations.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.proxy;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.nio.ByteBuffer;
-import java.util.Collections;
-import java.util.Properties;
-
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.proxy.Proxy;
-import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.server.TServer;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.net.HostAndPort;
-
-public class TestProxyInstanceOperations {
- private static final Logger log = LoggerFactory.getLogger(TestProxyInstanceOperations.class);
-
- protected static TServer proxy;
- protected static TestProxyClient tpc;
- protected static ByteBuffer userpass;
- protected static final int port = 10197;
-
- @BeforeClass
- public static void setup() throws Exception {
- Properties prop = new Properties();
- prop.setProperty("useMockInstance", "true");
- prop.put("tokenClass", PasswordToken.class.getName());
-
- proxy = Proxy.createProxyServer(HostAndPort.fromParts("localhost", port), new TCompactProtocol.Factory(), prop).server;
- log.info("Waiting for proxy to start");
- while (!proxy.isServing()) {
- Thread.sleep(500);
- }
- log.info("Proxy started");
- tpc = new TestProxyClient("localhost", port);
- userpass = tpc.proxy.login("root", Collections.singletonMap("password", ""));
- }
-
- @AfterClass
- public static void tearDown() throws InterruptedException {
- proxy.stop();
- }
-
- @Test
- public void properties() throws TException {
- tpc.proxy().setProperty(userpass, "test.systemprop", "whistletips");
-
- assertEquals(tpc.proxy().getSystemConfiguration(userpass).get("test.systemprop"), "whistletips");
- tpc.proxy().removeProperty(userpass, "test.systemprop");
- assertNull(tpc.proxy().getSystemConfiguration(userpass).get("test.systemprop"));
-
- }
-
- @Test
- public void testClassLoad() throws TException {
- assertTrue(tpc.proxy().testClassLoad(userpass, "org.apache.accumulo.core.iterators.user.RegExFilter", "org.apache.accumulo.core.iterators.Filter"));
- }
-
-}
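
For comparison, the same system-property round trip through the native client API rather than the proxy (a sketch, assuming an existing Connector named conn):

    conn.instanceOperations().setProperty("test.systemprop", "whistletips");
    // getSystemConfiguration() returns a Map<String,String> of the live system config
    String value = conn.instanceOperations().getSystemConfiguration().get("test.systemprop");
    conn.instanceOperations().removeProperty("test.systemprop");
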
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/proxy/TestProxyReadWrite.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/proxy/TestProxyReadWrite.java b/test/src/test/java/org/apache/accumulo/test/proxy/TestProxyReadWrite.java
deleted file mode 100644
index 1a75fea..0000000
--- a/test/src/test/java/org/apache/accumulo/test/proxy/TestProxyReadWrite.java
+++ /dev/null
@@ -1,468 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.proxy;
-
-import static org.junit.Assert.assertEquals;
-
-import java.nio.ByteBuffer;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.iterators.user.RegExFilter;
-import org.apache.accumulo.proxy.Proxy;
-import org.apache.accumulo.proxy.Util;
-import org.apache.accumulo.proxy.thrift.BatchScanOptions;
-import org.apache.accumulo.proxy.thrift.ColumnUpdate;
-import org.apache.accumulo.proxy.thrift.IteratorSetting;
-import org.apache.accumulo.proxy.thrift.Key;
-import org.apache.accumulo.proxy.thrift.KeyValue;
-import org.apache.accumulo.proxy.thrift.Range;
-import org.apache.accumulo.proxy.thrift.ScanColumn;
-import org.apache.accumulo.proxy.thrift.ScanOptions;
-import org.apache.accumulo.proxy.thrift.ScanResult;
-import org.apache.accumulo.proxy.thrift.TimeType;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.server.TServer;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.google.common.net.HostAndPort;
-
-public class TestProxyReadWrite {
- protected static TServer proxy;
- protected static TestProxyClient tpc;
- protected static ByteBuffer userpass;
- protected static final int port = 10194;
- protected static final String testtable = "testtable";
-
- @BeforeClass
- public static void setup() throws Exception {
- Properties prop = new Properties();
- prop.setProperty("useMockInstance", "true");
- prop.put("tokenClass", PasswordToken.class.getName());
-
- proxy = Proxy.createProxyServer(HostAndPort.fromParts("localhost", port), new TCompactProtocol.Factory(), prop).server;
- tpc = new TestProxyClient("localhost", port);
- userpass = tpc.proxy().login("root", Collections.singletonMap("password", ""));
- }
-
- @AfterClass
- public static void tearDown() throws InterruptedException {
- proxy.stop();
- }
-
- @Before
- public void makeTestTable() throws Exception {
- tpc.proxy().createTable(userpass, testtable, true, TimeType.MILLIS);
- }
-
- @After
- public void deleteTestTable() throws Exception {
- tpc.proxy().deleteTable(userpass, testtable);
- }
-
- private static void addMutation(Map<ByteBuffer,List<ColumnUpdate>> mutations, String row, String cf, String cq, String value) {
- ColumnUpdate update = new ColumnUpdate(ByteBuffer.wrap(cf.getBytes()), ByteBuffer.wrap(cq.getBytes()));
- update.setValue(value.getBytes());
- mutations.put(ByteBuffer.wrap(row.getBytes()), Collections.singletonList(update));
- }
-
- private static void addMutation(Map<ByteBuffer,List<ColumnUpdate>> mutations, String row, String cf, String cq, String vis, String value) {
- ColumnUpdate update = new ColumnUpdate(ByteBuffer.wrap(cf.getBytes()), ByteBuffer.wrap(cq.getBytes()));
- update.setValue(value.getBytes());
- update.setColVisibility(vis.getBytes());
- mutations.put(ByteBuffer.wrap(row.getBytes()), Collections.singletonList(update));
- }
-
- /**
- * Insert 100,000 cells whose rows are [0..99999] (zero-padded). Set a range so that only the entries below row "5" come back (there should be
- * 50,000).
- */
- @Test
- public void readWriteBatchOneShotWithRange() throws Exception {
- int maxInserts = 100000;
- Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
- String format = "%1$05d";
- for (int i = 0; i < maxInserts; i++) {
- addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
-
- if (i % 1000 == 0 || i == maxInserts - 1) {
- tpc.proxy().updateAndFlush(userpass, testtable, mutations);
- mutations.clear();
- }
- }
-
- Key stop = new Key();
- stop.setRow("5".getBytes());
- BatchScanOptions options = new BatchScanOptions();
- options.ranges = Collections.singletonList(new Range(null, false, stop, false));
- String cookie = tpc.proxy().createBatchScanner(userpass, testtable, options);
-
- int i = 0;
- boolean hasNext = true;
-
- int k = 1000;
- while (hasNext) {
- ScanResult kvList = tpc.proxy().nextK(cookie, k);
- i += kvList.getResultsSize();
- hasNext = kvList.isMore();
- }
- assertEquals(i, 50000);
- }
-
- /**
- * Insert 100,000 cells whose rows are [0..99999] (zero-padded). Set a column family so that only the entries with the specified column family come back
- * (there should be 50,000).
- */
- @Test
- public void readWriteBatchOneShotWithColumnFamilyOnly() throws Exception {
- int maxInserts = 100000;
- Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
- String format = "%1$05d";
- for (int i = 0; i < maxInserts; i++) {
-
- addMutation(mutations, String.format(format, i), "cf" + (i % 2), "cq" + (i % 2), Util.randString(10));
-
- if (i % 1000 == 0 || i == maxInserts - 1) {
- tpc.proxy().updateAndFlush(userpass, testtable, mutations);
- mutations.clear();
- }
- }
-
- BatchScanOptions options = new BatchScanOptions();
-
- ScanColumn sc = new ScanColumn();
- sc.colFamily = ByteBuffer.wrap("cf0".getBytes());
-
- options.columns = Collections.singletonList(sc);
- String cookie = tpc.proxy().createBatchScanner(userpass, testtable, options);
-
- int i = 0;
- boolean hasNext = true;
-
- int k = 1000;
- while (hasNext) {
- ScanResult kvList = tpc.proxy().nextK(cookie, k);
- i += kvList.getResultsSize();
- hasNext = kvList.isMore();
- }
- assertEquals(i, 50000);
- }
-
- /**
- * Insert 100,000 cells whose rows are [0..99999] (zero-padded). Set a column family and column qualifier so that only the entries with the specified column
- * come back (there should be 50,000).
- */
- @Test
- public void readWriteBatchOneShotWithFullColumn() throws Exception {
- int maxInserts = 100000;
- Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
- String format = "%1$05d";
- for (int i = 0; i < maxInserts; i++) {
-
- addMutation(mutations, String.format(format, i), "cf" + (i % 2), "cq" + (i % 2), Util.randString(10));
-
- if (i % 1000 == 0 || i == maxInserts - 1) {
- tpc.proxy().updateAndFlush(userpass, testtable, mutations);
- mutations.clear();
- }
- }
-
- BatchScanOptions options = new BatchScanOptions();
-
- ScanColumn sc = new ScanColumn();
- sc.colFamily = ByteBuffer.wrap("cf0".getBytes());
- sc.colQualifier = ByteBuffer.wrap("cq0".getBytes());
-
- options.columns = Collections.singletonList(sc);
- String cookie = tpc.proxy().createBatchScanner(userpass, testtable, options);
-
- int i = 0;
- boolean hasNext = true;
-
- int k = 1000;
- while (hasNext) {
- ScanResult kvList = tpc.proxy().nextK(cookie, k);
- i += kvList.getResultsSize();
- hasNext = kvList.isMore();
- }
- assertEquals(i, 50000);
- }
-
- /**
- * Insert 10,000 cells whose rows are [0..9999] (zero-padded to five digits). Filter the results so that only the even rows come back.
- */
- @Test
- public void readWriteBatchOneShotWithFilterIterator() throws Exception {
- int maxInserts = 10000;
- Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
- String format = "%1$05d";
- for (int i = 0; i < maxInserts; i++) {
- addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
-
- if (i % 1000 == 0 || i == maxInserts - 1) {
- tpc.proxy().updateAndFlush(userpass, testtable, mutations);
- mutations.clear();
- }
-
- }
-
- String regex = ".*[02468]";
-
- org.apache.accumulo.core.client.IteratorSetting is = new org.apache.accumulo.core.client.IteratorSetting(50, regex, RegExFilter.class);
- RegExFilter.setRegexs(is, regex, null, null, null, false);
-
- IteratorSetting pis = Util.iteratorSetting2ProxyIteratorSetting(is);
- ScanOptions opts = new ScanOptions();
- opts.iterators = Collections.singletonList(pis);
- String cookie = tpc.proxy().createScanner(userpass, testtable, opts);
-
- int i = 0;
- boolean hasNext = true;
-
- int k = 1000;
- while (hasNext) {
- ScanResult kvList = tpc.proxy().nextK(cookie, k);
- for (KeyValue kv : kvList.getResults()) {
- assertEquals(Integer.parseInt(new String(kv.getKey().getRow())), i);
-
- i += 2;
- }
- hasNext = kvList.isMore();
- }
- }
-
- @Test
- public void readWriteOneShotWithRange() throws Exception {
- int maxInserts = 100000;
- Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
- String format = "%1$05d";
- for (int i = 0; i < maxInserts; i++) {
- addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
-
- if (i % 1000 == 0 || i == maxInserts - 1) {
- tpc.proxy().updateAndFlush(userpass, testtable, mutations);
- mutations.clear();
- }
- }
-
- Key stop = new Key();
- stop.setRow("5".getBytes());
- ScanOptions opts = new ScanOptions();
- opts.range = new Range(null, false, stop, false);
- String cookie = tpc.proxy().createScanner(userpass, testtable, opts);
-
- int i = 0;
- boolean hasNext = true;
-
- int k = 1000;
- while (hasNext) {
- ScanResult kvList = tpc.proxy().nextK(cookie, k);
- i += kvList.getResultsSize();
- hasNext = kvList.isMore();
- }
- assertEquals(i, 50000);
- }
-
- /**
- * Insert 10,000 cells whose rows are [0..9999] (zero-padded to five digits). Filter the results so that only the even rows come back.
- */
- @Test
- public void readWriteOneShotWithFilterIterator() throws Exception {
- int maxInserts = 10000;
- Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
- String format = "%1$05d";
- for (int i = 0; i < maxInserts; i++) {
- addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
-
- if (i % 1000 == 0 || i == maxInserts - 1) {
-
- tpc.proxy().updateAndFlush(userpass, testtable, mutations);
- mutations.clear();
-
- }
-
- }
-
- String regex = ".*[02468]";
-
- org.apache.accumulo.core.client.IteratorSetting is = new org.apache.accumulo.core.client.IteratorSetting(50, regex, RegExFilter.class);
- RegExFilter.setRegexs(is, regex, null, null, null, false);
-
- IteratorSetting pis = Util.iteratorSetting2ProxyIteratorSetting(is);
- ScanOptions opts = new ScanOptions();
- opts.iterators = Collections.singletonList(pis);
- String cookie = tpc.proxy().createScanner(userpass, testtable, opts);
-
- int i = 0;
- boolean hasNext = true;
-
- int k = 1000;
- while (hasNext) {
- ScanResult kvList = tpc.proxy().nextK(cookie, k);
- for (KeyValue kv : kvList.getResults()) {
- assertEquals(Integer.parseInt(new String(kv.getKey().getRow())), i);
-
- i += 2;
- }
- hasNext = kvList.isMore();
- }
- }
-
- // @Test
- // This test takes a long time to run. Enable it if you suspect memory issues.
- public void manyWritesAndReads() throws Exception {
- int maxInserts = 1000000;
- Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
- String format = "%1$06d";
- String writer = tpc.proxy().createWriter(userpass, testtable, null);
- for (int i = 0; i < maxInserts; i++) {
- addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
-
- if (i % 1000 == 0 || i == maxInserts - 1) {
-
- tpc.proxy().update(writer, mutations);
- mutations.clear();
-
- }
-
- }
-
- tpc.proxy().flush(writer);
- tpc.proxy().closeWriter(writer);
-
- String cookie = tpc.proxy().createScanner(userpass, testtable, null);
-
- int i = 0;
- boolean hasNext = true;
-
- int k = 1000;
- while (hasNext) {
- ScanResult kvList = tpc.proxy().nextK(cookie, k);
- for (KeyValue kv : kvList.getResults()) {
- assertEquals(Integer.parseInt(new String(kv.getKey().getRow())), i);
- i++;
- }
- hasNext = kvList.isMore();
- if (hasNext)
- assertEquals(k, kvList.getResults().size());
- }
- assertEquals(maxInserts, i);
- }
-
- @Test
- public void asynchReadWrite() throws Exception {
- int maxInserts = 10000;
- Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
- String format = "%1$05d";
- String writer = tpc.proxy().createWriter(userpass, testtable, null);
- for (int i = 0; i < maxInserts; i++) {
- addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, Util.randString(10));
-
- if (i % 1000 == 0 || i == maxInserts - 1) {
- tpc.proxy().update(writer, mutations);
- mutations.clear();
- }
- }
-
- tpc.proxy().flush(writer);
- tpc.proxy().closeWriter(writer);
-
- String regex = ".*[02468]";
-
- org.apache.accumulo.core.client.IteratorSetting is = new org.apache.accumulo.core.client.IteratorSetting(50, regex, RegExFilter.class);
- RegExFilter.setRegexs(is, regex, null, null, null, false);
-
- IteratorSetting pis = Util.iteratorSetting2ProxyIteratorSetting(is);
- ScanOptions opts = new ScanOptions();
- opts.iterators = Collections.singletonList(pis);
- String cookie = tpc.proxy().createScanner(userpass, testtable, opts);
-
- int i = 0;
- boolean hasNext = true;
-
- int k = 1000;
- int numRead = 0;
- while (hasNext) {
- ScanResult kvList = tpc.proxy().nextK(cookie, k);
- for (KeyValue kv : kvList.getResults()) {
- assertEquals(i, Integer.parseInt(new String(kv.getKey().getRow())));
- numRead++;
- i += 2;
- }
- hasNext = kvList.isMore();
- }
- assertEquals(maxInserts / 2, numRead);
- }
-
- @Test
- public void testVisibility() throws Exception {
-
- Set<ByteBuffer> auths = new HashSet<ByteBuffer>();
- auths.add(ByteBuffer.wrap("even".getBytes()));
- tpc.proxy().changeUserAuthorizations(userpass, "root", auths);
-
- int maxInserts = 10000;
- Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
- String format = "%1$05d";
- String writer = tpc.proxy().createWriter(userpass, testtable, null);
- for (int i = 0; i < maxInserts; i++) {
- if (i % 2 == 0)
- addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, "even", Util.randString(10));
- else
- addMutation(mutations, String.format(format, i), "cf" + i, "cq" + i, "odd", Util.randString(10));
-
- if (i % 1000 == 0 || i == maxInserts - 1) {
- tpc.proxy().update(writer, mutations);
- mutations.clear();
- }
- }
-
- tpc.proxy().flush(writer);
- tpc.proxy().closeWriter(writer);
- ScanOptions opts = new ScanOptions();
- opts.authorizations = auths;
- String cookie = tpc.proxy().createScanner(userpass, testtable, opts);
-
- int i = 0;
- boolean hasNext = true;
-
- int k = 1000;
- int numRead = 0;
- while (hasNext) {
- ScanResult kvList = tpc.proxy().nextK(cookie, k);
- for (KeyValue kv : kvList.getResults()) {
- assertEquals(Integer.parseInt(new String(kv.getKey().getRow())), i);
- i += 2;
- numRead++;
- }
- hasNext = kvList.isMore();
-
- }
- assertEquals(maxInserts / 2, numRead);
- }
-
-}
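
Several tests above repeat the same RegExFilter setup; here it is once against the native client API (a sketch, assuming an existing Connector conn and table name tableName):

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.iterators.user.RegExFilter;
    import org.apache.accumulo.core.security.Authorizations;

    IteratorSetting is = new IteratorSetting(50, "regex", RegExFilter.class);
    RegExFilter.setRegexs(is, ".*[02468]", null, null, null, false); // row, colf, colq, value regexes; orFields
    Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
    scanner.addScanIterator(is); // the scanner now returns only rows ending in an even digit
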
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/proxy/TestProxySecurityOperations.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/proxy/TestProxySecurityOperations.java b/test/src/test/java/org/apache/accumulo/test/proxy/TestProxySecurityOperations.java
deleted file mode 100644
index eda38e5..0000000
--- a/test/src/test/java/org/apache/accumulo/test/proxy/TestProxySecurityOperations.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.proxy;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.nio.ByteBuffer;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.util.ByteBufferUtil;
-import org.apache.accumulo.proxy.Proxy;
-import org.apache.accumulo.proxy.thrift.SystemPermission;
-import org.apache.accumulo.proxy.thrift.TablePermission;
-import org.apache.accumulo.proxy.thrift.TimeType;
-import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.server.TServer;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.google.common.net.HostAndPort;
-
-public class TestProxySecurityOperations {
- protected static TServer proxy;
- protected static TestProxyClient tpc;
- protected static ByteBuffer userpass;
- protected static final int port = 10196;
- protected static final String testtable = "testtable";
- protected static final String testuser = "VonJines";
- protected static final ByteBuffer testpw = ByteBuffer.wrap("fiveones".getBytes());
-
- @BeforeClass
- public static void setup() throws Exception {
- Properties prop = new Properties();
- prop.setProperty("useMockInstance", "true");
- prop.put("tokenClass", PasswordToken.class.getName());
-
- proxy = Proxy.createProxyServer(HostAndPort.fromParts("localhost", port), new TCompactProtocol.Factory(), prop).server;
- while (!proxy.isServing()) {
- Thread.sleep(500);
- }
- tpc = new TestProxyClient("localhost", port);
- userpass = tpc.proxy().login("root", Collections.singletonMap("password", ""));
- }
-
- @AfterClass
- public static void tearDown() throws InterruptedException {
- proxy.stop();
- }
-
- @Before
- public void makeTestTableAndUser() throws Exception {
- tpc.proxy().createTable(userpass, testtable, true, TimeType.MILLIS);
- tpc.proxy().createLocalUser(userpass, testuser, testpw);
- }
-
- @After
- public void deleteTestTable() throws Exception {
- tpc.proxy().deleteTable(userpass, testtable);
- tpc.proxy().dropLocalUser(userpass, testuser);
- }
-
- @Test
- public void create() throws TException {
- tpc.proxy().createLocalUser(userpass, testuser + "2", testpw);
- assertTrue(tpc.proxy().listLocalUsers(userpass).contains(testuser + "2"));
- tpc.proxy().dropLocalUser(userpass, testuser + "2");
- assertTrue(!tpc.proxy().listLocalUsers(userpass).contains(testuser + "2"));
- }
-
- @Test
- public void authenticate() throws TException {
- assertTrue(tpc.proxy().authenticateUser(userpass, testuser, bb2pp(testpw)));
- assertFalse(tpc.proxy().authenticateUser(userpass, "EvilUser", bb2pp(testpw)));
-
- tpc.proxy().changeLocalUserPassword(userpass, testuser, ByteBuffer.wrap("newpass".getBytes()));
- assertFalse(tpc.proxy().authenticateUser(userpass, testuser, bb2pp(testpw)));
- assertTrue(tpc.proxy().authenticateUser(userpass, testuser, bb2pp(ByteBuffer.wrap("newpass".getBytes()))));
-
- }
-
- @Test
- public void tablePermissions() throws TException {
- tpc.proxy().grantTablePermission(userpass, testuser, testtable, TablePermission.ALTER_TABLE);
- assertTrue(tpc.proxy().hasTablePermission(userpass, testuser, testtable, TablePermission.ALTER_TABLE));
-
- tpc.proxy().revokeTablePermission(userpass, testuser, testtable, TablePermission.ALTER_TABLE);
- assertFalse(tpc.proxy().hasTablePermission(userpass, testuser, testtable, TablePermission.ALTER_TABLE));
-
- }
-
- @Test
- public void systemPermissions() throws TException {
- tpc.proxy().grantSystemPermission(userpass, testuser, SystemPermission.ALTER_USER);
- assertTrue(tpc.proxy().hasSystemPermission(userpass, testuser, SystemPermission.ALTER_USER));
-
- tpc.proxy().revokeSystemPermission(userpass, testuser, SystemPermission.ALTER_USER);
- assertFalse(tpc.proxy().hasSystemPermission(userpass, testuser, SystemPermission.ALTER_USER));
-
- }
-
- @Test
- public void auths() throws TException {
- HashSet<ByteBuffer> newauths = new HashSet<ByteBuffer>();
- newauths.add(ByteBuffer.wrap("BBR".getBytes()));
- newauths.add(ByteBuffer.wrap("Barney".getBytes()));
- tpc.proxy().changeUserAuthorizations(userpass, testuser, newauths);
- List<ByteBuffer> actualauths = tpc.proxy().getUserAuthorizations(userpass, testuser);
- assertEquals(actualauths.size(), newauths.size());
-
- for (ByteBuffer auth : actualauths) {
- assertTrue(newauths.contains(auth));
- }
- }
-
- private Map<String,String> bb2pp(ByteBuffer cf) {
- Map<String,String> toRet = new TreeMap<String,String>();
- toRet.put("password", ByteBufferUtil.toString(cf));
- return toRet;
- }
-
-}
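
The auths() round trip above, expressed against the native client API for reference (a sketch, assuming an existing Connector named conn):

    conn.securityOperations().changeUserAuthorizations("VonJines", new Authorizations("BBR", "Barney"));
    Authorizations actual = conn.securityOperations().getUserAuthorizations("VonJines");
    // actual now contains exactly BBR and Barney
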
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/proxy/TestProxyTableOperations.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/proxy/TestProxyTableOperations.java b/test/src/test/java/org/apache/accumulo/test/proxy/TestProxyTableOperations.java
deleted file mode 100644
index e8d7b1e..0000000
--- a/test/src/test/java/org/apache/accumulo/test/proxy/TestProxyTableOperations.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.proxy;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.nio.ByteBuffer;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.proxy.Proxy;
-import org.apache.accumulo.proxy.thrift.ColumnUpdate;
-import org.apache.accumulo.proxy.thrift.TimeType;
-import org.apache.thrift.TException;
-import org.apache.thrift.protocol.TCompactProtocol;
-import org.apache.thrift.server.TServer;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.google.common.net.HostAndPort;
-
-public class TestProxyTableOperations {
-
- protected static TServer proxy;
- protected static TestProxyClient tpc;
- protected static ByteBuffer userpass;
- protected static final int port = 10195;
- protected static final String testtable = "testtable";
-
- @BeforeClass
- public static void setup() throws Exception {
- Properties prop = new Properties();
- prop.setProperty("useMockInstance", "true");
- prop.put("tokenClass", PasswordToken.class.getName());
-
- proxy = Proxy.createProxyServer(HostAndPort.fromParts("localhost", port), new TCompactProtocol.Factory(), prop).server;
- while (!proxy.isServing()) {
- Thread.sleep(500);
- }
- tpc = new TestProxyClient("localhost", port);
- userpass = tpc.proxy().login("root", Collections.singletonMap("password", ""));
- }
-
- @AfterClass
- public static void tearDown() throws InterruptedException {
- proxy.stop();
- }
-
- @Before
- public void makeTestTable() throws Exception {
- tpc.proxy().createTable(userpass, testtable, true, TimeType.MILLIS);
- }
-
- @After
- public void deleteTestTable() throws Exception {
- tpc.proxy().deleteTable(userpass, testtable);
- }
-
- @Test
- public void createExistsDelete() throws TException {
- assertFalse(tpc.proxy().tableExists(userpass, "testtable2"));
- tpc.proxy().createTable(userpass, "testtable2", true, TimeType.MILLIS);
- assertTrue(tpc.proxy().tableExists(userpass, "testtable2"));
- tpc.proxy().deleteTable(userpass, "testtable2");
- assertFalse(tpc.proxy().tableExists(userpass, "testtable2"));
- }
-
- @Test
- public void listRename() throws TException {
- assertFalse(tpc.proxy().tableExists(userpass, "testtable2"));
- tpc.proxy().renameTable(userpass, testtable, "testtable2");
- assertTrue(tpc.proxy().tableExists(userpass, "testtable2"));
- tpc.proxy().renameTable(userpass, "testtable2", testtable);
- assertTrue(tpc.proxy().listTables(userpass).contains("testtable"));
-
- }
-
- // This test does not yet function because the backing Mock instance does not yet support merging
- @Test
- public void merge() throws TException {
- Set<ByteBuffer> splits = new HashSet<ByteBuffer>();
- splits.add(ByteBuffer.wrap("a".getBytes()));
- splits.add(ByteBuffer.wrap("c".getBytes()));
- splits.add(ByteBuffer.wrap("z".getBytes()));
- tpc.proxy().addSplits(userpass, testtable, splits);
-
- tpc.proxy().mergeTablets(userpass, testtable, ByteBuffer.wrap("b".getBytes()), ByteBuffer.wrap("d".getBytes()));
-
- splits.remove(ByteBuffer.wrap("c".getBytes()));
-
- List<ByteBuffer> tableSplits = tpc.proxy().listSplits(userpass, testtable, 10);
-
- for (ByteBuffer split : tableSplits)
- assertTrue(splits.contains(split));
- assertTrue(tableSplits.size() == splits.size());
-
- }
-
- @Test
- public void splits() throws TException {
- Set<ByteBuffer> splits = new HashSet<ByteBuffer>();
- splits.add(ByteBuffer.wrap("a".getBytes()));
- splits.add(ByteBuffer.wrap("b".getBytes()));
- splits.add(ByteBuffer.wrap("z".getBytes()));
- tpc.proxy().addSplits(userpass, testtable, splits);
-
- List<ByteBuffer> tableSplits = tpc.proxy().listSplits(userpass, testtable, 10);
-
- for (ByteBuffer split : tableSplits)
- assertTrue(splits.contains(split));
- assertTrue(tableSplits.size() == splits.size());
- }
-
- @Test
- public void constraints() throws TException {
- int cid = tpc.proxy().addConstraint(userpass, testtable, "org.apache.accumulo.TestConstraint");
- Map<String,Integer> constraints = tpc.proxy().listConstraints(userpass, testtable);
- assertEquals((int) constraints.get("org.apache.accumulo.TestConstraint"), cid);
- tpc.proxy().removeConstraint(userpass, testtable, cid);
- constraints = tpc.proxy().listConstraints(userpass, testtable);
- assertNull(constraints.get("org.apache.accumulo.TestConstraint"));
- }
-
- @Test
- public void localityGroups() throws TException {
- Map<String,Set<String>> groups = new HashMap<String,Set<String>>();
- Set<String> group1 = new HashSet<String>();
- group1.add("cf1");
- groups.put("group1", group1);
- Set<String> group2 = new HashSet<String>();
- group2.add("cf2");
- group2.add("cf3");
- groups.put("group2", group2);
- tpc.proxy().setLocalityGroups(userpass, testtable, groups);
-
- Map<String,Set<String>> actualGroups = tpc.proxy().getLocalityGroups(userpass, testtable);
-
- assertEquals(groups.size(), actualGroups.size());
- for (String groupName : groups.keySet()) {
- assertTrue(actualGroups.containsKey(groupName));
- assertEquals(groups.get(groupName).size(), actualGroups.get(groupName).size());
- for (String cf : groups.get(groupName)) {
- assertTrue(actualGroups.get(groupName).contains(cf));
- }
- }
- }
-
- @Test
- public void tableProperties() throws TException {
- tpc.proxy().setTableProperty(userpass, testtable, "test.property1", "wharrrgarbl");
- assertEquals(tpc.proxy().getTableProperties(userpass, testtable).get("test.property1"), "wharrrgarbl");
- tpc.proxy().removeTableProperty(userpass, testtable, "test.property1");
- assertNull(tpc.proxy().getTableProperties(userpass, testtable).get("test.property1"));
- }
-
- private static void addMutation(Map<ByteBuffer,List<ColumnUpdate>> mutations, String row, String cf, String cq, String value) {
- ColumnUpdate update = new ColumnUpdate(ByteBuffer.wrap(cf.getBytes()), ByteBuffer.wrap(cq.getBytes()));
- update.setValue(value.getBytes());
- mutations.put(ByteBuffer.wrap(row.getBytes()), Collections.singletonList(update));
- }
-
- @Test
- public void tableOperationsRowMethods() throws TException {
- Map<ByteBuffer,List<ColumnUpdate>> mutations = new HashMap<ByteBuffer,List<ColumnUpdate>>();
- for (int i = 0; i < 10; i++) {
- addMutation(mutations, "" + i, "cf", "cq", "");
- }
- tpc.proxy().updateAndFlush(userpass, testtable, mutations);
-
- assertEquals(tpc.proxy().getMaxRow(userpass, testtable, null, null, true, null, true), ByteBuffer.wrap("9".getBytes()));
-
- tpc.proxy().deleteRows(userpass, testtable, ByteBuffer.wrap("51".getBytes()), ByteBuffer.wrap("99".getBytes()));
- assertEquals(tpc.proxy().getMaxRow(userpass, testtable, null, null, true, null, true), ByteBuffer.wrap("5".getBytes()));
- }
-
-}
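
The tableOperationsRowMethods assertions above hinge on deleteRows treating its bounds as an exclusive start and inclusive end over lexicographic row order: deleting ("51", "99"] from the single-character rows "0".."9" removes "6" through "9" (each sorts after "51" and before "99"), leaving "5" as the maximum row. The equivalent native call, as a sketch assuming a Connector conn:

    // start row is exclusive, end row is inclusive; Text is org.apache.hadoop.io.Text
    conn.tableOperations().deleteRows("testtable", new Text("51"), new Text("99"));
    // rows r with "51" < r <= "99" are gone; getMaxRow(...) now returns "5"
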
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/replication/CyclicReplicationIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/replication/CyclicReplicationIT.java b/test/src/test/java/org/apache/accumulo/test/replication/CyclicReplicationIT.java
deleted file mode 100644
index 3a1d413..0000000
--- a/test/src/test/java/org/apache/accumulo/test/replication/CyclicReplicationIT.java
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.replication;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.BufferedOutputStream;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.OutputStream;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.NewTableConfiguration;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.LongCombiner.Type;
-import org.apache.accumulo.core.iterators.user.SummingCombiner;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.minicluster.impl.ProcessReference;
-import org.apache.accumulo.minicluster.impl.ZooKeeperBindException;
-import org.apache.accumulo.server.replication.ReplicaSystemFactory;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.accumulo.tserver.TabletServer;
-import org.apache.accumulo.tserver.replication.AccumuloReplicaSystem;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterables;
-
-/**
- * Verifies that two clusters replicating to each other do not replicate data endlessly.
- */
-public class CyclicReplicationIT {
- private static final Logger log = LoggerFactory.getLogger(CyclicReplicationIT.class);
-
- @Rule
- public Timeout getTimeout() {
- int scalingFactor = 1;
- try {
- scalingFactor = Integer.parseInt(System.getProperty("timeout.factor"));
- } catch (NumberFormatException exception) {
- log.warn("Could not parse timeout.factor, not scaling timeout");
- }
-
- return new Timeout(scalingFactor * 5 * 60 * 1000);
- }
-
- @Rule
- public TestName testName = new TestName();
-
- private File createTestDir(String name) {
- File baseDir = new File(System.getProperty("user.dir") + "/target/mini-tests");
- assertTrue(baseDir.mkdirs() || baseDir.isDirectory());
- File testDir = new File(baseDir, this.getClass().getName() + "_" + testName.getMethodName() + "_" + name);
- FileUtils.deleteQuietly(testDir);
- assertTrue(testDir.mkdir());
- return testDir;
- }
-
- private void setCoreSite(MiniAccumuloClusterImpl cluster) throws Exception {
- File csFile = new File(cluster.getConfig().getConfDir(), "core-site.xml");
- if (csFile.exists())
- throw new RuntimeException(csFile + " already exists");
-
- Configuration coreSite = new Configuration(false);
- coreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- OutputStream out = new BufferedOutputStream(new FileOutputStream(new File(cluster.getConfig().getConfDir(), "core-site.xml")));
- coreSite.writeXml(out);
- out.close();
- }
-
- /**
- * Use the same SSL and credential provider configuration that is set up by AbstractMacIT for the other MAC used for replication
- */
- private void updatePeerConfigFromPrimary(MiniAccumuloConfigImpl primaryCfg, MiniAccumuloConfigImpl peerCfg) {
- // Set the same SSL information from the primary when present
- Map<String,String> primarySiteConfig = primaryCfg.getSiteConfig();
- if ("true".equals(primarySiteConfig.get(Property.INSTANCE_RPC_SSL_ENABLED.getKey()))) {
- Map<String,String> peerSiteConfig = new HashMap<String,String>();
- peerSiteConfig.put(Property.INSTANCE_RPC_SSL_ENABLED.getKey(), "true");
- String keystorePath = primarySiteConfig.get(Property.RPC_SSL_KEYSTORE_PATH.getKey());
- Assert.assertNotNull("Keystore Path was null", keystorePath);
- peerSiteConfig.put(Property.RPC_SSL_KEYSTORE_PATH.getKey(), keystorePath);
- String truststorePath = primarySiteConfig.get(Property.RPC_SSL_TRUSTSTORE_PATH.getKey());
- Assert.assertNotNull("Truststore Path was null", truststorePath);
- peerSiteConfig.put(Property.RPC_SSL_TRUSTSTORE_PATH.getKey(), truststorePath);
-
- // Passwords might be stored in CredentialProvider
- String keystorePassword = primarySiteConfig.get(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey());
- if (null != keystorePassword) {
- peerSiteConfig.put(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey(), keystorePassword);
- }
- String truststorePassword = primarySiteConfig.get(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey());
- if (null != truststorePassword) {
- peerSiteConfig.put(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey(), truststorePassword);
- }
-
- System.out.println("Setting site configuration for peer " + peerSiteConfig);
- peerCfg.setSiteConfig(peerSiteConfig);
- }
-
- // Use the CredentialProvider if the primary also uses one
- String credProvider = primarySiteConfig.get(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey());
- if (null != credProvider) {
- Map<String,String> peerSiteConfig = peerCfg.getSiteConfig();
- peerSiteConfig.put(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey(), credProvider);
- peerCfg.setSiteConfig(peerSiteConfig);
- }
- }
-
- @Test
- public void dataIsNotOverReplicated() throws Exception {
- File master1Dir = createTestDir("master1"), master2Dir = createTestDir("master2");
- String password = "password";
-
- MiniAccumuloConfigImpl master1Cfg;
- MiniAccumuloClusterImpl master1Cluster;
- while (true) {
- master1Cfg = new MiniAccumuloConfigImpl(master1Dir, password);
- master1Cfg.setNumTservers(1);
- master1Cfg.setInstanceName("master1");
-
- // Set up SSL if needed
- ConfigurableMacBase.configureForEnvironment(master1Cfg, this.getClass(), ConfigurableMacBase.getSslDir(master1Dir));
-
- master1Cfg.setProperty(Property.REPLICATION_NAME, master1Cfg.getInstanceName());
- master1Cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "5M");
- master1Cfg.setProperty(Property.REPLICATION_THREADCHECK, "5m");
- master1Cfg.setProperty(Property.REPLICATION_WORK_ASSIGNMENT_SLEEP, "1s");
- master1Cfg.setProperty(Property.MASTER_REPLICATION_SCAN_INTERVAL, "1s");
- master1Cluster = new MiniAccumuloClusterImpl(master1Cfg);
- setCoreSite(master1Cluster);
-
- try {
- master1Cluster.start();
- break;
- } catch (ZooKeeperBindException e) {
- log.warn("Failed to start ZooKeeper on " + master1Cfg.getZooKeeperPort() + ", will retry");
- }
- }
-
- MiniAccumuloConfigImpl master2Cfg;
- MiniAccumuloClusterImpl master2Cluster;
- while (true) {
- master2Cfg = new MiniAccumuloConfigImpl(master2Dir, password);
- master2Cfg.setNumTservers(1);
- master2Cfg.setInstanceName("master2");
-
- // Set up SSL if needed. Need to share the same SSL truststore as master1
- this.updatePeerConfigFromPrimary(master1Cfg, master2Cfg);
-
- master2Cfg.setProperty(Property.REPLICATION_NAME, master2Cfg.getInstanceName());
- master2Cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "5M");
- master2Cfg.setProperty(Property.REPLICATION_THREADCHECK, "5m");
- master2Cfg.setProperty(Property.REPLICATION_WORK_ASSIGNMENT_SLEEP, "1s");
- master2Cfg.setProperty(Property.MASTER_REPLICATION_SCAN_INTERVAL, "1s");
- master2Cluster = new MiniAccumuloClusterImpl(master2Cfg);
- setCoreSite(master2Cluster);
-
- try {
- master2Cluster.start();
- break;
- } catch (ZooKeeperBindException e) {
- log.warn("Failed to start ZooKeeper on " + master2Cfg.getZooKeeperPort() + ", will retry");
- }
- }
-
- try {
- Connector connMaster1 = master1Cluster.getConnector("root", new PasswordToken(password)), connMaster2 = master2Cluster.getConnector("root",
- new PasswordToken(password));
-
- String master1UserName = "master1", master1Password = "foo";
- String master2UserName = "master2", master2Password = "bar";
- String master1Table = master1Cluster.getInstanceName(), master2Table = master2Cluster.getInstanceName();
-
- connMaster1.securityOperations().createLocalUser(master1UserName, new PasswordToken(master1Password));
- connMaster2.securityOperations().createLocalUser(master2UserName, new PasswordToken(master2Password));
-
- // Configure the credentials we should use to authenticate ourselves to the peer for replication
- connMaster1.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + master2Cluster.getInstanceName(), master2UserName);
- connMaster1.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + master2Cluster.getInstanceName(), master2Password);
-
- connMaster2.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + master1Cluster.getInstanceName(), master1UserName);
- connMaster2.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + master1Cluster.getInstanceName(), master1Password);
-
- connMaster1.instanceOperations().setProperty(
- Property.REPLICATION_PEERS.getKey() + master2Cluster.getInstanceName(),
- ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
- AccumuloReplicaSystem.buildConfiguration(master2Cluster.getInstanceName(), master2Cluster.getZooKeepers())));
-
- connMaster2.instanceOperations().setProperty(
- Property.REPLICATION_PEERS.getKey() + master1Cluster.getInstanceName(),
- ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
- AccumuloReplicaSystem.buildConfiguration(master1Cluster.getInstanceName(), master1Cluster.getZooKeepers())));
-
- connMaster1.tableOperations().create(master1Table, new NewTableConfiguration().withoutDefaultIterators());
- String master1TableId = connMaster1.tableOperations().tableIdMap().get(master1Table);
- Assert.assertNotNull(master1TableId);
-
- connMaster2.tableOperations().create(master2Table, new NewTableConfiguration().withoutDefaultIterators());
- String master2TableId = connMaster2.tableOperations().tableIdMap().get(master2Table);
- Assert.assertNotNull(master2TableId);
-
- // Replicate master1 in the master1 cluster to master2 in the master2 cluster
- connMaster1.tableOperations().setProperty(master1Table, Property.TABLE_REPLICATION.getKey(), "true");
- connMaster1.tableOperations().setProperty(master1Table, Property.TABLE_REPLICATION_TARGET.getKey() + master2Cluster.getInstanceName(), master2TableId);
-
- // Replicate master2 in the master2 cluster to master1 in the master1 cluster
- connMaster2.tableOperations().setProperty(master2Table, Property.TABLE_REPLICATION.getKey(), "true");
- connMaster2.tableOperations().setProperty(master2Table, Property.TABLE_REPLICATION_TARGET.getKey() + master1Cluster.getInstanceName(), master1TableId);
-
- // Give our replication user the ability to write to the respective table
- connMaster1.securityOperations().grantTablePermission(master1UserName, master1Table, TablePermission.WRITE);
- connMaster2.securityOperations().grantTablePermission(master2UserName, master2Table, TablePermission.WRITE);
-
- IteratorSetting summingCombiner = new IteratorSetting(50, SummingCombiner.class);
- SummingCombiner.setEncodingType(summingCombiner, Type.STRING);
- SummingCombiner.setCombineAllColumns(summingCombiner, true);
-
- // Set a combiner on both instances that will sum multiple values
- // We can use this to verify that the mutation was not sent multiple times
- connMaster1.tableOperations().attachIterator(master1Table, summingCombiner);
- connMaster2.tableOperations().attachIterator(master2Table, summingCombiner);
-
- // Write a single entry
- BatchWriter bw = connMaster1.createBatchWriter(master1Table, new BatchWriterConfig());
- Mutation m = new Mutation("row");
- m.put("count", "", "1");
- bw.addMutation(m);
- bw.close();
-
- Set<String> files = connMaster1.replicationOperations().referencedFiles(master1Table);
-
- log.info("Found {} that need replication from master1", files);
-
- // Kill and restart the tserver to close the WAL on master1
- for (ProcessReference proc : master1Cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
- master1Cluster.killProcess(ServerType.TABLET_SERVER, proc);
- }
-
- master1Cluster.exec(TabletServer.class);
-
- log.info("Restarted tserver on master1");
-
- // Try to avoid ACCUMULO-2964
- Thread.sleep(1000);
-
- // Sanity check that the element is there on master1
- Scanner s = connMaster1.createScanner(master1Table, Authorizations.EMPTY);
- Entry<Key,Value> entry = Iterables.getOnlyElement(s);
- Assert.assertEquals("1", entry.getValue().toString());
-
- // Wait for this table to replicate
- connMaster1.replicationOperations().drain(master1Table, files);
-
- Thread.sleep(5000);
-
- // Check that the element made it to master2 only once
- s = connMaster2.createScanner(master2Table, Authorizations.EMPTY);
- entry = Iterables.getOnlyElement(s);
- Assert.assertEquals("1", entry.getValue().toString());
-
- // Wait for master2 to finish replicating it back
- files = connMaster2.replicationOperations().referencedFiles(master2Table);
-
- // Kill and restart the tserver to close the WAL on master2
- for (ProcessReference proc : master2Cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
- master2Cluster.killProcess(ServerType.TABLET_SERVER, proc);
- }
-
- master2Cluster.exec(TabletServer.class);
-
- // Try to avoid ACCUMULO-2964
- Thread.sleep(1000);
-
- // Check that the element made it to master2 only once
- s = connMaster2.createScanner(master2Table, Authorizations.EMPTY);
- entry = Iterables.getOnlyElement(s);
- Assert.assertEquals("1", entry.getValue().toString());
-
- connMaster2.replicationOperations().drain(master2Table, files);
-
- Thread.sleep(5000);
-
- // Verify that the entry wasn't sent back to master1
- s = connMaster1.createScanner(master1Table, Authorizations.EMPTY);
- entry = Iterables.getOnlyElement(s);
- Assert.assertEquals("1", entry.getValue().toString());
- } finally {
- master1Cluster.stop();
- master2Cluster.stop();
- }
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/replication/GarbageCollectorCommunicatesWithTServersIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/replication/GarbageCollectorCommunicatesWithTServersIT.java b/test/src/test/java/org/apache/accumulo/test/replication/GarbageCollectorCommunicatesWithTServersIT.java
deleted file mode 100644
index ab142d0..0000000
--- a/test/src/test/java/org/apache/accumulo/test/replication/GarbageCollectorCommunicatesWithTServersIT.java
+++ /dev/null
@@ -1,417 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.replication;
-
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.impl.ClientContext;
-import org.apache.accumulo.core.client.impl.ClientExecReturn;
-import org.apache.accumulo.core.client.impl.Credentials;
-import org.apache.accumulo.core.client.impl.MasterClient;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.master.thrift.MasterClientService;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.protobuf.ProtobufUtil;
-import org.apache.accumulo.core.replication.ReplicationTable;
-import org.apache.accumulo.core.rpc.ThriftUtil;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Client;
-import org.apache.accumulo.core.trace.Tracer;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.log.WalStateManager;
-import org.apache.accumulo.server.log.WalStateManager.WalState;
-import org.apache.accumulo.server.replication.proto.Replication.Status;
-import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.net.HostAndPort;
-
-/**
- * ACCUMULO-3302 series of tests which ensure that a WAL is not prematurely closed while a TServer may still continue to use it. Checking that no tablet
- * references a WAL is insufficient to determine if a WAL will never be used in the future.
- */
-public class GarbageCollectorCommunicatesWithTServersIT extends ConfigurableMacBase {
- private static final Logger log = LoggerFactory.getLogger(GarbageCollectorCommunicatesWithTServersIT.class);
-
- private final int GC_PERIOD_SECONDS = 1;
-
- @Override
- public int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
- cfg.setNumTservers(1);
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
- cfg.setProperty(Property.GC_CYCLE_DELAY, GC_PERIOD_SECONDS + "s");
- // Wait longer to try to let the replication table come online before a cycle runs
- cfg.setProperty(Property.GC_CYCLE_START, "10s");
- cfg.setProperty(Property.REPLICATION_NAME, "master");
- // Set really long delays for the master to do stuff for replication. We don't need
- // it to be doing anything, so just let it sleep
- cfg.setProperty(Property.REPLICATION_WORK_PROCESSOR_DELAY, "240s");
- cfg.setProperty(Property.MASTER_REPLICATION_SCAN_INTERVAL, "240s");
- cfg.setProperty(Property.REPLICATION_DRIVER_DELAY, "240s");
- // Pull down the maximum size of the wal so we can test close()'ing it.
- cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "1M");
- coreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- /**
- * Fetch all of the WALs referenced by tablets in the metadata table for this table
- */
- private Set<String> getWalsForTable(String tableName) throws Exception {
- final Connector conn = getConnector();
- final String tableId = conn.tableOperations().tableIdMap().get(tableName);
-
- Assert.assertNotNull("Could not determine table ID for " + tableName, tableId);
-
- Instance i = conn.getInstance();
- ZooReaderWriter zk = new ZooReaderWriter(i.getZooKeepers(), i.getZooKeepersSessionTimeOut(), "");
- WalStateManager wals = new WalStateManager(conn.getInstance(), zk);
-
- Set<String> result = new HashSet<String>();
- for (Entry<Path,WalState> entry : wals.getAllState().entrySet()) {
- log.debug("Reading WALs: {}={}", entry.getKey(), entry.getValue());
- result.add(entry.getKey().toString());
- }
- return result;
- }
-
- /**
- * Fetch all of the rfiles referenced by tablets in the metadata table for this table
- */
- private Set<String> getFilesForTable(String tableName) throws Exception {
- final Connector conn = getConnector();
- final String tableId = conn.tableOperations().tableIdMap().get(tableName);
-
- Assert.assertNotNull("Could not determine table ID for " + tableName, tableId);
-
- Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- Range r = MetadataSchema.TabletsSection.getRange(tableId);
- s.setRange(r);
- s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
-
- Set<String> rfiles = new HashSet<String>();
- for (Entry<Key,Value> entry : s) {
- log.debug("Reading RFiles: {}={}", entry.getKey().toStringNoTruncate(), entry.getValue());
- // uri://path/to/wal
- String cq = entry.getKey().getColumnQualifier().toString();
- String path = new Path(cq).toString();
- log.debug("Normalize path to rfile: {}", path);
- rfiles.add(path);
- }
-
- return rfiles;
- }
-
- /**
- * Get the replication status messages for the given table that exist in the metadata table (~repl entries)
- */
- private Map<String,Status> getMetadataStatusForTable(String tableName) throws Exception {
- final Connector conn = getConnector();
- final String tableId = conn.tableOperations().tableIdMap().get(tableName);
-
- Assert.assertNotNull("Could not determine table ID for " + tableName, tableId);
-
- Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- Range r = MetadataSchema.ReplicationSection.getRange();
- s.setRange(r);
- s.fetchColumn(MetadataSchema.ReplicationSection.COLF, new Text(tableId));
-
- Map<String,Status> fileToStatus = new HashMap<String,Status>();
- for (Entry<Key,Value> entry : s) {
- Text file = new Text();
- MetadataSchema.ReplicationSection.getFile(entry.getKey(), file);
- Status status = Status.parseFrom(entry.getValue().get());
- log.info("Got status for {}: {}", file, ProtobufUtil.toString(status));
- fileToStatus.put(file.toString(), status);
- }
-
- return fileToStatus;
- }
-
- @Test
- public void testActiveWalPrecludesClosing() throws Exception {
- final String table = getUniqueNames(1)[0];
- final Connector conn = getConnector();
-
- // Bring the replication table online first and foremost
- ReplicationTable.setOnline(conn);
-
- log.info("Creating {}", table);
- conn.tableOperations().create(table);
-
- conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION.getKey(), "true");
-
- log.info("Writing a few mutations to the table");
-
- BatchWriter bw = conn.createBatchWriter(table, null);
-
- byte[] empty = new byte[0];
- for (int i = 0; i < 5; i++) {
- Mutation m = new Mutation(Integer.toString(i));
- m.put(empty, empty, empty);
- bw.addMutation(m);
- }
-
- log.info("Flushing mutations to the server");
- bw.flush();
-
- log.info("Checking that metadata only has one WAL recorded for this table");
-
- Set<String> wals = getWalsForTable(table);
- Assert.assertEquals("Expected to only find two WALs for the table", 2, wals.size());
-
- log.info("Compacting the table which will remove all WALs from the tablets");
-
- // Flush our test table to remove the WAL references in it
- conn.tableOperations().flush(table, null, null, true);
- // Flush the metadata table too because it will have a reference to the WAL
- conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
-
- log.info("Waiting for replication table to come online");
-
- log.info("Fetching replication statuses from metadata table");
-
- Map<String,Status> fileToStatus = getMetadataStatusForTable(table);
-
- Assert.assertEquals("Expected to only find one replication status message", 1, fileToStatus.size());
-
- String walName = fileToStatus.keySet().iterator().next();
- wals.retainAll(fileToStatus.keySet());
- Assert.assertEquals(1, wals.size());
-
- Status status = fileToStatus.get(walName);
-
- Assert.assertEquals("Expected Status for file to not be closed", false, status.getClosed());
-
- Set<String> filesForTable = getFilesForTable(table);
- Assert.assertEquals("Expected to only find one rfile for table", 1, filesForTable.size());
- log.info("Files for table before MajC: {}", filesForTable);
-
- // Issue a MajC to roll a new file in HDFS
- conn.tableOperations().compact(table, null, null, false, true);
-
- Set<String> filesForTableAfterCompaction = getFilesForTable(table);
-
- log.info("Files for table after MajC: {}", filesForTableAfterCompaction);
-
- Assert.assertEquals("Expected to only find one rfile for table", 1, filesForTableAfterCompaction.size());
- Assert.assertNotEquals("Expected the files before and after compaction to differ", filesForTableAfterCompaction, filesForTable);
-
- // Use the rfile which was just replaced by the MajC to determine when the GC has run
- Path fileToBeDeleted = new Path(filesForTable.iterator().next());
- FileSystem fs = getCluster().getFileSystem();
-
- boolean fileExists = fs.exists(fileToBeDeleted);
- while (fileExists) {
- log.info("File which should get deleted still exists: {}", fileToBeDeleted);
- Thread.sleep(2000);
- fileExists = fs.exists(fileToBeDeleted);
- }
-
- Map<String,Status> fileToStatusAfterMajc = getMetadataStatusForTable(table);
- Assert.assertEquals("Expected to still find only one replication status message: " + fileToStatusAfterMajc, 1, fileToStatusAfterMajc.size());
-
- Assert.assertEquals("Status before and after MajC should be identical", fileToStatus, fileToStatusAfterMajc);
- }
-
- @Test(timeout = 2 * 60 * 1000)
- public void testUnreferencedWalInTserverIsClosed() throws Exception {
- final String[] names = getUniqueNames(2);
- // `table` will be replicated, `otherTable` is only used to roll the WAL on the tserver
- final String table = names[0], otherTable = names[1];
- final Connector conn = getConnector();
-
- // Bring the replication table online first and foremost
- ReplicationTable.setOnline(conn);
-
- log.info("Creating {}", table);
- conn.tableOperations().create(table);
-
- conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION.getKey(), "true");
-
- log.info("Writing a few mutations to the table");
-
- BatchWriter bw = conn.createBatchWriter(table, null);
-
- byte[] empty = new byte[0];
- for (int i = 0; i < 5; i++) {
- Mutation m = new Mutation(Integer.toString(i));
- m.put(empty, empty, empty);
- bw.addMutation(m);
- }
-
- log.info("Flushing mutations to the server");
- bw.close();
-
- log.info("Checking that metadata only has one WAL recorded for this table");
-
- Set<String> wals = getWalsForTable(table);
- Assert.assertEquals("Expected to only find two WAL for the table", 2, wals.size());
-
- log.info("Compacting the table which will remove all WALs from the tablets");
-
- // Flush our test table to remove the WAL references in it
- conn.tableOperations().flush(table, null, null, true);
- // Flush the metadata table too because it will have a reference to the WAL
- conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
-
- log.info("Fetching replication statuses from metadata table");
-
- Map<String,Status> fileToStatus = getMetadataStatusForTable(table);
-
- Assert.assertEquals("Expected to only find one replication status message", 1, fileToStatus.size());
-
- String walName = fileToStatus.keySet().iterator().next();
- Assert.assertTrue("Expected log file name from tablet to equal replication entry", wals.contains(walName));
-
- Status status = fileToStatus.get(walName);
-
- Assert.assertEquals("Expected Status for file to not be closed", false, status.getClosed());
-
- Set<String> filesForTable = getFilesForTable(table);
- Assert.assertEquals("Expected to only find one rfile for table", 1, filesForTable.size());
- log.info("Files for table before MajC: {}", filesForTable);
-
- // Issue a MajC to roll a new file in HDFS
- conn.tableOperations().compact(table, null, null, false, true);
-
- Set<String> filesForTableAfterCompaction = getFilesForTable(table);
-
- log.info("Files for table after MajC: {}", filesForTableAfterCompaction);
-
- Assert.assertEquals("Expected to only find one rfile for table", 1, filesForTableAfterCompaction.size());
- Assert.assertNotEquals("Expected the files before and after compaction to differ", filesForTableAfterCompaction, filesForTable);
-
- // Use the rfile which was just replaced by the MajC to determine when the GC has run
- Path fileToBeDeleted = new Path(filesForTable.iterator().next());
- FileSystem fs = getCluster().getFileSystem();
-
- boolean fileExists = fs.exists(fileToBeDeleted);
- while (fileExists) {
- log.info("File which should get deleted still exists: {}", fileToBeDeleted);
- Thread.sleep(2000);
- fileExists = fs.exists(fileToBeDeleted);
- }
-
- // At this point in time, we *know* that the GarbageCollector has run which means that the Status
- // for our WAL should not be altered.
-
- Map<String,Status> fileToStatusAfterMajc = getMetadataStatusForTable(table);
- Assert.assertEquals("Expected to still find only one replication status message: " + fileToStatusAfterMajc, 1, fileToStatusAfterMajc.size());
-
- /*
- * To verify that the WAL is still getting closed, we have to force the tserver to close the existing WAL and open a new one instead. The easiest way to do
- * this is to write a load of data that will exceed the 1.33% full threshold that the logger keeps track of
- */
-
- conn.tableOperations().create(otherTable);
- bw = conn.createBatchWriter(otherTable, null);
- // 500k
- byte[] bigValue = new byte[1024 * 500];
- Arrays.fill(bigValue, (byte) 1);
- // 500k * 50
- for (int i = 0; i < 50; i++) {
- Mutation m = new Mutation(Integer.toString(i));
- m.put(empty, empty, bigValue);
- bw.addMutation(m);
- if (i % 10 == 0) {
- bw.flush();
- }
- }
-
- bw.close();
-
- conn.tableOperations().flush(otherTable, null, null, true);
-
- // Get the tservers which the master deems as active
- final ClientContext context = new ClientContext(conn.getInstance(), new Credentials("root", new PasswordToken(ConfigurableMacBase.ROOT_PASSWORD)),
- getClientConfig());
- List<String> tservers = MasterClient.execute(context, new ClientExecReturn<List<String>,MasterClientService.Client>() {
- @Override
- public List<String> execute(MasterClientService.Client client) throws Exception {
- return client.getActiveTservers(Tracer.traceInfo(), context.rpcCreds());
- }
- });
-
- Assert.assertEquals("Expected only one active tservers", 1, tservers.size());
-
- HostAndPort tserver = HostAndPort.fromString(tservers.get(0));
-
- // Get the active WALs from that server
- log.info("Fetching active WALs from {}", tserver);
-
- Client client = ThriftUtil.getTServerClient(tserver, context);
- List<String> activeWalsForTserver = client.getActiveLogs(Tracer.traceInfo(), context.rpcCreds());
-
- log.info("Active wals: {}", activeWalsForTserver);
-
- Assert.assertEquals("Expected to find only one active WAL", 1, activeWalsForTserver.size());
-
- String activeWal = new Path(activeWalsForTserver.get(0)).toString();
-
- Assert.assertNotEquals("Current active WAL on tserver should not be the original WAL we saw", walName, activeWal);
-
- log.info("Ensuring that replication status does get closed after WAL is no longer in use by Tserver");
-
- do {
- Map<String,Status> replicationStatuses = getMetadataStatusForTable(table);
-
- log.info("Got replication status messages {}", replicationStatuses);
- Assert.assertEquals("Did not expect to find additional status records", 1, replicationStatuses.size());
-
- status = replicationStatuses.values().iterator().next();
- log.info("Current status: {}", ProtobufUtil.toString(status));
-
- if (status.getClosed()) {
- return;
- }
-
- log.info("Status is not yet closed, waiting for garbage collector to close it");
-
- Thread.sleep(2000);
- } while (true);
- }
-}
[41/43] accumulo git commit: ACCUMULO-3871 move ITs into distro jar,
stop building test jar
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/CleanWalIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/CleanWalIT.java b/test/src/main/java/org/apache/accumulo/test/CleanWalIT.java
new file mode 100644
index 0000000..2474b3e
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/CleanWalIT.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Iterators;
+
+public class CleanWalIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(CleanWalIT.class);
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 4 * 60;
+ }
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "3s");
+ cfg.setNumTservers(1);
+ // use raw local file system so walogs sync and flush will work
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ @Before
+ public void offlineTraceTable() throws Exception {
+ Connector conn = getConnector();
+ String traceTable = conn.instanceOperations().getSystemConfiguration().get(Property.TRACE_TABLE.getKey());
+ if (conn.tableOperations().exists(traceTable)) {
+ conn.tableOperations().offline(traceTable, true);
+ }
+ }
+
+ @After
+ public void onlineTraceTable() throws Exception {
+ if (null != cluster) {
+ Connector conn = getConnector();
+ String traceTable = conn.instanceOperations().getSystemConfiguration().get(Property.TRACE_TABLE.getKey());
+ if (conn.tableOperations().exists(traceTable)) {
+ conn.tableOperations().online(traceTable, true);
+ }
+ }
+ }
+
+ // test for ACCUMULO-1830
+ @Test
+ public void test() throws Exception {
+ Connector conn = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ conn.tableOperations().create(tableName);
+ BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+ Mutation m = new Mutation("row");
+ m.put("cf", "cq", "value");
+ bw.addMutation(m);
+ bw.close();
+ getCluster().getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
+ // all 3 tables should do recovery, but before ACCUMULO-1830 was fixed the log file references were not actually removed
+
+ getCluster().getClusterControl().startAllServers(ServerType.TABLET_SERVER);
+
+ for (String table : new String[] {MetadataTable.NAME, RootTable.NAME})
+ conn.tableOperations().flush(table, null, null, true);
+ log.debug("Checking entries for " + tableName);
+ assertEquals(1, count(tableName, conn));
+ for (String table : new String[] {MetadataTable.NAME, RootTable.NAME}) {
+ log.debug("Checking logs for " + table);
+ assertEquals("Found logs for " + table, 0, countLogs(table, conn));
+ }
+
+ bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+ m = new Mutation("row");
+ m.putDelete("cf", "cq");
+ bw.addMutation(m);
+ bw.close();
+ assertEquals(0, count(tableName, conn));
+ conn.tableOperations().flush(tableName, null, null, true);
+ conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
+ conn.tableOperations().flush(RootTable.NAME, null, null, true);
+ try {
+ getCluster().getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
+ UtilWaitThread.sleep(3 * 1000);
+ } finally {
+ getCluster().getClusterControl().startAllServers(ServerType.TABLET_SERVER);
+ }
+ assertEquals(0, count(tableName, conn));
+ }
+
+ private int countLogs(String tableName, Connector conn) throws TableNotFoundException {
+ Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ scanner.fetchColumnFamily(MetadataSchema.TabletsSection.LogColumnFamily.NAME);
+ scanner.setRange(MetadataSchema.TabletsSection.getRange());
+ int count = 0;
+ for (Entry<Key,Value> entry : scanner) {
+ log.debug("Saw " + entry.getKey() + "=" + entry.getValue());
+ count++;
+ }
+ return count;
+ }
+
+ int count(String tableName, Connector conn) throws Exception {
+ Scanner s = conn.createScanner(tableName, Authorizations.EMPTY);
+ return Iterators.size(s.iterator());
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java b/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java
new file mode 100644
index 0000000..74d3593
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/ConditionalWriterIT.java
@@ -0,0 +1,1349 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.UUID;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.accumulo.cluster.AccumuloCluster;
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.ConditionalWriter;
+import org.apache.accumulo.core.client.ConditionalWriter.Result;
+import org.apache.accumulo.core.client.ConditionalWriter.Status;
+import org.apache.accumulo.core.client.ConditionalWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IsolatedScanner;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.RowIterator;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableDeletedException;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.TableOfflineException;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.data.ArrayByteSequence;
+import org.apache.accumulo.core.data.ByteSequence;
+import org.apache.accumulo.core.data.Condition;
+import org.apache.accumulo.core.data.ConditionalMutation;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.LongCombiner.Type;
+import org.apache.accumulo.core.iterators.user.SummingCombiner;
+import org.apache.accumulo.core.iterators.user.VersioningIterator;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.core.security.SystemPermission;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.core.trace.DistributedTrace;
+import org.apache.accumulo.core.trace.Span;
+import org.apache.accumulo.core.trace.Trace;
+import org.apache.accumulo.core.util.FastFormat;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.test.functional.BadIterator;
+import org.apache.accumulo.test.functional.SlowIterator;
+import org.apache.accumulo.tracer.TraceDump;
+import org.apache.accumulo.tracer.TraceDump.Printer;
+import org.apache.accumulo.tracer.TraceServer;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Iterables;
+
+public class ConditionalWriterIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(ConditionalWriterIT.class);
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ public static long abs(long l) {
+ l = Math.abs(l); // abs(Long.MIN_VALUE) == Long.MIN_VALUE...
+ if (l < 0)
+ return 0;
+ return l;
+ }
+
+ @Before
+ public void deleteUsers() throws Exception {
+ Connector conn = getConnector();
+ Set<String> users = conn.securityOperations().listLocalUsers();
+ ClusterUser user = getUser(0);
+ if (users.contains(user.getPrincipal())) {
+ conn.securityOperations().dropLocalUser(user.getPrincipal());
+ }
+ }
+
+ @Test
+ public void testBasic() throws Exception {
+
+ Connector conn = getConnector();
+ String tableName = getUniqueNames(1)[0];
+
+ conn.tableOperations().create(tableName);
+
+ ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig());
+
+ // mutation conditional on column tx:seq not existing
+ ConditionalMutation cm0 = new ConditionalMutation("99006", new Condition("tx", "seq"));
+ cm0.put("name", "last", "doe");
+ cm0.put("name", "first", "john");
+ cm0.put("tx", "seq", "1");
+ Assert.assertEquals(Status.ACCEPTED, cw.write(cm0).getStatus());
+ Assert.assertEquals(Status.REJECTED, cw.write(cm0).getStatus());
+
+ // mutation conditional on column tx:seq being 1
+ ConditionalMutation cm1 = new ConditionalMutation("99006", new Condition("tx", "seq").setValue("1"));
+ cm1.put("name", "last", "Doe");
+ cm1.put("tx", "seq", "2");
+ Assert.assertEquals(Status.ACCEPTED, cw.write(cm1).getStatus());
+
+ // test condition where value differs
+ ConditionalMutation cm2 = new ConditionalMutation("99006", new Condition("tx", "seq").setValue("1"));
+ cm2.put("name", "last", "DOE");
+ cm2.put("tx", "seq", "2");
+ Assert.assertEquals(Status.REJECTED, cw.write(cm2).getStatus());
+
+ // test condition where column does not exist
+ ConditionalMutation cm3 = new ConditionalMutation("99006", new Condition("txtypo", "seq").setValue("1"));
+ cm3.put("name", "last", "deo");
+ cm3.put("tx", "seq", "2");
+ Assert.assertEquals(Status.REJECTED, cw.write(cm3).getStatus());
+
+ // test two conditions, where one should fail
+ ConditionalMutation cm4 = new ConditionalMutation("99006", new Condition("tx", "seq").setValue("2"), new Condition("name", "last").setValue("doe"));
+ cm4.put("name", "last", "deo");
+ cm4.put("tx", "seq", "3");
+ Assert.assertEquals(Status.REJECTED, cw.write(cm4).getStatus());
+
+ // test two conditions, where one should fail
+ ConditionalMutation cm5 = new ConditionalMutation("99006", new Condition("tx", "seq").setValue("1"), new Condition("name", "last").setValue("Doe"));
+ cm5.put("name", "last", "deo");
+ cm5.put("tx", "seq", "3");
+ Assert.assertEquals(Status.REJECTED, cw.write(cm5).getStatus());
+
+ // ensure rejected mutations did not write
+ Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
+ scanner.fetchColumn(new Text("name"), new Text("last"));
+ scanner.setRange(new Range("99006"));
+ Entry<Key,Value> entry = Iterables.getOnlyElement(scanner);
+ Assert.assertEquals("Doe", entry.getValue().toString());
+
+ // test w/ two conditions that are met
+ ConditionalMutation cm6 = new ConditionalMutation("99006", new Condition("tx", "seq").setValue("2"), new Condition("name", "last").setValue("Doe"));
+ cm6.put("name", "last", "DOE");
+ cm6.put("tx", "seq", "3");
+ Assert.assertEquals(Status.ACCEPTED, cw.write(cm6).getStatus());
+
+ entry = Iterables.getOnlyElement(scanner);
+ Assert.assertEquals("DOE", entry.getValue().toString());
+
+ // test a conditional mutation that deletes
+ ConditionalMutation cm7 = new ConditionalMutation("99006", new Condition("tx", "seq").setValue("3"));
+ cm7.putDelete("name", "last");
+ cm7.putDelete("name", "first");
+ cm7.putDelete("tx", "seq");
+ Assert.assertEquals(Status.ACCEPTED, cw.write(cm7).getStatus());
+
+ Assert.assertFalse("Did not expect to find any results", scanner.iterator().hasNext());
+
+ // add the row back
+ Assert.assertEquals(Status.ACCEPTED, cw.write(cm0).getStatus());
+ Assert.assertEquals(Status.REJECTED, cw.write(cm0).getStatus());
+
+ entry = Iterables.getOnlyElement(scanner);
+ Assert.assertEquals("doe", entry.getValue().toString());
+ }
+
+ @Test
+ public void testFields() throws Exception {
+
+ Connector conn = getConnector();
+ String tableName = getUniqueNames(1)[0];
+
+ String user = null;
+ ClientConfiguration clientConf = cluster.getClientConfig();
+ final boolean saslEnabled = clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false);
+
+ ClusterUser user1 = getUser(0);
+ user = user1.getPrincipal();
+ if (saslEnabled) {
+ // The token is pointless for kerberos
+ conn.securityOperations().createLocalUser(user, null);
+ } else {
+ conn.securityOperations().createLocalUser(user, new PasswordToken(user1.getPassword()));
+ }
+
+ Authorizations auths = new Authorizations("A", "B");
+
+ conn.securityOperations().changeUserAuthorizations(user, auths);
+ conn.securityOperations().grantSystemPermission(user, SystemPermission.CREATE_TABLE);
+
+ conn = conn.getInstance().getConnector(user, user1.getToken());
+
+ conn.tableOperations().create(tableName);
+
+ ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig().setAuthorizations(auths));
+
+ ColumnVisibility cva = new ColumnVisibility("A");
+ ColumnVisibility cvb = new ColumnVisibility("B");
+
+ ConditionalMutation cm0 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cva));
+ cm0.put("name", "last", cva, "doe");
+ cm0.put("name", "first", cva, "john");
+ cm0.put("tx", "seq", cva, "1");
+ Assert.assertEquals(Status.ACCEPTED, cw.write(cm0).getStatus());
+
+ Scanner scanner = conn.createScanner(tableName, auths);
+ scanner.setRange(new Range("99006"));
+ // TODO verify all columns
+ scanner.fetchColumn(new Text("tx"), new Text("seq"));
+ Entry<Key,Value> entry = Iterables.getOnlyElement(scanner);
+ Assert.assertEquals("1", entry.getValue().toString());
+ long ts = entry.getKey().getTimestamp();
+
+ // test wrong colf
+ ConditionalMutation cm1 = new ConditionalMutation("99006", new Condition("txA", "seq").setVisibility(cva).setValue("1"));
+ cm1.put("name", "last", cva, "Doe");
+ cm1.put("name", "first", cva, "John");
+ cm1.put("tx", "seq", cva, "2");
+ Assert.assertEquals(Status.REJECTED, cw.write(cm1).getStatus());
+
+ // test wrong colq
+ ConditionalMutation cm2 = new ConditionalMutation("99006", new Condition("tx", "seqA").setVisibility(cva).setValue("1"));
+ cm2.put("name", "last", cva, "Doe");
+ cm2.put("name", "first", cva, "John");
+ cm2.put("tx", "seq", cva, "2");
+ Assert.assertEquals(Status.REJECTED, cw.write(cm2).getStatus());
+
+ // test wrong colv
+ ConditionalMutation cm3 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvb).setValue("1"));
+ cm3.put("name", "last", cva, "Doe");
+ cm3.put("name", "first", cva, "John");
+ cm3.put("tx", "seq", cva, "2");
+ Assert.assertEquals(Status.REJECTED, cw.write(cm3).getStatus());
+
+ // test wrong timestamp
+ ConditionalMutation cm4 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cva).setTimestamp(ts + 1).setValue("1"));
+ cm4.put("name", "last", cva, "Doe");
+ cm4.put("name", "first", cva, "John");
+ cm4.put("tx", "seq", cva, "2");
+ Assert.assertEquals(Status.REJECTED, cw.write(cm4).getStatus());
+
+ // test wrong timestamp
+ ConditionalMutation cm5 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cva).setTimestamp(ts - 1).setValue("1"));
+ cm5.put("name", "last", cva, "Doe");
+ cm5.put("name", "first", cva, "John");
+ cm5.put("tx", "seq", cva, "2");
+ Assert.assertEquals(Status.REJECTED, cw.write(cm5).getStatus());
+
+ // ensure no updates were made
+ entry = Iterables.getOnlyElement(scanner);
+ Assert.assertEquals("1", entry.getValue().toString());
+
+ // set all columns correctly
+ ConditionalMutation cm6 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cva).setTimestamp(ts).setValue("1"));
+ cm6.put("name", "last", cva, "Doe");
+ cm6.put("name", "first", cva, "John");
+ cm6.put("tx", "seq", cva, "2");
+ Assert.assertEquals(Status.ACCEPTED, cw.write(cm6).getStatus());
+
+ entry = Iterables.getOnlyElement(scanner);
+ Assert.assertEquals("2", entry.getValue().toString());
+
+ }
+
+ @Test
+ public void testBadColVis() throws Exception {
+ // test when a user sets a col vis in a condition that can never be seen
+
+ Connector conn = getConnector();
+ String tableName = getUniqueNames(1)[0];
+
+ conn.tableOperations().create(tableName);
+
+ Authorizations auths = new Authorizations("A", "B");
+
+ conn.securityOperations().changeUserAuthorizations(getAdminPrincipal(), auths);
+
+ Authorizations filteredAuths = new Authorizations("A");
+
+ ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig().setAuthorizations(filteredAuths));
+
+ ColumnVisibility cva = new ColumnVisibility("A");
+ ColumnVisibility cvb = new ColumnVisibility("B");
+ ColumnVisibility cvc = new ColumnVisibility("C");
+
+ // User has authorization, but didn't include it in the writer
+ ConditionalMutation cm0 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvb));
+ cm0.put("name", "last", cva, "doe");
+ cm0.put("name", "first", cva, "john");
+ cm0.put("tx", "seq", cva, "1");
+ Assert.assertEquals(Status.INVISIBLE_VISIBILITY, cw.write(cm0).getStatus());
+
+ ConditionalMutation cm1 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvb).setValue("1"));
+ cm1.put("name", "last", cva, "doe");
+ cm1.put("name", "first", cva, "john");
+ cm1.put("tx", "seq", cva, "1");
+ Assert.assertEquals(Status.INVISIBLE_VISIBILITY, cw.write(cm1).getStatus());
+
+ // User does not have the authorization
+ ConditionalMutation cm2 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvc));
+ cm2.put("name", "last", cva, "doe");
+ cm2.put("name", "first", cva, "john");
+ cm2.put("tx", "seq", cva, "1");
+ Assert.assertEquals(Status.INVISIBLE_VISIBILITY, cw.write(cm2).getStatus());
+
+ ConditionalMutation cm3 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvc).setValue("1"));
+ cm3.put("name", "last", cva, "doe");
+ cm3.put("name", "first", cva, "john");
+ cm3.put("tx", "seq", cva, "1");
+ Assert.assertEquals(Status.INVISIBLE_VISIBILITY, cw.write(cm3).getStatus());
+
+ // if any visibility is bad, good visibilities don't override
+ ConditionalMutation cm4 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvb), new Condition("tx", "seq").setVisibility(cva));
+
+ cm4.put("name", "last", cva, "doe");
+ cm4.put("name", "first", cva, "john");
+ cm4.put("tx", "seq", cva, "1");
+ Assert.assertEquals(Status.INVISIBLE_VISIBILITY, cw.write(cm4).getStatus());
+
+ ConditionalMutation cm5 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvb).setValue("1"), new Condition("tx", "seq")
+ .setVisibility(cva).setValue("1"));
+ cm5.put("name", "last", cva, "doe");
+ cm5.put("name", "first", cva, "john");
+ cm5.put("tx", "seq", cva, "1");
+ Assert.assertEquals(Status.INVISIBLE_VISIBILITY, cw.write(cm5).getStatus());
+
+ ConditionalMutation cm6 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvb).setValue("1"),
+ new Condition("tx", "seq").setVisibility(cva));
+ cm6.put("name", "last", cva, "doe");
+ cm6.put("name", "first", cva, "john");
+ cm6.put("tx", "seq", cva, "1");
+ Assert.assertEquals(Status.INVISIBLE_VISIBILITY, cw.write(cm6).getStatus());
+
+ ConditionalMutation cm7 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvb), new Condition("tx", "seq").setVisibility(cva)
+ .setValue("1"));
+ cm7.put("name", "last", cva, "doe");
+ cm7.put("name", "first", cva, "john");
+ cm7.put("tx", "seq", cva, "1");
+ Assert.assertEquals(Status.INVISIBLE_VISIBILITY, cw.write(cm7).getStatus());
+
+ cw.close();
+
+ // test passing auths that exceed users configured auths
+
+ Authorizations exceedingAuths = new Authorizations("A", "B", "D");
+ ConditionalWriter cw2 = conn.createConditionalWriter(tableName, new ConditionalWriterConfig().setAuthorizations(exceedingAuths));
+
+ ConditionalMutation cm8 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvb), new Condition("tx", "seq").setVisibility(cva)
+ .setValue("1"));
+ cm8.put("name", "last", cva, "doe");
+ cm8.put("name", "first", cva, "john");
+ cm8.put("tx", "seq", cva, "1");
+
+ try {
+ Status status = cw2.write(cm8).getStatus();
+ Assert.fail("Writing mutation with Authorizations the user doesn't have should fail. Got status: " + status);
+ } catch (AccumuloSecurityException ase) {
+ // expected, check specific failure?
+ } finally {
+ cw2.close();
+ }
+ }
+
+ @Test
+ public void testConstraints() throws Exception {
+ // ensure constraint violations are properly reported
+
+ Connector conn = getConnector();
+ String tableName = getUniqueNames(1)[0];
+
+ conn.tableOperations().create(tableName);
+ conn.tableOperations().addConstraint(tableName, AlphaNumKeyConstraint.class.getName());
+ conn.tableOperations().clone(tableName, tableName + "_clone", true, new HashMap<String,String>(), new HashSet<String>());
+
+ Scanner scanner = conn.createScanner(tableName + "_clone", new Authorizations());
+
+ ConditionalWriter cw = conn.createConditionalWriter(tableName + "_clone", new ConditionalWriterConfig());
+
+ ConditionalMutation cm0 = new ConditionalMutation("99006+", new Condition("tx", "seq"));
+ cm0.put("tx", "seq", "1");
+
+ Assert.assertEquals(Status.VIOLATED, cw.write(cm0).getStatus());
+ Assert.assertFalse("Should find no results in the table is mutation result was violated", scanner.iterator().hasNext());
+
+ ConditionalMutation cm1 = new ConditionalMutation("99006", new Condition("tx", "seq"));
+ cm1.put("tx", "seq", "1");
+
+ Assert.assertEquals(Status.ACCEPTED, cw.write(cm1).getStatus());
+ Assert.assertTrue("Accepted result should be returned when reading table", scanner.iterator().hasNext());
+
+ cw.close();
+ }
+
+ @Test
+ public void testIterators() throws Exception {
+
+ Connector conn = getConnector();
+ String tableName = getUniqueNames(1)[0];
+
+ conn.tableOperations().create(tableName, new NewTableConfiguration().withoutDefaultIterators());
+
+ BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+
+ Mutation m = new Mutation("ACCUMULO-1000");
+ m.put("count", "comments", "1");
+ bw.addMutation(m);
+ bw.addMutation(m);
+ bw.addMutation(m);
+
+ m = new Mutation("ACCUMULO-1001");
+ m.put("count2", "comments", "1");
+ bw.addMutation(m);
+ bw.addMutation(m);
+
+ m = new Mutation("ACCUMULO-1002");
+ m.put("count2", "comments", "1");
+ bw.addMutation(m);
+ bw.addMutation(m);
+
+ bw.close();
+
+ IteratorSetting iterConfig = new IteratorSetting(10, SummingCombiner.class);
+ SummingCombiner.setEncodingType(iterConfig, Type.STRING);
+ SummingCombiner.setColumns(iterConfig, Collections.singletonList(new IteratorSetting.Column("count")));
+
+ IteratorSetting iterConfig2 = new IteratorSetting(10, SummingCombiner.class);
+ SummingCombiner.setEncodingType(iterConfig2, Type.STRING);
+ SummingCombiner.setColumns(iterConfig2, Collections.singletonList(new IteratorSetting.Column("count2", "comments")));
+
+ IteratorSetting iterConfig3 = new IteratorSetting(5, VersioningIterator.class);
+ VersioningIterator.setMaxVersions(iterConfig3, 1);
+
+ Scanner scanner = conn.createScanner(tableName, new Authorizations());
+ scanner.addScanIterator(iterConfig);
+ scanner.setRange(new Range("ACCUMULO-1000"));
+ scanner.fetchColumn(new Text("count"), new Text("comments"));
+
+ Entry<Key,Value> entry = Iterables.getOnlyElement(scanner);
+ Assert.assertEquals("3", entry.getValue().toString());
+
+ ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig());
+
+ ConditionalMutation cm0 = new ConditionalMutation("ACCUMULO-1000", new Condition("count", "comments").setValue("3"));
+ cm0.put("count", "comments", "1");
+ Assert.assertEquals(Status.REJECTED, cw.write(cm0).getStatus());
+ entry = Iterables.getOnlyElement(scanner);
+ Assert.assertEquals("3", entry.getValue().toString());
+
+ ConditionalMutation cm1 = new ConditionalMutation("ACCUMULO-1000", new Condition("count", "comments").setIterators(iterConfig).setValue("3"));
+ cm1.put("count", "comments", "1");
+ Assert.assertEquals(Status.ACCEPTED, cw.write(cm1).getStatus());
+ entry = Iterables.getOnlyElement(scanner);
+ Assert.assertEquals("4", entry.getValue().toString());
+
+ ConditionalMutation cm2 = new ConditionalMutation("ACCUMULO-1000", new Condition("count", "comments").setValue("4"));
+ cm2.put("count", "comments", "1");
+ Assert.assertEquals(Status.REJECTED, cw.write(cm2).getStatus());
+ entry = Iterables.getOnlyElement(scanner);
+ Assert.assertEquals("4", entry.getValue().toString());
+
+ // run test with multiple iterators passed in same batch and condition with two iterators
+
+ ConditionalMutation cm3 = new ConditionalMutation("ACCUMULO-1000", new Condition("count", "comments").setIterators(iterConfig).setValue("4"));
+ cm3.put("count", "comments", "1");
+
+ ConditionalMutation cm4 = new ConditionalMutation("ACCUMULO-1001", new Condition("count2", "comments").setIterators(iterConfig2).setValue("2"));
+ cm4.put("count2", "comments", "1");
+
+ ConditionalMutation cm5 = new ConditionalMutation("ACCUMULO-1002", new Condition("count2", "comments").setIterators(iterConfig2, iterConfig3).setValue("2"));
+ cm5.put("count2", "comments", "1");
+
+ Iterator<Result> results = cw.write(Arrays.asList(cm3, cm4, cm5).iterator());
+ Map<String,Status> actual = new HashMap<String,Status>();
+
+ while (results.hasNext()) {
+ Result result = results.next();
+ String k = new String(result.getMutation().getRow());
+ Assert.assertFalse("Did not expect to see multiple resultus for the row: " + k, actual.containsKey(k));
+ actual.put(k, result.getStatus());
+ }
+
+ Map<String,Status> expected = new HashMap<String,Status>();
+ expected.put("ACCUMULO-1000", Status.ACCEPTED);
+ expected.put("ACCUMULO-1001", Status.ACCEPTED);
+ expected.put("ACCUMULO-1002", Status.REJECTED);
+
+ Assert.assertEquals(expected, actual);
+
+ // TODO test w/ table that has iterators configured
+
+ cw.close();
+ }
+
+ @Test
+ public void testBatch() throws Exception {
+
+ Connector conn = getConnector();
+ String tableName = getUniqueNames(1)[0];
+
+ conn.tableOperations().create(tableName);
+
+ conn.securityOperations().changeUserAuthorizations(getAdminPrincipal(), new Authorizations("A", "B"));
+
+ ColumnVisibility cvab = new ColumnVisibility("A|B");
+
+ ArrayList<ConditionalMutation> mutations = new ArrayList<ConditionalMutation>();
+
+ ConditionalMutation cm0 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvab));
+ cm0.put("name", "last", cvab, "doe");
+ cm0.put("name", "first", cvab, "john");
+ cm0.put("tx", "seq", cvab, "1");
+ mutations.add(cm0);
+
+ ConditionalMutation cm1 = new ConditionalMutation("59056", new Condition("tx", "seq").setVisibility(cvab));
+ cm1.put("name", "last", cvab, "doe");
+ cm1.put("name", "first", cvab, "jane");
+ cm1.put("tx", "seq", cvab, "1");
+ mutations.add(cm1);
+
+ ConditionalMutation cm2 = new ConditionalMutation("19059", new Condition("tx", "seq").setVisibility(cvab));
+ cm2.put("name", "last", cvab, "doe");
+ cm2.put("name", "first", cvab, "jack");
+ cm2.put("tx", "seq", cvab, "1");
+ mutations.add(cm2);
+
+ ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig().setAuthorizations(new Authorizations("A")));
+ Iterator<Result> results = cw.write(mutations.iterator());
+ int count = 0;
+ while (results.hasNext()) {
+ Result result = results.next();
+ Assert.assertEquals(Status.ACCEPTED, result.getStatus());
+ count++;
+ }
+
+ Assert.assertEquals(3, count);
+
+ Scanner scanner = conn.createScanner(tableName, new Authorizations("A"));
+ scanner.fetchColumn(new Text("tx"), new Text("seq"));
+
+ for (String row : new String[] {"99006", "59056", "19059"}) {
+ scanner.setRange(new Range(row));
+ Entry<Key,Value> entry = Iterables.getOnlyElement(scanner);
+ Assert.assertEquals("1", entry.getValue().toString());
+ }
+
+ TreeSet<Text> splits = new TreeSet<Text>();
+ splits.add(new Text("7"));
+ splits.add(new Text("3"));
+ conn.tableOperations().addSplits(tableName, splits);
+
+ mutations.clear();
+
+ ConditionalMutation cm3 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvab).setValue("1"));
+ cm3.put("name", "last", cvab, "Doe");
+ cm3.put("tx", "seq", cvab, "2");
+ mutations.add(cm3);
+
+ ConditionalMutation cm4 = new ConditionalMutation("59056", new Condition("tx", "seq").setVisibility(cvab));
+ cm4.put("name", "last", cvab, "Doe");
+ cm4.put("tx", "seq", cvab, "1");
+ mutations.add(cm4);
+
+ ConditionalMutation cm5 = new ConditionalMutation("19059", new Condition("tx", "seq").setVisibility(cvab).setValue("2"));
+ cm5.put("name", "last", cvab, "Doe");
+ cm5.put("tx", "seq", cvab, "3");
+ mutations.add(cm5);
+
+ results = cw.write(mutations.iterator());
+ int accepted = 0;
+ int rejected = 0;
+ while (results.hasNext()) {
+ Result result = results.next();
+ if (new String(result.getMutation().getRow()).equals("99006")) {
+ Assert.assertEquals(Status.ACCEPTED, result.getStatus());
+ accepted++;
+ } else {
+ Assert.assertEquals(Status.REJECTED, result.getStatus());
+ rejected++;
+ }
+ }
+
+ Assert.assertEquals("Expected only one accepted conditional mutation", 1, accepted);
+ Assert.assertEquals("Expected two rejected conditional mutations", 2, rejected);
+
+ for (String row : new String[] {"59056", "19059"}) {
+ scanner.setRange(new Range(row));
+ Entry<Key,Value> entry = Iterables.getOnlyElement(scanner);
+ Assert.assertEquals("1", entry.getValue().toString());
+ }
+
+ scanner.setRange(new Range("99006"));
+ Entry<Key,Value> entry = Iterables.getOnlyElement(scanner);
+ Assert.assertEquals("2", entry.getValue().toString());
+
+ scanner.clearColumns();
+ scanner.fetchColumn(new Text("name"), new Text("last"));
+ entry = Iterables.getOnlyElement(scanner);
+ Assert.assertEquals("Doe", entry.getValue().toString());
+
+ cw.close();
+ }
+
+ @Test
+ public void testBigBatch() throws Exception {
+
+ Connector conn = getConnector();
+ String tableName = getUniqueNames(1)[0];
+
+ conn.tableOperations().create(tableName);
+ conn.tableOperations().addSplits(tableName, nss("2", "4", "6"));
+
+ UtilWaitThread.sleep(2000);
+
+ int num = 100;
+
+ ArrayList<byte[]> rows = new ArrayList<byte[]>(num);
+ ArrayList<ConditionalMutation> cml = new ArrayList<ConditionalMutation>(num);
+
+ Random r = new Random();
+ byte[] e = new byte[0];
+
+ for (int i = 0; i < num; i++) {
+ rows.add(FastFormat.toZeroPaddedString(abs(r.nextLong()), 16, 16, e));
+ }
+
+ for (int i = 0; i < num; i++) {
+ ConditionalMutation cm = new ConditionalMutation(rows.get(i), new Condition("meta", "seq"));
+
+ cm.put("meta", "seq", "1");
+ cm.put("meta", "tx", UUID.randomUUID().toString());
+
+ cml.add(cm);
+ }
+
+ ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig());
+
+ Iterator<Result> results = cw.write(cml.iterator());
+
+ int count = 0;
+
+ // TODO check got each row back
+ while (results.hasNext()) {
+ Result result = results.next();
+ Assert.assertEquals(Status.ACCEPTED, result.getStatus());
+ count++;
+ }
+
+ Assert.assertEquals("Did not receive the expected number of results", num, count);
+
+ ArrayList<ConditionalMutation> cml2 = new ArrayList<ConditionalMutation>(num);
+
+ for (int i = 0; i < num; i++) {
+ ConditionalMutation cm = new ConditionalMutation(rows.get(i), new Condition("meta", "seq").setValue("1"));
+
+ cm.put("meta", "seq", "2");
+ cm.put("meta", "tx", UUID.randomUUID().toString());
+
+ cml2.add(cm);
+ }
+
+ count = 0;
+
+ results = cw.write(cml2.iterator());
+
+ while (results.hasNext()) {
+ Result result = results.next();
+ Assert.assertEquals(Status.ACCEPTED, result.getStatus());
+ count++;
+ }
+
+ Assert.assertEquals("Did not receive the expected number of results", num, count);
+
+ cw.close();
+ }
+
+ @Test
+ public void testBatchErrors() throws Exception {
+
+ Connector conn = getConnector();
+ String tableName = getUniqueNames(1)[0];
+
+ conn.tableOperations().create(tableName);
+ conn.tableOperations().addConstraint(tableName, AlphaNumKeyConstraint.class.getName());
+ conn.tableOperations().clone(tableName, tableName + "_clone", true, new HashMap<String,String>(), new HashSet<String>());
+
+ conn.securityOperations().changeUserAuthorizations(getAdminPrincipal(), new Authorizations("A", "B"));
+
+ ColumnVisibility cvaob = new ColumnVisibility("A|B");
+ ColumnVisibility cvaab = new ColumnVisibility("A&B");
+
+ switch ((new Random()).nextInt(3)) {
+ case 1:
+ conn.tableOperations().addSplits(tableName, nss("6"));
+ break;
+ case 2:
+ conn.tableOperations().addSplits(tableName, nss("2", "95"));
+ break;
+ }
+
+ ArrayList<ConditionalMutation> mutations = new ArrayList<ConditionalMutation>();
+
+ ConditionalMutation cm0 = new ConditionalMutation("99006", new Condition("tx", "seq").setVisibility(cvaob));
+ cm0.put("name+", "last", cvaob, "doe");
+ cm0.put("name", "first", cvaob, "john");
+ cm0.put("tx", "seq", cvaob, "1");
+ mutations.add(cm0);
+
+ ConditionalMutation cm1 = new ConditionalMutation("59056", new Condition("tx", "seq").setVisibility(cvaab));
+ cm1.put("name", "last", cvaab, "doe");
+ cm1.put("name", "first", cvaab, "jane");
+ cm1.put("tx", "seq", cvaab, "1");
+ mutations.add(cm1);
+
+ ConditionalMutation cm2 = new ConditionalMutation("19059", new Condition("tx", "seq").setVisibility(cvaob));
+ cm2.put("name", "last", cvaob, "doe");
+ cm2.put("name", "first", cvaob, "jack");
+ cm2.put("tx", "seq", cvaob, "1");
+ mutations.add(cm2);
+
+ ConditionalMutation cm3 = new ConditionalMutation("90909", new Condition("tx", "seq").setVisibility(cvaob).setValue("1"));
+ cm3.put("name", "last", cvaob, "doe");
+ cm3.put("name", "first", cvaob, "john");
+ cm3.put("tx", "seq", cvaob, "2");
+ mutations.add(cm3);
+
+ ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig().setAuthorizations(new Authorizations("A")));
+ Iterator<Result> results = cw.write(mutations.iterator());
+ HashSet<String> rows = new HashSet<String>();
+ while (results.hasNext()) {
+ Result result = results.next();
+ String row = new String(result.getMutation().getRow());
+ if (row.equals("19059")) {
+ Assert.assertEquals(Status.ACCEPTED, result.getStatus());
+ } else if (row.equals("59056")) {
+ Assert.assertEquals(Status.INVISIBLE_VISIBILITY, result.getStatus());
+ } else if (row.equals("99006")) {
+ Assert.assertEquals(Status.VIOLATED, result.getStatus());
+ } else if (row.equals("90909")) {
+ Assert.assertEquals(Status.REJECTED, result.getStatus());
+ }
+ rows.add(row);
+ }
+
+ Assert.assertEquals(4, rows.size());
+
+ Scanner scanner = conn.createScanner(tableName, new Authorizations("A"));
+ scanner.fetchColumn(new Text("tx"), new Text("seq"));
+
+ Entry<Key,Value> entry = Iterables.getOnlyElement(scanner);
+ Assert.assertEquals("1", entry.getValue().toString());
+
+ cw.close();
+ }
+
+ @Test
+ public void testSameRow() throws Exception {
+ // test multiple mutations for same row in same batch
+
+ Connector conn = getConnector();
+ String tableName = getUniqueNames(1)[0];
+
+ conn.tableOperations().create(tableName);
+
+ ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig());
+
+ ConditionalMutation cm1 = new ConditionalMutation("r1", new Condition("tx", "seq"));
+ cm1.put("tx", "seq", "1");
+ cm1.put("data", "x", "a");
+
+ Assert.assertEquals(Status.ACCEPTED, cw.write(cm1).getStatus());
+
+ ConditionalMutation cm2 = new ConditionalMutation("r1", new Condition("tx", "seq").setValue("1"));
+ cm2.put("tx", "seq", "2");
+ cm2.put("data", "x", "b");
+
+ ConditionalMutation cm3 = new ConditionalMutation("r1", new Condition("tx", "seq").setValue("1"));
+ cm3.put("tx", "seq", "2");
+ cm3.put("data", "x", "c");
+
+ ConditionalMutation cm4 = new ConditionalMutation("r1", new Condition("tx", "seq").setValue("1"));
+ cm4.put("tx", "seq", "2");
+ cm4.put("data", "x", "d");
+
+ Iterator<Result> results = cw.write(Arrays.asList(cm2, cm3, cm4).iterator());
+
+ int accepted = 0;
+ int rejected = 0;
+ int total = 0;
+
+ while (results.hasNext()) {
+ Status status = results.next().getStatus();
+ if (status == Status.ACCEPTED)
+ accepted++;
+ if (status == Status.REJECTED)
+ rejected++;
+ total++;
+ }
+
+ Assert.assertEquals("Expected one accepted result", 1, accepted);
+ Assert.assertEquals("Expected two rejected results", 2, rejected);
+ Assert.assertEquals("Expected three total results", 3, total);
+
+ cw.close();
+ }
+
+ private static class Stats {
+
+ ByteSequence row = null;
+ int seq;
+ long sum;
+ int[] data = new int[10];
+
+ public Stats(Iterator<Entry<Key,Value>> iterator) {
+ while (iterator.hasNext()) {
+ Entry<Key,Value> entry = iterator.next();
+
+ if (row == null)
+ row = entry.getKey().getRowData();
+
+ String cf = entry.getKey().getColumnFamilyData().toString();
+ String cq = entry.getKey().getColumnQualifierData().toString();
+
+ if (cf.equals("data")) {
+ data[Integer.parseInt(cq)] = Integer.parseInt(entry.getValue().toString());
+ } else if (cf.equals("meta")) {
+ if (cq.equals("sum")) {
+ sum = Long.parseLong(entry.getValue().toString());
+ } else if (cq.equals("seq")) {
+ seq = Integer.parseInt(entry.getValue().toString());
+ }
+ }
+ }
+
+ long sum2 = 0;
+
+ for (int datum : data) {
+ sum2 += datum;
+ }
+
+ Assert.assertEquals(sum2, sum);
+ }
+
+ public Stats(ByteSequence row) {
+ this.row = row;
+ for (int i = 0; i < data.length; i++) {
+ this.data[i] = 0;
+ }
+ this.seq = -1;
+ this.sum = 0;
+ }
+
+ void set(int index, int value) {
+ sum -= data[index];
+ sum += value;
+ data[index] = value;
+ }
+
+ ConditionalMutation toMutation() {
+ Condition cond = new Condition("meta", "seq");
+ if (seq >= 0)
+ cond.setValue(seq + "");
+
+ ConditionalMutation cm = new ConditionalMutation(row, cond);
+
+ cm.put("meta", "seq", (seq + 1) + "");
+ cm.put("meta", "sum", (sum) + "");
+
+ for (int i = 0; i < data.length; i++) {
+ cm.put("data", i + "", data[i] + "");
+ }
+
+ return cm;
+ }
+
+ @Override
+ public String toString() {
+ return row + " " + seq + " " + sum;
+ }
+ }
+
+ private static class MutatorTask implements Runnable {
+ String table;
+ ArrayList<ByteSequence> rows;
+ ConditionalWriter cw;
+ Connector conn;
+ AtomicBoolean failed;
+
+ public MutatorTask(String table, Connector conn, ArrayList<ByteSequence> rows, ConditionalWriter cw, AtomicBoolean failed) {
+ this.table = table;
+ this.rows = rows;
+ this.conn = conn;
+ this.cw = cw;
+ this.failed = failed;
+ }
+
+ @Override
+ public void run() {
+ try {
+ Random rand = new Random();
+
+ Scanner scanner = new IsolatedScanner(conn.createScanner(table, Authorizations.EMPTY));
+
+ for (int i = 0; i < 20; i++) {
+ int numRows = rand.nextInt(10) + 1;
+
+ ArrayList<ByteSequence> changes = new ArrayList<ByteSequence>(numRows);
+ ArrayList<ConditionalMutation> mutations = new ArrayList<ConditionalMutation>();
+
+ for (int j = 0; j < numRows; j++)
+ changes.add(rows.get(rand.nextInt(rows.size())));
+
+ for (ByteSequence row : changes) {
+ scanner.setRange(new Range(row.toString()));
+ Stats stats = new Stats(scanner.iterator());
+ stats.set(rand.nextInt(10), rand.nextInt(Integer.MAX_VALUE));
+ mutations.add(stats.toMutation());
+ }
+
+ ArrayList<ByteSequence> changed = new ArrayList<ByteSequence>(numRows);
+ Iterator<Result> results = cw.write(mutations.iterator());
+ while (results.hasNext()) {
+ Result result = results.next();
+ changed.add(new ArrayByteSequence(result.getMutation().getRow()));
+ }
+
+ Collections.sort(changes);
+ Collections.sort(changed);
+
+ Assert.assertEquals(changes, changed);
+
+ }
+
+ } catch (Exception e) {
+ log.error("{}", e.getMessage(), e);
+ failed.set(true);
+ }
+ }
+ }
+
+ @Test
+ public void testThreads() throws Exception {
+ // test multiple threads using a single conditional writer
+
+ String table = getUniqueNames(1)[0];
+ Connector conn = getConnector();
+
+ conn.tableOperations().create(table);
+
+ Random rand = new Random();
+
+ switch (rand.nextInt(3)) {
+ case 1:
+ conn.tableOperations().addSplits(table, nss("4"));
+ break;
+ case 2:
+ conn.tableOperations().addSplits(table, nss("3", "5"));
+ break;
+ }
+
+ ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig());
+
+ ArrayList<ByteSequence> rows = new ArrayList<ByteSequence>();
+
+ for (int i = 0; i < 1000; i++) {
+ rows.add(new ArrayByteSequence(FastFormat.toZeroPaddedString(abs(rand.nextLong()), 16, 16, new byte[0])));
+ }
+
+ ArrayList<ConditionalMutation> mutations = new ArrayList<ConditionalMutation>();
+
+ for (ByteSequence row : rows)
+ mutations.add(new Stats(row).toMutation());
+
+ ArrayList<ByteSequence> rows2 = new ArrayList<ByteSequence>();
+ Iterator<Result> results = cw.write(mutations.iterator());
+ while (results.hasNext()) {
+ Result result = results.next();
+ Assert.assertEquals(Status.ACCEPTED, result.getStatus());
+ rows2.add(new ArrayByteSequence(result.getMutation().getRow()));
+ }
+
+ Collections.sort(rows);
+ Collections.sort(rows2);
+
+ Assert.assertEquals(rows, rows2);
+
+ AtomicBoolean failed = new AtomicBoolean(false);
+
+ ExecutorService tp = Executors.newFixedThreadPool(5);
+ for (int i = 0; i < 5; i++) {
+ tp.submit(new MutatorTask(table, conn, rows, cw, failed));
+ }
+
+ tp.shutdown();
+
+ while (!tp.isTerminated()) {
+ tp.awaitTermination(1, TimeUnit.MINUTES);
+ }
+
+ Assert.assertFalse("A MutatorTask failed with an exception", failed.get());
+
+ Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
+
+ RowIterator rowIter = new RowIterator(scanner);
+
+ while (rowIter.hasNext()) {
+ Iterator<Entry<Key,Value>> row = rowIter.next();
+ new Stats(row);
+ }
+ }
+
+ private SortedSet<Text> nss(String... splits) {
+ TreeSet<Text> ret = new TreeSet<Text>();
+ for (String split : splits)
+ ret.add(new Text(split));
+
+ return ret;
+ }
+
+ @Test
+ public void testSecurity() throws Exception {
+ // test against tables the user does not have read and/or write permissions for
+ Connector conn = getConnector();
+ String user = null;
+ ClientConfiguration clientConf = cluster.getClientConfig();
+ final boolean saslEnabled = clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false);
+
+ // Create a new user
+ ClusterUser user1 = getUser(0);
+ user = user1.getPrincipal();
+ if (saslEnabled) {
+ conn.securityOperations().createLocalUser(user, null);
+ } else {
+ conn.securityOperations().createLocalUser(user, new PasswordToken(user1.getPassword()));
+ }
+
+ String[] tables = getUniqueNames(3);
+ String table1 = tables[0], table2 = tables[1], table3 = tables[2];
+
+ // Create three tables
+ conn.tableOperations().create(table1);
+ conn.tableOperations().create(table2);
+ conn.tableOperations().create(table3);
+
+ // Grant R on table1, W on table2, R/W on table3
+ conn.securityOperations().grantTablePermission(user, table1, TablePermission.READ);
+ conn.securityOperations().grantTablePermission(user, table2, TablePermission.WRITE);
+ conn.securityOperations().grantTablePermission(user, table3, TablePermission.READ);
+ conn.securityOperations().grantTablePermission(user, table3, TablePermission.WRITE);
+
+ // Login as the user
+ Connector conn2 = conn.getInstance().getConnector(user, user1.getToken());
+
+ ConditionalMutation cm1 = new ConditionalMutation("r1", new Condition("tx", "seq"));
+ cm1.put("tx", "seq", "1");
+ cm1.put("data", "x", "a");
+
+ ConditionalWriter cw1 = conn2.createConditionalWriter(table1, new ConditionalWriterConfig());
+ ConditionalWriter cw2 = conn2.createConditionalWriter(table2, new ConditionalWriterConfig());
+ ConditionalWriter cw3 = conn2.createConditionalWriter(table3, new ConditionalWriterConfig());
+
+ // Should be able to conditional-update a table we have R/W on
+ Assert.assertEquals(Status.ACCEPTED, cw3.write(cm1).getStatus());
+
+ // Conditional-update to a table we only have read on should fail
+ try {
+ Status status = cw1.write(cm1).getStatus();
+ Assert.fail("Expected exception writing conditional mutation to table the user doesn't have write access to, Got status: " + status);
+ } catch (AccumuloSecurityException ase) {
+
+ }
+
+ // Conditional-update to a table we only have write on should fail
+ try {
+ Status status = cw2.write(cm1).getStatus();
+ Assert.fail("Expected exception writing conditional mutation to table the user doesn't have read access to. Got status: " + status);
+ } catch (AccumuloSecurityException ase) {
+
+ }
+ }
+
+ @Test
+ public void testTimeout() throws Exception {
+ Connector conn = getConnector();
+
+ String table = getUniqueNames(1)[0];
+
+ conn.tableOperations().create(table);
+
+ ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig().setTimeout(3, TimeUnit.SECONDS));
+
+ ConditionalMutation cm1 = new ConditionalMutation("r1", new Condition("tx", "seq"));
+ cm1.put("tx", "seq", "1");
+ cm1.put("data", "x", "a");
+
+ Assert.assertEquals(Status.ACCEPTED, cw.write(cm1).getStatus());
+
+ IteratorSetting is = new IteratorSetting(5, SlowIterator.class);
+ SlowIterator.setSeekSleepTime(is, 5000);
+
+ ConditionalMutation cm2 = new ConditionalMutation("r1", new Condition("tx", "seq").setValue("1").setIterators(is));
+ cm2.put("tx", "seq", "2");
+ cm2.put("data", "x", "b");
+
+ Assert.assertEquals(Status.UNKNOWN, cw.write(cm2).getStatus());
+
+ Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
+
+ for (Entry<Key,Value> entry : scanner) {
+ String cf = entry.getKey().getColumnFamilyData().toString();
+ String cq = entry.getKey().getColumnQualifierData().toString();
+ String val = entry.getValue().toString();
+
+ if (cf.equals("tx") && cq.equals("seq"))
+ Assert.assertEquals("Unexpected value in tx:seq", "1", val);
+ else if (cf.equals("data") && cq.equals("x"))
+ Assert.assertEquals("Unexpected value in data:x", "a", val);
+ else
+ Assert.fail("Saw unexpected column family and qualifier: " + entry);
+ }
+
+ ConditionalMutation cm3 = new ConditionalMutation("r1", new Condition("tx", "seq").setValue("1"));
+ cm3.put("tx", "seq", "2");
+ cm3.put("data", "x", "b");
+
+ Assert.assertEquals(Status.ACCEPTED, cw.write(cm3).getStatus());
+
+ cw.close();
+ }
+
+ @Test
+ public void testDeleteTable() throws Exception {
+ String table = getUniqueNames(1)[0];
+ Connector conn = getConnector();
+
+ try {
+ conn.createConditionalWriter(table, new ConditionalWriterConfig());
+ Assert.fail("Creating conditional writer for table that doesn't exist should fail");
+ } catch (TableNotFoundException e) {}
+
+ conn.tableOperations().create(table);
+
+ ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig());
+
+ conn.tableOperations().delete(table);
+
+ ConditionalMutation cm1 = new ConditionalMutation("r1", new Condition("tx", "seq"));
+ cm1.put("tx", "seq", "1");
+ cm1.put("data", "x", "a");
+
+ Result result = cw.write(cm1);
+
+ try {
+ Status status = result.getStatus();
+ Assert.fail("Expected exception writing conditional mutation to deleted table. Got status: " + status);
+ } catch (AccumuloException ae) {
+ Assert.assertEquals(TableDeletedException.class, ae.getCause().getClass());
+ }
+ }
+
+ @Test
+ public void testOffline() throws Exception {
+ String table = getUniqueNames(1)[0];
+ Connector conn = getConnector();
+
+ conn.tableOperations().create(table);
+
+ ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig());
+
+ conn.tableOperations().offline(table, true);
+
+ ConditionalMutation cm1 = new ConditionalMutation("r1", new Condition("tx", "seq"));
+ cm1.put("tx", "seq", "1");
+ cm1.put("data", "x", "a");
+
+ Result result = cw.write(cm1);
+
+ try {
+ Status status = result.getStatus();
+ Assert.fail("Expected exception writing conditional mutation to offline table. Got status: " + status);
+ } catch (AccumuloException ae) {
+ Assert.assertEquals(TableOfflineException.class, ae.getCause().getClass());
+ }
+
+ cw.close();
+
+ try {
+ conn.createConditionalWriter(table, new ConditionalWriterConfig());
+ Assert.fail("Expected exception creating conditional writer to offline table");
+ } catch (TableOfflineException e) {}
+ }
+
+ @Test
+ public void testError() throws Exception {
+ String table = getUniqueNames(1)[0];
+ Connector conn = getConnector();
+
+ conn.tableOperations().create(table);
+
+ ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig());
+
+ IteratorSetting iterSetting = new IteratorSetting(5, BadIterator.class);
+
+ ConditionalMutation cm1 = new ConditionalMutation("r1", new Condition("tx", "seq").setIterators(iterSetting));
+ cm1.put("tx", "seq", "1");
+ cm1.put("data", "x", "a");
+
+ Result result = cw.write(cm1);
+
+ try {
+ Status status = result.getStatus();
+ Assert.fail("Expected exception using iterator which throws an error, Got status: " + status);
+ } catch (AccumuloException ae) {
+
+ }
+
+ cw.close();
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void testNoConditions() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException {
+ String table = getUniqueNames(1)[0];
+ Connector conn = getConnector();
+
+ conn.tableOperations().create(table);
+
+ ConditionalWriter cw = conn.createConditionalWriter(table, new ConditionalWriterConfig());
+
+ ConditionalMutation cm1 = new ConditionalMutation("r1");
+ cm1.put("tx", "seq", "1");
+ cm1.put("data", "x", "a");
+
+ cw.write(cm1);
+ }
+
+ @Test
+ public void testTrace() throws Exception {
+ // Need to add a getClientConfig() to AccumuloCluster
+ Assume.assumeTrue(getClusterType() == ClusterType.MINI);
+ Process tracer = null;
+ Connector conn = getConnector();
+ AccumuloCluster cluster = getCluster();
+ MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
+ if (!conn.tableOperations().exists("trace")) {
+ tracer = mac.exec(TraceServer.class);
+ while (!conn.tableOperations().exists("trace")) {
+ UtilWaitThread.sleep(1000);
+ }
+ }
+
+ String tableName = getUniqueNames(1)[0];
+ conn.tableOperations().create(tableName);
+
+ DistributedTrace.enable("localhost", "testTrace", mac.getClientConfig());
+ UtilWaitThread.sleep(1000);
+ Span root = Trace.on("traceTest");
+ ConditionalWriter cw = conn.createConditionalWriter(tableName, new ConditionalWriterConfig());
+
+ // mutation conditional on column tx:seq not existing
+ ConditionalMutation cm0 = new ConditionalMutation("99006", new Condition("tx", "seq"));
+ cm0.put("name", "last", "doe");
+ cm0.put("name", "first", "john");
+ cm0.put("tx", "seq", "1");
+ Assert.assertEquals(Status.ACCEPTED, cw.write(cm0).getStatus());
+ root.stop();
+
+ final Scanner scanner = conn.createScanner("trace", Authorizations.EMPTY);
+ scanner.setRange(new Range(new Text(Long.toHexString(root.traceId()))));
+ loop: while (true) {
+ final StringBuffer finalBuffer = new StringBuffer();
+ int traceCount = TraceDump.printTrace(scanner, new Printer() {
+ @Override
+ public void print(final String line) {
+ try {
+ finalBuffer.append(line).append("\n");
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+ });
+ String traceOutput = finalBuffer.toString();
+ log.info("Trace output:" + traceOutput);
+ if (traceCount > 0) {
+ int lastPos = 0;
+ for (String part : "traceTest, startScan,startConditionalUpdate,conditionalUpdate,Check conditions,apply conditional mutations".split(",")) {
+ log.info("Looking in trace output for '" + part + "'");
+ int pos = traceOutput.indexOf(part);
+ if (-1 == pos) {
+ log.info("Trace output doesn't contain '" + part + "'");
+ Thread.sleep(1000);
+ continue loop;
+ }
+ assertTrue("Did not find '" + part + "' in output", pos > 0);
+ assertTrue("'" + part + "' occurred earlier than the previous element unexpectedly", pos > lastPos);
+ lastPos = pos;
+ }
+ break;
+ } else {
+ log.info("Ignoring trace output as traceCount not greater than zero: " + traceCount);
+ Thread.sleep(1000);
+ }
+ }
+ if (tracer != null) {
+ tracer.destroy();
+ }
+ }
+}
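
A note on the API surface these tests exercise: the core check-and-set idiom is small. Below is a minimal sketch (given a Connector conn; the table, row, and column names are illustrative, not taken from the tests above):

    import org.apache.accumulo.core.client.ConditionalWriter;
    import org.apache.accumulo.core.client.ConditionalWriter.Status;
    import org.apache.accumulo.core.client.ConditionalWriterConfig;
    import org.apache.accumulo.core.data.Condition;
    import org.apache.accumulo.core.data.ConditionalMutation;

    ConditionalWriter cw = conn.createConditionalWriter("mytable", new ConditionalWriterConfig());
    try {
      // condition: tx:seq must be absent for the mutation to apply
      ConditionalMutation cm = new ConditionalMutation("r1", new Condition("tx", "seq"));
      cm.put("tx", "seq", "1");
      cm.put("data", "x", "a");
      if (cw.write(cm).getStatus() != Status.ACCEPTED) {
        // another writer won the race: re-read the row, rebuild the
        // mutation with setValue(currentSeq), and retry
      }
    } finally {
      cw.close();
    }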
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/ConfigurableMajorCompactionIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/ConfigurableMajorCompactionIT.java b/test/src/main/java/org/apache/accumulo/test/ConfigurableMajorCompactionIT.java
new file mode 100644
index 0000000..bc45dda
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/ConfigurableMajorCompactionIT.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.fate.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.accumulo.tserver.compaction.CompactionPlan;
+import org.apache.accumulo.tserver.compaction.CompactionStrategy;
+import org.apache.accumulo.tserver.compaction.MajorCompactionRequest;
+import org.apache.accumulo.tserver.compaction.WriteParameters;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class ConfigurableMajorCompactionIT extends ConfigurableMacBase {
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 30;
+ }
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ Map<String,String> siteConfig = new HashMap<String,String>();
+ siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1s");
+ cfg.setSiteConfig(siteConfig);
+ }
+
+ public static class TestCompactionStrategy extends CompactionStrategy {
+
+ @Override
+ public boolean shouldCompact(MajorCompactionRequest request) throws IOException {
+ return request.getFiles().size() == 5;
+ }
+
+ @Override
+ public CompactionPlan getCompactionPlan(MajorCompactionRequest request) throws IOException {
+ CompactionPlan plan = new CompactionPlan();
+ plan.inputFiles.addAll(request.getFiles().keySet());
+ plan.writeParameters = new WriteParameters();
+ plan.writeParameters.setBlockSize(1024 * 1024);
+ plan.writeParameters.setCompressType("none");
+ plan.writeParameters.setHdfsBlockSize(1024 * 1024);
+ plan.writeParameters.setIndexBlockSize(10);
+ plan.writeParameters.setReplication(7);
+ return plan;
+ }
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector conn = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ conn.tableOperations().create(tableName);
+ conn.tableOperations().setProperty(tableName, Property.TABLE_COMPACTION_STRATEGY.getKey(), TestCompactionStrategy.class.getName());
+ writeFile(conn, tableName);
+ writeFile(conn, tableName);
+ writeFile(conn, tableName);
+ writeFile(conn, tableName);
+ UtilWaitThread.sleep(2 * 1000);
+ assertEquals(4, countFiles(conn));
+ writeFile(conn, tableName);
+ int count = countFiles(conn);
+ assertTrue(count == 1 || count == 5);
+ while (count != 1) {
+ UtilWaitThread.sleep(250);
+ count = countFiles(conn);
+ }
+ }
+
+ private int countFiles(Connector conn) throws Exception {
+ Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(MetadataSchema.TabletsSection.getRange());
+ s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+ return Iterators.size(s.iterator());
+ }
+
+ private void writeFile(Connector conn, String tableName) throws Exception {
+ BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+ Mutation m = new Mutation("row");
+ m.put("cf", "cq", "value");
+ bw.addMutation(m);
+ bw.close();
+ conn.tableOperations().flush(tableName, null, null, true);
+ }
+
+}
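
Wiring a table to a strategy like TestCompactionStrategy above comes down to the single property write the test performs; a minimal sketch, assuming the strategy class is on the tablet server classpath (the table name is illustrative):

    // tservers instantiate the strategy by class name, per table
    conn.tableOperations().setProperty("mytable",
        Property.TABLE_COMPACTION_STRATEGY.getKey(),
        TestCompactionStrategy.class.getName());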
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/CreateTableWithNewTableConfigIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/CreateTableWithNewTableConfigIT.java b/test/src/main/java/org/apache/accumulo/test/CreateTableWithNewTableConfigIT.java
new file mode 100644
index 0000000..b80bcb7
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/CreateTableWithNewTableConfigIT.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
+import org.apache.accumulo.core.client.admin.TimeType;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Iterators;
+
+public class CreateTableWithNewTableConfigIT extends SharedMiniClusterBase {
+ static private final Logger log = LoggerFactory.getLogger(CreateTableWithNewTableConfigIT.class);
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 30;
+ }
+
+ public int numProperties(Connector connector, String tableName) throws AccumuloException, TableNotFoundException {
+ return Iterators.size(connector.tableOperations().getProperties(tableName).iterator());
+ }
+
+ public int compareProperties(Connector connector, String tableNameOrig, String tableName, String changedProp) throws AccumuloException,
+ TableNotFoundException {
+ boolean inNew = false;
+ int countOrig = 0;
+ for (Entry<String,String> orig : connector.tableOperations().getProperties(tableNameOrig)) {
+ countOrig++;
+ for (Entry<String,String> entry : connector.tableOperations().getProperties(tableName)) {
+ if (entry.equals(orig)) {
+ inNew = true;
+ break;
+ } else if (entry.getKey().equals(orig.getKey()) && !entry.getKey().equals(changedProp))
+ Assert.fail("Property " + orig.getKey() + " has different value than deprecated method");
+ }
+ if (!inNew)
+ Assert.fail("Original property missing after using the new create method");
+ }
+ return countOrig;
+ }
+
+ public boolean checkTimeType(Connector connector, String tableName, TimeType expectedTimeType) throws TableNotFoundException {
+ final Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ String tableID = connector.tableOperations().tableIdMap().get(tableName) + "<";
+ for (Entry<Key,Value> entry : scanner) {
+ Key k = entry.getKey();
+
+ if (k.getRow().toString().equals(tableID) && k.getColumnQualifier().toString().equals(ServerColumnFamily.TIME_COLUMN.getColumnQualifier().toString())) {
+ if (expectedTimeType == TimeType.MILLIS && entry.getValue().toString().charAt(0) == 'M')
+ return true;
+ if (expectedTimeType == TimeType.LOGICAL && entry.getValue().toString().charAt(0) == 'L')
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @SuppressWarnings("deprecation")
+ @Test
+ public void tableNameOnly() throws Exception {
+ log.info("Starting tableNameOnly");
+
+ // Create a table with the initial properties
+ Connector connector = getConnector();
+ String tableName = getUniqueNames(2)[0];
+ connector.tableOperations().create(tableName, new NewTableConfiguration());
+
+ String tableNameOrig = "original";
+ connector.tableOperations().create(tableNameOrig, true);
+
+ int countNew = numProperties(connector, tableName);
+ int countOrig = compareProperties(connector, tableNameOrig, tableName, null);
+
+ Assert.assertEquals("Extra properties using the new create method", countOrig, countNew);
+ Assert.assertTrue("Wrong TimeType", checkTimeType(connector, tableName, TimeType.MILLIS));
+ }
+
+ @SuppressWarnings("deprecation")
+ @Test
+ public void tableNameAndLimitVersion() throws Exception {
+ log.info("Starting tableNameAndLimitVersion");
+
+ // Create a table with the initial properties
+ Connector connector = getConnector();
+ String tableName = getUniqueNames(2)[0];
+ boolean limitVersion = false;
+ connector.tableOperations().create(tableName, new NewTableConfiguration().withoutDefaultIterators());
+
+ String tableNameOrig = "originalWithLimitVersion";
+ connector.tableOperations().create(tableNameOrig, limitVersion);
+
+ int countNew = numProperties(connector, tableName);
+ int countOrig = compareProperties(connector, tableNameOrig, tableName, null);
+
+ Assert.assertEquals("Extra properties using the new create method", countOrig, countNew);
+ Assert.assertTrue("Wrong TimeType", checkTimeType(connector, tableName, TimeType.MILLIS));
+ }
+
+ @SuppressWarnings("deprecation")
+ @Test
+ public void tableNameLimitVersionAndTimeType() throws Exception {
+ log.info("Starting tableNameLimitVersionAndTimeType");
+
+ // Create a table with the initial properties
+ Connector connector = getConnector();
+ String tableName = getUniqueNames(2)[0];
+ boolean limitVersion = false;
+ TimeType tt = TimeType.LOGICAL;
+ connector.tableOperations().create(tableName, new NewTableConfiguration().withoutDefaultIterators().setTimeType(tt));
+
+ String tableNameOrig = "originalWithLimitVersionAndTimeType";
+ connector.tableOperations().create(tableNameOrig, limitVersion, tt);
+
+ int countNew = numProperties(connector, tableName);
+ int countOrig = compareProperties(connector, tableNameOrig, tableName, null);
+
+ Assert.assertEquals("Extra properties using the new create method", countOrig, countNew);
+ Assert.assertTrue("Wrong TimeType", checkTimeType(connector, tableName, tt));
+ }
+
+ @SuppressWarnings("deprecation")
+ @Test
+ public void addCustomPropAndChangeExisting() throws Exception {
+ log.info("Starting addCustomPropAndChangeExisting");
+
+ // Create and populate initial properties map for creating table 1
+ Map<String,String> properties = new HashMap<String,String>();
+ String propertyName = Property.TABLE_SPLIT_THRESHOLD.getKey();
+ String volume = "10K";
+ properties.put(propertyName, volume);
+
+ String propertyName2 = "table.custom.testProp";
+ String volume2 = "Test property";
+ properties.put(propertyName2, volume2);
+
+ // Create a table with the initial properties
+ Connector connector = getConnector();
+ String tableName = getUniqueNames(2)[0];
+ connector.tableOperations().create(tableName, new NewTableConfiguration().setProperties(properties));
+
+ String tableNameOrig = "originalWithTableName";
+ connector.tableOperations().create(tableNameOrig, true);
+
+ int countNew = numProperties(connector, tableName);
+ int countOrig = compareProperties(connector, tableNameOrig, tableName, propertyName);
+
+ for (Entry<String,String> entry : connector.tableOperations().getProperties(tableName)) {
+ if (entry.getKey().equals(Property.TABLE_SPLIT_THRESHOLD.getKey()))
+ Assert.assertTrue("TABLE_SPLIT_THRESHOLD has been changed", entry.getValue().equals("10K"));
+ if (entry.getKey().equals("table.custom.testProp"))
+ Assert.assertTrue("table.custom.testProp has been changed", entry.getValue().equals("Test property"));
+ }
+
+ Assert.assertEquals("Extra properties using the new create method", countOrig + 1, countNew);
+ Assert.assertTrue("Wrong TimeType", checkTimeType(connector, tableName, TimeType.MILLIS));
+
+ }
+}
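
Since the NewTableConfiguration setters used above chain, the three deprecated create overloads collapse into one fluent call; a minimal sketch combining them (table name and property values are illustrative):

    Map<String,String> props = new HashMap<String,String>();
    props.put(Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
    props.put("table.custom.testProp", "Test property");

    connector.tableOperations().create("mytable",
        new NewTableConfiguration()
            .withoutDefaultIterators()     // skip the default iterators
            .setTimeType(TimeType.LOGICAL)
            .setProperties(props));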
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/DumpConfigIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/DumpConfigIT.java b/test/src/main/java/org/apache/accumulo/test/DumpConfigIT.java
new file mode 100644
index 0000000..5cc37a5
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/DumpConfigIT.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.util.Collections;
+
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.util.Admin;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.accumulo.test.functional.FunctionalTestUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+public class DumpConfigIT extends ConfigurableMacBase {
+
+ @Rule
+ public TemporaryFolder folder = new TemporaryFolder(new File(System.getProperty("user.dir") + "/target"));
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setSiteConfig(Collections.singletonMap(Property.TABLE_FILE_BLOCK_SIZE.getKey(), "1234567"));
+ }
+
+ @Test
+ public void test() throws Exception {
+ File siteFileBackup = new File(folder.getRoot(), "accumulo-site.xml.bak");
+ assertFalse(siteFileBackup.exists());
+ assertEquals(0, exec(Admin.class, new String[] {"dumpConfig", "-a", "-d", folder.getRoot().getPath()}).waitFor());
+ assertTrue(siteFileBackup.exists());
+ String site = FunctionalTestUtils.readAll(new FileInputStream(siteFileBackup));
+ assertTrue(site.contains(Property.TABLE_FILE_BLOCK_SIZE.getKey()));
+ assertTrue(site.contains("1234567"));
+ String meta = FunctionalTestUtils.readAll(new FileInputStream(new File(folder.getRoot(), MetadataTable.NAME + ".cfg")));
+ assertTrue(meta.contains(Property.TABLE_FILE_REPLICATION.getKey()));
+ String systemPerm = FunctionalTestUtils.readAll(new FileInputStream(new File(folder.getRoot(), "root_user.cfg")));
+ assertTrue(systemPerm.contains("grant System.ALTER_USER -s -u root"));
+ assertTrue(systemPerm.contains("grant Table.READ -t " + MetadataTable.NAME + " -u root"));
+ assertFalse(systemPerm.contains("grant Table.DROP -t " + MetadataTable.NAME + " -u root"));
+ }
+}
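
For reference, the exec above is the harness equivalent of running the admin tool by hand; a sketch reusing the harness's exec helper, assuming the standard accumulo launcher and an illustrative output directory:

    // same as: accumulo admin dumpConfig -a -d /tmp/config-backup
    // -a dumps the site config backup, per-table .cfg files, and
    // per-user permission files such as root_user.cfg
    assertEquals(0, exec(Admin.class,
        new String[] {"dumpConfig", "-a", "-d", "/tmp/config-backup"}).waitFor());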
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/ExistingMacIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/ExistingMacIT.java b/test/src/main/java/org/apache/accumulo/test/ExistingMacIT.java
new file mode 100644
index 0000000..52d2086
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/ExistingMacIT.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Collection;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.minicluster.impl.ProcessReference;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class ExistingMacIT extends ConfigurableMacBase {
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
+
+ // use raw local file system so walogs sync and flush will work
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ private void createEmptyConfig(File confFile) throws IOException {
+ Configuration conf = new Configuration(false);
+ OutputStream hcOut = new FileOutputStream(confFile);
+ conf.writeXml(hcOut);
+ hcOut.close();
+ }
+
+ @Test
+ public void testExistingInstance() throws Exception {
+
+ Connector conn = getCluster().getConnector("root", new PasswordToken(ROOT_PASSWORD));
+
+ conn.tableOperations().create("table1");
+
+ BatchWriter bw = conn.createBatchWriter("table1", new BatchWriterConfig());
+
+ Mutation m1 = new Mutation("00081");
+ m1.put("math", "sqroot", "9");
+ m1.put("math", "sq", "6560");
+
+ bw.addMutation(m1);
+ bw.close();
+
+ conn.tableOperations().flush("table1", null, null, true);
+ // TODO use constants
+ conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
+ conn.tableOperations().flush(RootTable.NAME, null, null, true);
+
+ Set<Entry<ServerType,Collection<ProcessReference>>> procs = getCluster().getProcesses().entrySet();
+ for (Entry<ServerType,Collection<ProcessReference>> entry : procs) {
+ if (entry.getKey() == ServerType.ZOOKEEPER)
+ continue;
+ for (ProcessReference pr : entry.getValue())
+ getCluster().killProcess(entry.getKey(), pr);
+ }
+
+ // TODO clean out zookeeper? following sleep waits for ephemeral nodes to go away
+ UtilWaitThread.sleep(10000);
+
+ File hadoopConfDir = createTestDir(ExistingMacIT.class.getSimpleName() + "_hadoop_conf");
+ FileUtils.deleteQuietly(hadoopConfDir);
+ assertTrue(hadoopConfDir.mkdirs());
+ createEmptyConfig(new File(hadoopConfDir, "core-site.xml"));
+ createEmptyConfig(new File(hadoopConfDir, "hdfs-site.xml"));
+
+ File testDir2 = createTestDir(ExistingMacIT.class.getSimpleName() + "_2");
+ FileUtils.deleteQuietly(testDir2);
+
+ MiniAccumuloConfigImpl macConfig2 = new MiniAccumuloConfigImpl(testDir2, "notused");
+ macConfig2.useExistingInstance(new File(getCluster().getConfig().getConfDir(), "accumulo-site.xml"), hadoopConfDir);
+
+ MiniAccumuloClusterImpl accumulo2 = new MiniAccumuloClusterImpl(macConfig2);
+ accumulo2.start();
+
+ conn = accumulo2.getConnector("root", new PasswordToken(ROOT_PASSWORD));
+
+ Scanner scanner = conn.createScanner("table1", Authorizations.EMPTY);
+
+ int sum = 0;
+ for (Entry<Key,Value> entry : scanner) {
+ sum += Integer.parseInt(entry.getValue().toString());
+ }
+
+ Assert.assertEquals(6569, sum);
+
+ accumulo2.stop();
+ }
+
+ @Test
+ public void testExistingRunningInstance() throws Exception {
+ final String table = getUniqueNames(1)[0];
+ Connector conn = getConnector();
+ // Ensure that a master and tserver are up so the existing instance check won't fail.
+ conn.tableOperations().create(table);
+ BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+ Mutation m = new Mutation("foo");
+ m.put("cf", "cq", "value");
+ bw.addMutation(m);
+ bw.close();
+
+ File hadoopConfDir = createTestDir(ExistingMacIT.class.getSimpleName() + "_hadoop_conf_2");
+ FileUtils.deleteQuietly(hadoopConfDir);
+ assertTrue(hadoopConfDir.mkdirs());
+ createEmptyConfig(new File(hadoopConfDir, "core-site.xml"));
+ createEmptyConfig(new File(hadoopConfDir, "hdfs-site.xml"));
+
+ File testDir2 = createTestDir(ExistingMacIT.class.getSimpleName() + "_3");
+ FileUtils.deleteQuietly(testDir2);
+
+ MiniAccumuloConfigImpl macConfig2 = new MiniAccumuloConfigImpl(testDir2, "notused");
+ macConfig2.useExistingInstance(new File(getCluster().getConfig().getConfDir(), "accumulo-site.xml"), hadoopConfDir);
+
+ System.out.println("conf " + new File(getCluster().getConfig().getConfDir(), "accumulo-site.xml"));
+
+ MiniAccumuloClusterImpl accumulo2 = new MiniAccumuloClusterImpl(macConfig2);
+ try {
+ accumulo2.start();
+ Assert.fail("A 2nd MAC instance should not be able to start over an existing MAC instance");
+ } catch (RuntimeException e) {
+ // TODO check message or throw more explicit exception
+ }
+ }
+}
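
Both tests drive the same small handshake; a minimal sketch of attaching a second MAC to an existing instance's configuration (oldConfDir, hadoopConfDir, rootPassword, and the target directory are illustrative placeholders):

    // reuse the first instance's accumulo-site.xml plus a hadoop conf dir;
    // the root password argument is unused in this mode (hence "notused")
    MiniAccumuloConfigImpl cfg2 = new MiniAccumuloConfigImpl(new File("/tmp/mac2"), "notused");
    cfg2.useExistingInstance(new File(oldConfDir, "accumulo-site.xml"), hadoopConfDir);

    MiniAccumuloClusterImpl mac2 = new MiniAccumuloClusterImpl(cfg2);
    mac2.start();   // fails if the original instance's servers are still up
    Connector conn2 = mac2.getConnector("root", new PasswordToken(rootPassword));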
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java b/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java
deleted file mode 100644
index 7740492..0000000
--- a/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java
+++ /dev/null
@@ -1,1609 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.OutputStreamWriter;
-import java.io.PrintWriter;
-import java.lang.reflect.Constructor;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Random;
-
-import jline.console.ConsoleReader;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.client.impl.Namespaces;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.file.FileOperations;
-import org.apache.accumulo.core.file.FileSKVWriter;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.SharedMiniClusterBase;
-import org.apache.accumulo.shell.Shell;
-import org.apache.accumulo.test.functional.SlowIterator;
-import org.apache.accumulo.tracer.TraceServer;
-import org.apache.commons.configuration.ConfigurationException;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.tools.DistCp;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterators;
-
-public class ShellServerIT extends SharedMiniClusterBase {
- public static class TestOutputStream extends OutputStream {
- StringBuilder sb = new StringBuilder();
-
- @Override
- public void write(int b) throws IOException {
- sb.append((char) (0xff & b));
- }
-
- public String get() {
- return sb.toString();
- }
-
- public void clear() {
- sb.setLength(0);
- }
- }
-
- private static final Logger log = LoggerFactory.getLogger(ShellServerIT.class);
-
- public static class StringInputStream extends InputStream {
- private String source = "";
- private int offset = 0;
-
- @Override
- public int read() throws IOException {
- if (offset == source.length())
- return '\n';
- else
- return source.charAt(offset++);
- }
-
- public void set(String other) {
- source = other;
- offset = 0;
- }
- }
-
- private static abstract class ErrorMessageCallback {
- public abstract String getErrorMessage();
- }
-
- private static class NoOpErrorMessageCallback extends ErrorMessageCallback {
- private static final String empty = "";
-
- @Override
- public String getErrorMessage() {
- return empty;
- }
- }
-
- public static class TestShell {
- public TestOutputStream output;
- public StringInputStream input;
- public Shell shell;
-
- TestShell(String user, String rootPass, String instanceName, String zookeepers, File configFile) throws IOException {
- ClientConfiguration clientConf;
- try {
- clientConf = new ClientConfiguration(configFile);
- } catch (ConfigurationException e) {
- throw new IOException(e);
- }
- // start the shell
- output = new TestOutputStream();
- input = new StringInputStream();
- PrintWriter pw = new PrintWriter(new OutputStreamWriter(output));
- shell = new Shell(new ConsoleReader(input, output), pw);
- shell.setLogErrorsToConsole();
- if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- // Pull the kerberos principal out when we're using SASL
- shell.config("-u", user, "-z", instanceName, zookeepers, "--config-file", configFile.getAbsolutePath());
- } else {
- shell.config("-u", user, "-p", rootPass, "-z", instanceName, zookeepers, "--config-file", configFile.getAbsolutePath());
- }
- exec("quit", true);
- shell.start();
- shell.setExit(false);
- }
-
- String exec(String cmd) throws IOException {
- output.clear();
- shell.execCommand(cmd, true, true);
- return output.get();
- }
-
- String exec(String cmd, boolean expectGoodExit) throws IOException {
- return exec(cmd, expectGoodExit, noop);
- }
-
- String exec(String cmd, boolean expectGoodExit, ErrorMessageCallback callback) throws IOException {
- String result = exec(cmd);
- if (expectGoodExit)
- assertGoodExit("", true, callback);
- else
- assertBadExit("", true, callback);
- return result;
- }
-
- String exec(String cmd, boolean expectGoodExit, String expectString) throws IOException {
- return exec(cmd, expectGoodExit, expectString, noop);
- }
-
- String exec(String cmd, boolean expectGoodExit, String expectString, ErrorMessageCallback callback) throws IOException {
- return exec(cmd, expectGoodExit, expectString, true, callback);
- }
-
- String exec(String cmd, boolean expectGoodExit, String expectString, boolean stringPresent) throws IOException {
- return exec(cmd, expectGoodExit, expectString, stringPresent, noop);
- }
-
- String exec(String cmd, boolean expectGoodExit, String expectString, boolean stringPresent, ErrorMessageCallback callback) throws IOException {
- String result = exec(cmd);
- if (expectGoodExit)
- assertGoodExit(expectString, stringPresent, callback);
- else
- assertBadExit(expectString, stringPresent, callback);
- return result;
- }
-
- void assertGoodExit(String s, boolean stringPresent) {
- assertGoodExit(s, stringPresent, noop);
- }
-
- void assertGoodExit(String s, boolean stringPresent, ErrorMessageCallback callback) {
- Shell.log.info(output.get());
- if (0 != shell.getExitCode()) {
- String errorMsg = callback.getErrorMessage();
- assertEquals(errorMsg, 0, shell.getExitCode());
- }
-
- if (s.length() > 0)
- assertEquals(s + " present in " + output.get() + " was not " + stringPresent, stringPresent, output.get().contains(s));
- }
-
- void assertBadExit(String s, boolean stringPresent, ErrorMessageCallback callback) {
- Shell.log.debug(output.get());
- if (0 == shell.getExitCode()) {
- String errorMsg = callback.getErrorMessage();
- assertTrue(errorMsg, shell.getExitCode() > 0);
- }
-
- if (s.length() > 0)
- assertEquals("Expected '" + s + "' to be " + (stringPresent ? "present" : "absent") + " in: " + output.get(), stringPresent, output.get().contains(s));
- shell.resetExitCode();
- }
- }
-
- private static final NoOpErrorMessageCallback noop = new NoOpErrorMessageCallback();
-
- private TestShell ts;
-
- private static Process traceProcess;
- private static String rootPath;
-
- @Rule
- public TestName name = new TestName();
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- rootPath = getMiniClusterDir().getAbsolutePath();
-
- // history file is updated in $HOME
- System.setProperty("HOME", rootPath);
- System.setProperty("hadoop.tmp.dir", System.getProperty("user.dir") + "/target/hadoop-tmp");
-
- traceProcess = getCluster().exec(TraceServer.class);
-
- Connector conn = getCluster().getConnector(getPrincipal(), getToken());
- TableOperations tops = conn.tableOperations();
-
- // give the tracer some time to start
- while (!tops.exists("trace")) {
- UtilWaitThread.sleep(1000);
- }
- }
-
- @Before
- public void setupShell() throws Exception {
- ts = new TestShell(getPrincipal(), getRootPassword(), getCluster().getConfig().getInstanceName(), getCluster().getConfig().getZooKeepers(), getCluster()
- .getConfig().getClientConfFile());
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- if (null != traceProcess) {
- traceProcess.destroy();
- }
- }
-
- @After
- public void deleteTables() throws Exception {
- Connector c = getConnector();
- for (String table : c.tableOperations().list()) {
- if (!table.startsWith(Namespaces.ACCUMULO_NAMESPACE + ".") && !table.equals("trace"))
- try {
- c.tableOperations().delete(table);
- } catch (TableNotFoundException e) {
- // don't care
- }
- }
- }
-
- @After
- public void tearDownShell() {
- ts.shell.shutdown();
- }
-
- @Override
- public int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Test
- public void exporttableImporttable() throws Exception {
- final String table = name.getMethodName(), table2 = table + "2";
-
- // exporttable / importtable
- ts.exec("createtable " + table + " -evc", true);
- make10();
- ts.exec("addsplits row5", true);
- ts.exec("config -t " + table + " -s table.split.threshold=345M", true);
- ts.exec("offline " + table, true);
- File exportDir = new File(rootPath, "ShellServerIT.export");
- String exportUri = "file://" + exportDir.toString();
- String localTmp = "file://" + new File(rootPath, "ShellServerIT.tmp").toString();
- ts.exec("exporttable -t " + table + " " + exportUri, true);
- DistCp cp = newDistCp();
- String import_ = "file://" + new File(rootPath, "ShellServerIT.import").toString();
- if (getCluster().getClientConfig().getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- // DistCp bugs out trying to get a fs delegation token to perform the cp. Just copy it ourselves by hand.
- FileSystem fs = getCluster().getFileSystem();
- FileSystem localFs = FileSystem.getLocal(new Configuration(false));
-
- // Path on local fs to cp into
- Path localTmpPath = new Path(localTmp);
- localFs.mkdirs(localTmpPath);
-
- // Path in remote fs to importtable from
- Path importDir = new Path(import_);
- fs.mkdirs(importDir);
-
- // Implement a poor-man's DistCp
- try (BufferedReader reader = new BufferedReader(new FileReader(new File(exportDir, "distcp.txt")))) {
- for (String line; (line = reader.readLine()) != null;) {
- Path exportedFile = new Path(line);
- // FileSystem has no direct cp, so copy to the local fs, then move into the import dir
- log.info("Copying " + line + " to " + localTmpPath);
- fs.copyToLocalFile(exportedFile, localTmpPath);
- Path tmpFile = new Path(localTmpPath, exportedFile.getName());
- log.info("Moving " + tmpFile + " to the import directory " + importDir);
- fs.moveFromLocalFile(tmpFile, importDir);
- }
- }
- } else {
- String[] distCpArgs = new String[] {"-f", exportUri + "/distcp.txt", import_};
- assertEquals("Failed to run distcp: " + Arrays.toString(distCpArgs), 0, cp.run(distCpArgs));
- }
- ts.exec("importtable " + table2 + " " + import_, true);
- ts.exec("config -t " + table2 + " -np", true, "345M", true);
- ts.exec("getsplits -t " + table2, true, "row5", true);
- ts.exec("constraint --list -t " + table2, true, "VisibilityConstraint=2", true);
- ts.exec("online " + table, true);
- ts.exec("deletetable -f " + table, true);
- ts.exec("deletetable -f " + table2, true);
- }
-
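- // DistCp's public constructors differ across Hadoop versions; reflectively
- // pick whichever (Configuration) or (Configuration, options) variant exists.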
- private DistCp newDistCp() {
- try {
- @SuppressWarnings("unchecked")
- Constructor<DistCp>[] constructors = (Constructor<DistCp>[]) DistCp.class.getConstructors();
- for (Constructor<DistCp> constructor : constructors) {
- Class<?>[] parameterTypes = constructor.getParameterTypes();
- if (parameterTypes.length > 0 && parameterTypes[0].equals(Configuration.class)) {
- if (parameterTypes.length == 1) {
- return constructor.newInstance(new Configuration());
- } else if (parameterTypes.length == 2) {
- return constructor.newInstance(new Configuration(), null);
- }
- }
- }
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- throw new RuntimeException("Unexpected constructors for DistCp");
- }
-
- @Test
- public void setscaniterDeletescaniter() throws Exception {
- final String table = name.getMethodName();
-
- // setscaniter, deletescaniter
- ts.exec("createtable " + table);
- ts.exec("insert a cf cq 1");
- ts.exec("insert a cf cq 1");
- ts.exec("insert a cf cq 1");
- ts.input.set("true\n\n\nSTRING");
- ts.exec("setscaniter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -n name", true);
- ts.exec("scan", true, "3", true);
- ts.exec("deletescaniter -n name", true);
- ts.exec("scan", true, "1", true);
- ts.exec("deletetable -f " + table);
-
- }
-
- @Test
- public void execfile() throws Exception {
- // execfile
- File file = File.createTempFile("ShellServerIT.execfile", ".conf", new File(rootPath));
- PrintWriter writer = new PrintWriter(file.getAbsolutePath());
- writer.println("about");
- writer.close();
- ts.exec("execfile " + file.getAbsolutePath(), true, Constants.VERSION, true);
-
- }
-
- @Test
- public void egrep() throws Exception {
- final String table = name.getMethodName();
-
- // egrep
- ts.exec("createtable " + table);
- make10();
- String lines = ts.exec("egrep row[123]", true);
- assertEquals(3, lines.split("\n").length - 1);
- ts.exec("deletetable -f " + table);
- }
-
- @Test
- public void du() throws Exception {
- final String table = name.getMethodName();
-
- // create and delete a table so we get out of a table context in the shell
- ts.exec("notable", true);
-
- // Calling du not in a table context shouldn't throw an error
- ts.output.clear();
- ts.exec("du", true, "", true);
-
- ts.output.clear();
- ts.exec("createtable " + table);
- make10();
- ts.exec("flush -t " + table + " -w");
- ts.exec("du " + table, true, " [" + table + "]", true);
- ts.output.clear();
- ts.shell.execCommand("du -h", false, false);
- String o = ts.output.get();
- // for some reason, there's a bit of fluctuation
- assertTrue("Output did not match regex: '" + o + "'", o.matches(".*[1-9][0-9][0-9]\\s\\[" + table + "\\]\\n"));
- ts.exec("deletetable -f " + table);
- }
-
- @Test
- public void debug() throws Exception {
- ts.exec("debug", true, "off", true);
- ts.exec("debug on", true);
- ts.exec("debug", true, "on", true);
- ts.exec("debug off", true);
- ts.exec("debug", true, "off", true);
- ts.exec("debug debug", false);
- ts.exec("debug debug debug", false);
- }
-
- @Test
- public void user() throws Exception {
- final String table = name.getMethodName();
- final boolean kerberosEnabled = getToken() instanceof KerberosToken;
-
- // createuser, deleteuser, user, users, droptable, grant, revoke
- if (!kerberosEnabled) {
- ts.input.set("secret\nsecret\n");
- }
- ts.exec("createuser xyzzy", true);
- ts.exec("users", true, "xyzzy", true);
- String perms = ts.exec("userpermissions -u xyzzy", true);
- assertTrue(perms.contains("Table permissions (" + MetadataTable.NAME + "): Table.READ"));
- ts.exec("grant -u xyzzy -s System.CREATE_TABLE", true);
- perms = ts.exec("userpermissions -u xyzzy", true);
- // the grant above should now appear in the user's system permissions
- assertTrue(perms.contains("System.CREATE_TABLE"));
- ts.exec("grant -u " + getPrincipal() + " -t " + MetadataTable.NAME + " Table.WRITE", true);
- ts.exec("grant -u " + getPrincipal() + " -t " + MetadataTable.NAME + " Table.GOOFY", false);
- ts.exec("grant -u " + getPrincipal() + " -s foo", false);
- ts.exec("grant -u xyzzy -t " + MetadataTable.NAME + " foo", false);
- if (!kerberosEnabled) {
- ts.input.set("secret\nsecret\n");
- ts.exec("user xyzzy", true);
- ts.exec("createtable " + table, true, "xyzzy@", true);
- ts.exec("insert row1 cf cq 1", true);
- ts.exec("scan", true, "row1", true);
- ts.exec("droptable -f " + table, true);
- ts.input.set(getRootPassword() + "\n" + getRootPassword() + "\n");
- ts.exec("user root", true);
- }
- ts.exec("deleteuser " + getPrincipal(), false, "delete yourself", true);
- ts.exec("revoke -u xyzzy -s System.CREATE_TABLE", true);
- ts.exec("revoke -u xyzzy -s System.GOOFY", false);
- ts.exec("revoke -u xyzzy -s foo", false);
- ts.exec("revoke -u xyzzy -t " + MetadataTable.NAME + " Table.WRITE", true);
- ts.exec("revoke -u xyzzy -t " + MetadataTable.NAME + " Table.GOOFY", false);
- ts.exec("revoke -u xyzzy -t " + MetadataTable.NAME + " foo", false);
- ts.exec("deleteuser xyzzy", true, "deleteuser { xyzzy } (yes|no)?", true);
- ts.exec("deleteuser -f xyzzy", true);
- ts.exec("users", true, "xyzzy", false);
- }
-
- @Test
- public void durability() throws Exception {
- final String table = name.getMethodName();
- ts.exec("createtable " + table);
- ts.exec("insert -d none a cf cq randomGunkaASDFWEAQRd");
- ts.exec("insert -d foo a cf cq2 2", false, "foo", true);
- ts.exec("scan -r a", true, "randomGunkaASDFWEAQRd", true);
- ts.exec("scan -r a", true, "foo", false);
- }
-
- @Test
- public void iter() throws Exception {
- final String table = name.getMethodName();
-
- // setshelliter, listshelliter, deleteshelliter
- ts.exec("createtable " + table);
- ts.exec("insert a cf cq 1");
- ts.exec("insert a cf cq 1");
- ts.exec("insert a cf cq 1");
- ts.input.set("true\n\n\nSTRING\n");
- ts.exec("setshelliter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -pn sum -n name", true);
- ts.exec("setshelliter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 11 -pn sum -n name", false);
- ts.exec("setshelliter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -pn sum -n other", false);
- ts.input.set("true\n\n\nSTRING\n");
- ts.exec("setshelliter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 11 -pn sum -n xyzzy", true);
- ts.exec("scan -pn sum", true, "3", true);
- ts.exec("listshelliter", true, "Iterator name", true);
- ts.exec("listshelliter", true, "Iterator xyzzy", true);
- ts.exec("listshelliter", true, "Profile : sum", true);
- ts.exec("deleteshelliter -pn sum -n name", true);
- ts.exec("listshelliter", true, "Iterator name", false);
- ts.exec("listshelliter", true, "Iterator xyzzy", true);
- ts.exec("deleteshelliter -pn sum -a", true);
- ts.exec("listshelliter", true, "Iterator xyzzy", false);
- ts.exec("listshelliter", true, "Profile : sum", false);
- ts.exec("deletetable -f " + table);
- // list iter
- ts.exec("createtable " + table);
- ts.exec("insert a cf cq 1");
- ts.exec("insert a cf cq 1");
- ts.exec("insert a cf cq 1");
- ts.input.set("true\n\n\nSTRING\n");
- ts.exec("setiter -scan -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -n name", true);
- ts.exec("setiter -scan -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 11 -n name", false);
- ts.exec("setiter -scan -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -n other", false);
- ts.input.set("true\n\n\nSTRING\n");
- ts.exec("setiter -scan -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 11 -n xyzzy", true);
- ts.exec("scan", true, "3", true);
- ts.exec("listiter -scan", true, "Iterator name", true);
- ts.exec("listiter -scan", true, "Iterator xyzzy", true);
- ts.exec("listiter -minc", true, "Iterator name", false);
- ts.exec("listiter -minc", true, "Iterator xyzzy", false);
- ts.exec("deleteiter -scan -n name", true);
- ts.exec("listiter -scan", true, "Iterator name", false);
- ts.exec("listiter -scan", true, "Iterator xyzzy", true);
- ts.exec("deletetable -f " + table);
-
- }
-
- @Test
- public void setIterOptionPrompt() throws Exception {
- Connector conn = getConnector();
- String tableName = name.getMethodName();
-
- ts.exec("createtable " + tableName);
- ts.input.set("\n\n");
- // Setting a non-optiondescriber with no name should fail
- ts.exec("setiter -scan -class org.apache.accumulo.core.iterators.ColumnFamilyCounter -p 30", false);
-
- // Name as option will work
- ts.exec("setiter -scan -class org.apache.accumulo.core.iterators.ColumnFamilyCounter -p 30 -name cfcounter", true);
-
- String expectedKey = "table.iterator.scan.cfcounter";
- String expectedValue = "30,org.apache.accumulo.core.iterators.ColumnFamilyCounter";
- TableOperations tops = conn.tableOperations();
- checkTableForProperty(tops, tableName, expectedKey, expectedValue);
-
- ts.exec("deletetable " + tableName, true);
- tableName = tableName + "1";
-
- ts.exec("createtable " + tableName, true);
-
- ts.input.set("customcfcounter\n\n");
-
- // No -name on the CLI here, so the name prompted from user input ("customcfcounter") is used
- ts.exec("setiter -scan -class org.apache.accumulo.core.iterators.ColumnFamilyCounter -p 30", true);
- expectedKey = "table.iterator.scan.customcfcounter";
- expectedValue = "30,org.apache.accumulo.core.iterators.ColumnFamilyCounter";
- checkTableForProperty(tops, tableName, expectedKey, expectedValue);
-
- ts.exec("deletetable " + tableName, true);
- tableName = tableName + "1";
-
- ts.exec("createtable " + tableName, true);
-
- ts.input.set("customcfcounter\nname1 value1\nname2 value2\n\n");
-
- // Again no -name on the CLI: both the iterator name and its options come from the prompted input
- ts.exec("setiter -scan -class org.apache.accumulo.core.iterators.ColumnFamilyCounter -p 30", true);
- expectedKey = "table.iterator.scan.customcfcounter";
- expectedValue = "30,org.apache.accumulo.core.iterators.ColumnFamilyCounter";
- checkTableForProperty(tops, tableName, expectedKey, expectedValue);
- expectedKey = "table.iterator.scan.customcfcounter.opt.name1";
- expectedValue = "value1";
- checkTableForProperty(tops, tableName, expectedKey, expectedValue);
- expectedKey = "table.iterator.scan.customcfcounter.opt.name2";
- expectedValue = "value2";
- checkTableForProperty(tops, tableName, expectedKey, expectedValue);
-
- ts.exec("deletetable " + tableName, true);
- tableName = tableName + "1";
-
- ts.exec("createtable " + tableName, true);
-
- ts.input.set("\nname1 value1.1,value1.2,value1.3\nname2 value2\n\n");
-
- // The -name given on the CLI takes precedence; the prompted options are still applied
- ts.exec("setiter -scan -class org.apache.accumulo.core.iterators.ColumnFamilyCounter -p 30 -name cfcounter", true);
- expectedKey = "table.iterator.scan.cfcounter";
- expectedValue = "30,org.apache.accumulo.core.iterators.ColumnFamilyCounter";
- checkTableForProperty(tops, tableName, expectedKey, expectedValue);
- expectedKey = "table.iterator.scan.cfcounter.opt.name1";
- expectedValue = "value1.1,value1.2,value1.3";
- checkTableForProperty(tops, tableName, expectedKey, expectedValue);
- expectedKey = "table.iterator.scan.cfcounter.opt.name2";
- expectedValue = "value2";
- checkTableForProperty(tops, tableName, expectedKey, expectedValue);
- }
-
- protected void checkTableForProperty(TableOperations tops, String tableName, String expectedKey, String expectedValue) throws Exception {
- for (int i = 0; i < 5; i++) {
- for (Entry<String,String> entry : tops.getProperties(tableName)) {
- if (expectedKey.equals(entry.getKey())) {
- assertEquals(expectedValue, entry.getValue());
- return;
- }
- }
- Thread.sleep(500);
- }
-
- fail("Failed to find expected property on " + tableName + ": " + expectedKey + "=" + expectedValue);
- }
-
- @Test
- public void notable() throws Exception {
- final String table = name.getMethodName();
-
- // notable
- ts.exec("createtable " + table, true);
- ts.exec("scan", true, " " + table + ">", true);
- assertTrue(ts.output.get().contains(" " + table + ">"));
- ts.exec("notable", true);
- ts.exec("scan", false, "Not in a table context.", true);
- assertFalse(ts.output.get().contains(" " + table + ">"));
- ts.exec("deletetable -f " + table);
- }
-
- @Test
- public void sleep() throws Exception {
- // sleep
- long now = System.currentTimeMillis();
- ts.exec("sleep 0.2", true);
- long diff = System.currentTimeMillis() - now;
- assertTrue("Diff was actually " + diff, diff >= 200);
- assertTrue("Diff was actually " + diff, diff < 600);
- }
-
- @Test
- public void addauths() throws Exception {
- final String table = name.getMethodName();
- // addauths
- ts.exec("createtable " + table + " -evc");
- boolean success = false;
- for (int i = 0; i < 9 && !success; i++) {
- try {
- ts.exec("insert a b c d -l foo", false, "does not have authorization", true, new ErrorMessageCallback() {
- @Override
- public String getErrorMessage() {
- try {
- Connector c = getConnector();
- return "Current auths for root are: " + c.securityOperations().getUserAuthorizations("root").toString();
- } catch (Exception e) {
- return "Could not check authorizations";
- }
- }
- });
- // no AssertionError means the insert failed as expected; stop retrying
- success = true;
- } catch (AssertionError e) {
- Thread.sleep(200);
- }
- }
- if (!success) {
- ts.exec("insert a b c d -l foo", false, "does not have authorization", true, new ErrorMessageCallback() {
- @Override
- public String getErrorMessage() {
- try {
- Connector c = getConnector();
- return "Current auths for root are: " + c.securityOperations().getUserAuthorizations("root").toString();
- } catch (Exception e) {
- return "Could not check authorizations";
- }
- }
- });
- }
- ts.exec("addauths -s foo,bar", true);
- boolean passed = false;
- for (int i = 0; i < 50 && !passed; i++) {
- try {
- ts.exec("getauths", true, "foo", true);
- ts.exec("getauths", true, "bar", true);
- passed = true;
- } catch (Exception e) {
- UtilWaitThread.sleep(300);
- }
- }
- assertTrue("Could not successfully see updated authoriations", passed);
- ts.exec("insert a b c d -l foo");
- ts.exec("scan", true, "[foo]");
- ts.exec("scan -s bar", true, "[foo]", false);
- ts.exec("deletetable -f " + table);
- }
-
- @Test
- public void getAuths() throws Exception {
- Assume.assumeFalse("test skipped for kerberos", getToken() instanceof KerberosToken);
-
- // create two users with different auths
- for (int i = 1; i <= 2; i++) {
- String userName = name.getMethodName() + "user" + i;
- String password = "password" + i;
- String auths = "auth" + i + "A,auth" + i + "B";
- ts.exec("createuser " + userName, true);
- ts.exec(password, true);
- ts.exec("addauths -u " + userName + " -s " + auths, true);
- }
-
- // get auths using root user, which has System.SYSTEM
- ts.exec("getauths -u getAuthsuser1", true, "auth1A", true);
- ts.exec("getauths -u getAuthsuser1", true, "auth1B", true);
- ts.exec("getauths -u getAuthsuser2", true, "auth2A", true);
- ts.exec("getauths -u getAuthsuser2", true, "auth2B", true);
-
- // grant the first user the ability to see other users auths
- ts.exec("grant -u getAuthsuser1 -s System.ALTER_USER", true);
-
- // switch to first user (the one with the ALTER_USER perm)
- ts.exec("user getAuthsuser1", true);
- ts.exec("password1", true);
-
- // get auths for self and other user
- ts.exec("getauths -u getAuthsuser1", true, "auth1A", true);
- ts.exec("getauths -u getAuthsuser1", true, "auth1B", true);
- ts.exec("getauths -u getAuthsuser2", true, "auth2A", true);
- ts.exec("getauths -u getAuthsuser2", true, "auth2B", true);
-
- // switch to second user (the one without the ALTER_USER perm)
- ts.exec("user getAuthsuser2", true);
- ts.exec("password2", true);
-
- // get auths for self, but not other user
- ts.exec("getauths -u getAuthsuser2", true, "auth2A", true);
- ts.exec("getauths -u getAuthsuser2", true, "auth2B", true);
- ts.exec("getauths -u getAuthsuser1", false, "PERMISSION_DENIED", true);
- ts.exec("getauths -u getAuthsuser1", false, "PERMISSION_DENIED", true);
- }
-
- @Test
- public void byeQuitExit() throws Exception {
- // bye, quit, exit
- for (String cmd : "bye quit exit".split(" ")) {
- assertFalse(ts.shell.getExit());
- ts.exec(cmd);
- assertTrue(ts.shell.getExit());
- ts.shell.setExit(false);
- }
- }
-
- @Test
- public void classpath() throws Exception {
- // classpath
- ts.exec("classpath", true, "Level 2: Java Classloader (loads everything defined by java classpath) URL classpath items are", true);
- }
-
- @Test
- public void clearCls() throws Exception {
- // clear/cls
- if (ts.shell.getReader().getTerminal().isAnsiSupported()) {
- ts.exec("cls", true, "[1;1H");
- ts.exec("clear", true, "[2J");
- } else {
- ts.exec("cls", false, "does not support");
- ts.exec("clear", false, "does not support");
- }
- }
-
- @Test
- public void clonetable() throws Exception {
- final String table = name.getMethodName(), clone = table + "_clone";
-
- // clonetable
- ts.exec("createtable " + table + " -evc");
- ts.exec("config -t " + table + " -s table.split.threshold=123M", true);
- ts.exec("addsplits -t " + table + " a b c", true);
- ts.exec("insert a b c value");
- ts.exec("scan", true, "value", true);
- ts.exec("clonetable " + table + " " + clone);
- // verify constraint, config, and splits were cloned
- ts.exec("constraint --list -t " + clone, true, "VisibilityConstraint=2", true);
- ts.exec("config -t " + clone + " -np", true, "123M", true);
- ts.exec("getsplits -t " + clone, true, "a\nb\nc\n");
- ts.exec("deletetable -f " + table);
- ts.exec("deletetable -f " + clone);
- }
-
- @Test
- public void createTableWithProperties() throws Exception {
- final String table = name.getMethodName();
-
- // create table with initial properties
- String testProp = "table.custom.description=description,table.custom.testProp=testProp," + Property.TABLE_SPLIT_THRESHOLD.getKey() + "=10K";
-
- ts.exec("createtable " + table + " -prop " + testProp, true);
- ts.exec("insert a b c value", true);
- ts.exec("scan", true, "value", true);
-
- Connector connector = getConnector();
- for (Entry<String,String> entry : connector.tableOperations().getProperties(table)) {
- if (entry.getKey().equals("table.custom.description"))
- Assert.assertTrue("Initial property was not set correctly", entry.getValue().equals("description"));
-
- if (entry.getKey().equals("table.custom.testProp"))
- Assert.assertTrue("Initial property was not set correctly", entry.getValue().equals("testProp"));
-
- if (entry.getKey().equals(Property.TABLE_SPLIT_THRESHOLD.getKey()))
- Assert.assertTrue("Initial property was not set correctly", entry.getValue().equals("10K"));
-
- }
- ts.exec("deletetable -f " + table);
- }
-
- @Test
- public void testCompactions() throws Exception {
- final String table = name.getMethodName();
-
- // compact
- ts.exec("createtable " + table);
-
- String tableId = getTableId(table);
-
- // make two files
- ts.exec("insert a b c d");
- ts.exec("flush -w");
- ts.exec("insert x y z v");
- ts.exec("flush -w");
- int oldCount = countFiles(tableId);
- // merge two files into one
- ts.exec("compact -t " + table + " -w");
- assertTrue(countFiles(tableId) < oldCount);
- ts.exec("addsplits -t " + table + " f");
- // make two more files:
- ts.exec("insert m 1 2 3");
- ts.exec("flush -w");
- ts.exec("insert n 1 2 v901");
- ts.exec("flush -w");
- List<String> oldFiles = getFiles(tableId);
-
- // at this point there are 4 files in the default tablet
- assertEquals("Files that were found: " + oldFiles, 4, oldFiles.size());
-
- // compact some data:
- ts.exec("compact -b g -e z -w");
- assertEquals(2, countFiles(tableId));
- ts.exec("compact -w");
- assertEquals(2, countFiles(tableId));
- ts.exec("merge --all -t " + table);
- ts.exec("compact -w");
- assertEquals(1, countFiles(tableId));
-
- // test compaction strategy
- ts.exec("insert z 1 2 v900");
- ts.exec("compact -w -s " + TestCompactionStrategy.class.getName() + " -sc inputPrefix=F,dropPrefix=A");
- assertEquals(1, countFiles(tableId));
- ts.exec("scan", true, "v900", true);
- ts.exec("scan", true, "v901", false);
-
- ts.exec("deletetable -f " + table);
- }
-
- @Test
- public void testCompactionSelection() throws Exception {
- final String table = name.getMethodName();
- final String clone = table + "_clone";
-
- ts.exec("createtable " + table);
- ts.exec("insert a b c d");
- ts.exec("flush -w");
- ts.exec("insert x y z v");
- ts.exec("flush -w");
-
- ts.exec("clonetable -s " + Property.TABLE_MAJC_RATIO.getKey() + "=10 " + table + " " + clone);
-
- ts.exec("table " + clone);
- ts.exec("insert m n l o");
- ts.exec("flush -w");
-
- String tableId = getTableId(table);
- String cloneId = getTableId(clone);
-
- assertEquals(3, countFiles(cloneId));
-
- // compact only files from src table
- ts.exec("compact -t " + clone + " -w --sf-epath .*tables/" + tableId + ".*");
-
- assertEquals(2, countFiles(cloneId));
-
- ts.exec("insert r s t u");
- ts.exec("flush -w");
-
- assertEquals(3, countFiles(cloneId));
-
- // compact all flush files
- ts.exec("compact -t " + clone + " -w --sf-ename F.*");
-
- assertEquals(2, countFiles(cloneId));
-
- // create two large files
- Random rand = new Random();
- StringBuilder sb = new StringBuilder("insert b v q ");
- for (int i = 0; i < 10000; i++) {
- // the cast matters: append(int) would write the character's numeric code, not a letter
- sb.append((char) ('a' + rand.nextInt(26)));
- }
-
- ts.exec(sb.toString());
- ts.exec("flush -w");
-
- ts.exec(sb.toString());
- ts.exec("flush -w");
-
- assertEquals(4, countFiles(cloneId));
-
- // compact only small files
- ts.exec("compact -t " + clone + " -w --sf-lt-esize 1000");
-
- assertEquals(3, countFiles(cloneId));
-
- // compact large files if 3 or more
- ts.exec("compact -t " + clone + " -w --sf-gt-esize 1K --min-files 3");
-
- assertEquals(3, countFiles(cloneId));
-
- // compact large files if 2 or more
- ts.exec("compact -t " + clone + " -w --sf-gt-esize 1K --min-files 2");
-
- assertEquals(2, countFiles(cloneId));
-
- // compact if tablet has 3 or more files
- ts.exec("compact -t " + clone + " -w --min-files 3");
-
- assertEquals(2, countFiles(cloneId));
-
- // compact if tablet has 2 or more files
- ts.exec("compact -t " + clone + " -w --min-files 2");
-
- assertEquals(1, countFiles(cloneId));
-
- // create two small and one large flush files in order to test AND
- ts.exec(sb.toString());
- ts.exec("flush -w");
-
- ts.exec("insert m n l o");
- ts.exec("flush -w");
-
- ts.exec("insert m n l o");
- ts.exec("flush -w");
-
- assertEquals(4, countFiles(cloneId));
-
- // should only compact two small flush files leaving large flush file
- ts.exec("compact -t " + clone + " -w --sf-ename F.* --sf-lt-esize 1K");
-
- assertEquals(3, countFiles(cloneId));
- }
-
- @Test
- public void testCompactionSelectionAndStrategy() throws Exception {
-
- final String table = name.getMethodName();
-
- ts.exec("createtable " + table);
-
- // expect this to fail
- ts.exec("compact -t " + table + " -w --sf-ename F.* -s " + TestCompactionStrategy.class.getName() + " -sc inputPrefix=F,dropPrefix=A", false);
- }
-
- @Test
- public void constraint() throws Exception {
- final String table = name.getMethodName();
-
- // constraint
- ts.exec("constraint -l -t " + MetadataTable.NAME + "", true, "MetadataConstraints=1", true);
- ts.exec("createtable " + table + " -evc");
-
- // Make sure the table is fully propagated through zoocache
- getTableId(table);
-
- ts.exec("constraint -l -t " + table, true, "VisibilityConstraint=2", true);
- ts.exec("constraint -t " + table + " -d 2", true, "Removed constraint 2 from table " + table);
- // wait for zookeeper updates to propagate
- UtilWaitThread.sleep(1000);
- ts.exec("constraint -l -t " + table, true, "VisibilityConstraint=2", false);
- ts.exec("deletetable -f " + table);
- }
-
- @Test
- public void deletemany() throws Exception {
- final String table = name.getMethodName();
-
- // deletemany
- ts.exec("createtable " + table);
- make10();
- assertEquals(10, countkeys(table));
- ts.exec("deletemany -f -b row8");
- assertEquals(8, countkeys(table));
- ts.exec("scan -t " + table + " -np", true, "row8", false);
- make10();
- ts.exec("deletemany -f -b row4 -e row5");
- assertEquals(8, countkeys(table));
- make10();
- ts.exec("deletemany -f -c cf:col4,cf:col5");
- assertEquals(8, countkeys(table));
- make10();
- ts.exec("deletemany -f -r row3");
- assertEquals(9, countkeys(table));
- make10();
- ts.exec("deletemany -f -r row3");
- assertEquals(9, countkeys(table));
- make10();
- ts.exec("deletemany -f -b row3 -be -e row5 -ee");
- assertEquals(9, countkeys(table));
- ts.exec("deletetable -f " + table);
- }
-
- @Test
- public void deleterows() throws Exception {
- final String table = name.getMethodName();
-
- ts.exec("createtable " + table);
- final String tableId = getTableId(table);
-
- // deleterows
- int base = countFiles(tableId);
- assertEquals(0, base);
-
- log.info("Adding 2 splits");
- ts.exec("addsplits row5 row7");
-
- log.info("Writing 10 records");
- make10();
-
- log.info("Flushing table");
- ts.exec("flush -w -t " + table);
- log.info("Table flush completed");
-
- // One of the tablets we're writing to might migrate while data is being written, which would create a second file for that tablet.
- // If we notice this, compact and then move on.
- List<String> files = getFiles(tableId);
- if (3 < files.size()) {
- log.info("More than 3 files were found, compacting before proceeding");
- ts.exec("compact -w -t " + table);
- files = getFiles(tableId);
- assertEquals("Expected to only find 3 files after compaction: " + files, 3, files.size());
- }
-
- assertNotNull(files);
- assertEquals("Found the following files: " + files, 3, files.size());
- ts.exec("deleterows -t " + table + " -b row5 -e row7");
- assertEquals(2, countFiles(tableId));
- ts.exec("deletetable -f " + table);
- }
-
- @Test
- public void groups() throws Exception {
- final String table = name.getMethodName();
-
- ts.exec("createtable " + table);
- ts.exec("setgroups -t " + table + " alpha=a,b,c num=3,2,1");
- ts.exec("getgroups -t " + table, true, "alpha=a,b,c", true);
- ts.exec("getgroups -t " + table, true, "num=1,2,3", true);
- ts.exec("deletetable -f " + table);
- }
-
- @Test
- public void extensions() throws Exception {
- String extName = "ExampleShellExtension";
-
- // check for example extension
- ts.exec("help", true, extName, false);
- ts.exec("extensions -l", true, extName, false);
-
- // enable extensions and check for example
- ts.exec("extensions -e", true);
- ts.exec("extensions -l", true, extName, true);
- ts.exec("help", true, extName, true);
-
- // test example extension command
- ts.exec(extName + "::debug", true, "This is a test", true);
-
- // disable extensions and check for example
- ts.exec("extensions -d", true);
- ts.exec("extensions -l", true, extName, false);
- ts.exec("help", true, extName, false);
-
- // ensure extensions are really disabled
- ts.exec(extName + "::debug", true, "Unknown command", true);
- }
-
- @Test
- public void grep() throws Exception {
- final String table = name.getMethodName();
-
- ts.exec("createtable " + table, true);
- make10();
- ts.exec("grep row[123]", true, "row1", false);
- ts.exec("grep row5", true, "row5", true);
- ts.exec("deletetable -f " + table, true);
- }
-
- @Test
- public void help() throws Exception {
- ts.exec("help -np", true, "Help Commands", true);
- ts.exec("?", true, "Help Commands", true);
- for (String c : ("bye exit quit " + "about help info ? " + "deleteiter deletescaniter listiter setiter setscaniter "
- + "grant revoke systempermissions tablepermissions userpermissions " + "execfile history " + "authenticate cls clear notable sleep table user whoami "
- + "clonetable config createtable deletetable droptable du exporttable importtable offline online renametable tables "
- + "addsplits compact constraint flush getgropus getsplits merge setgroups " + "addauths createuser deleteuser dropuser getauths passwd setauths users "
- + "delete deletemany deleterows egrep formatter interpreter grep importdirectory insert maxrow scan").split(" ")) {
- ts.exec("help " + c, true);
- }
- }
-
- // @Test(timeout = 45000)
- public void history() throws Exception {
- final String table = name.getMethodName();
-
- ts.exec("history -c", true);
- ts.exec("createtable " + table);
- ts.exec("deletetable -f " + table);
- ts.exec("history", true, table, true);
- ts.exec("history", true, "history", true);
- }
-
- @Test
- public void importDirectory() throws Exception {
- final String table = name.getMethodName();
-
- Configuration conf = new Configuration();
- FileSystem fs = FileSystem.get(conf);
- File importDir = new File(rootPath, "import");
- assertTrue(importDir.mkdir());
- String even = new File(importDir, "even.rf").toString();
- String odd = new File(importDir, "odd.rf").toString();
- File errorsDir = new File(rootPath, "errors");
- assertTrue(errorsDir.mkdir());
- fs.mkdirs(new Path(errorsDir.toString()));
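- // write two RFiles directly with FileSKVWriter, one holding the even rows and one the odd rows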
- AccumuloConfiguration aconf = AccumuloConfiguration.getDefaultConfiguration();
- FileSKVWriter evenWriter = FileOperations.getInstance().openWriter(even, fs, conf, aconf);
- evenWriter.startDefaultLocalityGroup();
- FileSKVWriter oddWriter = FileOperations.getInstance().openWriter(odd, fs, conf, aconf);
- oddWriter.startDefaultLocalityGroup();
- long timestamp = System.currentTimeMillis();
- Text cf = new Text("cf");
- Text cq = new Text("cq");
- Value value = new Value("value".getBytes());
- for (int i = 0; i < 100; i += 2) {
- Key key = new Key(new Text(String.format("%8d", i)), cf, cq, timestamp);
- evenWriter.append(key, value);
- key = new Key(new Text(String.format("%8d", i + 1)), cf, cq, timestamp);
- oddWriter.append(key, value);
- }
- evenWriter.close();
- oddWriter.close();
- assertEquals(0, ts.shell.getExitCode());
- ts.exec("createtable " + table, true);
- ts.exec("importdirectory " + importDir + " " + errorsDir + " true", true);
- ts.exec("scan -r 00000000", true, "00000000", true);
- ts.exec("scan -r 00000099", true, "00000099", true);
- ts.exec("deletetable -f " + table);
- }
-
- @Test
- public void info() throws Exception {
- ts.exec("info", true, Constants.VERSION, true);
- }
-
- @Test
- public void interpreter() throws Exception {
- final String table = name.getMethodName();
-
- ts.exec("createtable " + table, true);
- ts.exec("interpreter -l", true, "HexScan", false);
- ts.exec("insert \\x02 cf cq value", true);
- ts.exec("scan -b 02", true, "value", false);
- ts.exec("interpreter -i org.apache.accumulo.core.util.interpret.HexScanInterpreter", true);
- // Need to allow time for this to propagate through zoocache/zookeeper
- UtilWaitThread.sleep(3000);
-
- ts.exec("interpreter -l", true, "HexScan", true);
- ts.exec("scan -b 02", true, "value", true);
- ts.exec("deletetable -f " + table, true);
- }
-
- @Test
- public void listcompactions() throws Exception {
- final String table = name.getMethodName();
-
- ts.exec("createtable " + table, true);
- ts.exec("config -t " + table + " -s table.iterator.minc.slow=30,org.apache.accumulo.test.functional.SlowIterator", true);
- ts.exec("config -t " + table + " -s table.iterator.minc.slow.opt.sleepTime=1000", true);
- ts.exec("insert a cf cq value", true);
- ts.exec("insert b cf cq value", true);
- ts.exec("insert c cf cq value", true);
- ts.exec("insert d cf cq value", true);
- ts.exec("flush -t " + table, true);
- ts.exec("sleep 0.2", true);
- ts.exec("listcompactions", true, "default_tablet");
- String[] lines = ts.output.get().split("\n");
- String last = lines[lines.length - 1];
- String[] parts = last.split("\\|");
- assertEquals(12, parts.length);
- ts.exec("deletetable -f " + table, true);
- }
-
- @Test
- public void maxrow() throws Exception {
- final String table = name.getMethodName();
-
- ts.exec("createtable " + table, true);
- ts.exec("insert a cf cq value", true);
- ts.exec("insert b cf cq value", true);
- ts.exec("insert ccc cf cq value", true);
- ts.exec("insert zzz cf cq value", true);
- ts.exec("maxrow", true, "zzz", true);
- ts.exec("delete zzz cf cq", true);
- ts.exec("maxrow", true, "ccc", true);
- ts.exec("deletetable -f " + table, true);
- }
-
- @Test
- public void merge() throws Exception {
- final String table = name.getMethodName();
-
- ts.exec("createtable " + table);
- ts.exec("addsplits a m z");
- ts.exec("getsplits", true, "z", true);
- ts.exec("merge --all", true);
- ts.exec("getsplits", true, "z", false);
- ts.exec("deletetable -f " + table);
- ts.exec("getsplits -t " + MetadataTable.NAME + "", true);
- assertEquals(2, ts.output.get().split("\n").length);
- ts.exec("getsplits -t accumulo.root", true);
- assertEquals(1, ts.output.get().split("\n").length);
- ts.exec("merge --all -t " + MetadataTable.NAME + "");
- ts.exec("getsplits -t " + MetadataTable.NAME + "", true);
- assertEquals(1, ts.output.get().split("\n").length);
- }
-
- @Test
- public void ping() throws Exception {
- for (int i = 0; i < 10; i++) {
- ts.exec("ping", true, "OK", true);
- // wait for both tservers to start up
- if (ts.output.get().split("\n").length == 3)
- break;
- UtilWaitThread.sleep(1000);
-
- }
- assertEquals(3, ts.output.get().split("\n").length);
- }
-
- @Test
- public void renametable() throws Exception {
- final String table = name.getMethodName() + "1", rename = name.getMethodName() + "2";
-
- ts.exec("createtable " + table);
- ts.exec("insert this is a value");
- ts.exec("renametable " + table + " " + rename);
- ts.exec("tables", true, rename, true);
- ts.exec("tables", true, table, false);
- ts.exec("scan -t " + rename, true, "value", true);
- ts.exec("deletetable -f " + rename, true);
- }
-
- @Test
- public void tables() throws Exception {
- final String table = name.getMethodName(), table1 = table + "_z", table2 = table + "_a";
- ts.exec("createtable " + table1);
- ts.exec("createtable " + table2);
- ts.exec("notable");
- String lst = ts.exec("tables -l");
- assertTrue(lst.indexOf(table2) < lst.indexOf(table1));
- lst = ts.exec("tables -l -s");
- assertTrue(lst.indexOf(table1) < lst.indexOf(table2));
- }
-
- @Test
- public void systempermission() throws Exception {
- ts.exec("systempermissions");
- assertEquals(12, ts.output.get().split("\n").length - 1);
- ts.exec("tablepermissions", true);
- assertEquals(6, ts.output.get().split("\n").length - 1);
- }
-
- @Test
- public void listscans() throws Exception {
- final String table = name.getMethodName();
-
- ts.exec("createtable " + table, true);
-
- // Should be about a 3 second scan
- for (int i = 0; i < 6; i++) {
- ts.exec("insert " + i + " cf cq value", true);
- }
- Connector connector = getConnector();
- final Scanner s = connector.createScanner(table, Authorizations.EMPTY);
- IteratorSetting cfg = new IteratorSetting(30, SlowIterator.class);
- SlowIterator.setSleepTime(cfg, 500);
- s.addScanIterator(cfg);
-
- Thread thread = new Thread() {
- @Override
- public void run() {
- try {
- Iterators.size(s.iterator());
- } catch (Exception ex) {
- throw new RuntimeException(ex);
- }
- }
- };
- thread.start();
-
- List<String> scans = new ArrayList<String>();
- // Try to find the active scan for about 15 seconds
- for (int i = 0; i < 50 && scans.isEmpty(); i++) {
- String currentScans = ts.exec("listscans", true);
- log.info("Got output from listscans:\n" + currentScans);
- String[] lines = currentScans.split("\n");
- for (int scanOffset = 2; scanOffset < lines.length; scanOffset++) {
- String currentScan = lines[scanOffset];
- if (currentScan.contains(table)) {
- log.info("Retaining scan: " + currentScan);
- scans.add(currentScan);
- } else {
- log.info("Ignoring scan because of wrong table: " + currentScan);
- }
- }
- UtilWaitThread.sleep(300);
- }
- thread.join();
-
- assertFalse("Could not find any active scans over table " + table, scans.isEmpty());
-
- for (String scan : scans) {
- if (!scan.contains("RUNNING")) {
- log.info("Ignoring scan because it doesn't contain 'RUNNING': " + scan);
- continue;
- }
- String parts[] = scan.split("\\|");
- assertEquals("Expected 13 colums, but found " + parts.length + " instead for '" + Arrays.toString(parts) + "'", 13, parts.length);
- String tserver = parts[0].trim();
- // TODO: any way to tell if the client address is accurate? could be local IP, host, loopback...?
- String hostPortPattern = ".+:\\d+";
- assertTrue(tserver.matches(hostPortPattern));
- assertTrue(getConnector().instanceOperations().getTabletServers().contains(tserver));
- String client = parts[1].trim();
- assertTrue(client.matches(hostPortPattern));
- }
-
- ts.exec("deletetable -f " + table, true);
- }
-
- @Test
- public void testPertableClasspath() throws Exception {
- final String table = name.getMethodName();
-
- File fooFilterJar = File.createTempFile("FooFilter", ".jar", new File(rootPath));
-
- FileUtils.copyURLToFile(this.getClass().getResource("/FooFilter.jar"), fooFilterJar);
- fooFilterJar.deleteOnExit();
-
- File fooConstraintJar = File.createTempFile("FooConstraint", ".jar", new File(rootPath));
- FileUtils.copyURLToFile(this.getClass().getResource("/FooConstraint.jar"), fooConstraintJar);
- fooConstraintJar.deleteOnExit();
-
- ts.exec("config -s " + Property.VFS_CONTEXT_CLASSPATH_PROPERTY.getKey() + "cx1=" + fooFilterJar.toURI().toString() + ","
- + fooConstraintJar.toURI().toString(), true);
-
- ts.exec("createtable " + table, true);
- ts.exec("config -t " + table + " -s " + Property.TABLE_CLASSPATH.getKey() + "=cx1", true);
-
- UtilWaitThread.sleep(200);
-
- // We can't use the setiter command as Filter implements OptionDescriber which
- // forces us to enter more input that I don't know how to input
- // Instead, we can just manually set the property on the table.
- ts.exec("config -t " + table + " -s " + Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.foo=10,org.apache.accumulo.test.FooFilter");
-
- ts.exec("insert foo f q v", true);
-
- UtilWaitThread.sleep(100);
-
- ts.exec("scan -np", true, "foo", false);
-
- ts.exec("constraint -a FooConstraint", true);
-
- ts.exec("offline -w " + table);
- ts.exec("online -w " + table);
-
- ts.exec("table " + table, true);
- ts.exec("insert foo f q v", false);
- ts.exec("insert ok foo q v", true);
-
- ts.exec("deletetable -f " + table, true);
- ts.exec("config -d " + Property.VFS_CONTEXT_CLASSPATH_PROPERTY.getKey() + "cx1");
-
- }
-
- @Test
- public void trace() throws Exception {
- // Make sure to not collide with the "trace" table
- final String table = name.getMethodName() + "Test";
-
- ts.exec("trace on", true);
- ts.exec("createtable " + table, true);
- ts.exec("insert a b c value", true);
- ts.exec("scan -np", true, "value", true);
- ts.exec("deletetable -f " + table);
- ts.exec("sleep 1");
- String trace = ts.exec("trace off");
- System.out.println(trace);
- assertTrue(trace.contains("sendMutations"));
- assertTrue(trace.contains("startScan"));
- assertTrue(trace.contains("DeleteTable"));
- }
-
- @Test
- public void badLogin() throws Exception {
- // Can't run with Kerberos, can't switch identity in shell presently
- Assume.assumeTrue(getToken() instanceof PasswordToken);
- ts.input.set(getRootPassword() + "\n");
- String err = ts.exec("user NoSuchUser", false);
- assertTrue(err.contains("BAD_CREDENTIALS for user NoSuchUser"));
- }
-
- @Test
- public void namespaces() throws Exception {
- ts.exec("namespaces", true, "\"\"", true); // default namespace, displayed as quoted empty string
- ts.exec("namespaces", true, Namespaces.ACCUMULO_NAMESPACE, true);
- ts.exec("createnamespace thing1", true);
- String namespaces = ts.exec("namespaces");
- assertTrue(namespaces.contains("thing1"));
-
- ts.exec("renamenamespace thing1 thing2");
- namespaces = ts.exec("namespaces");
- assertTrue(namespaces.contains("thing2"));
- assertTrue(!namespaces.contains("thing1"));
-
- // can't delete a namespace that still contains tables, unless you do -f
- ts.exec("createtable thing2.thingy", true);
- ts.exec("deletenamespace thing2");
- ts.exec("y");
- ts.exec("namespaces", true, "thing2", true);
-
- ts.exec("du -ns thing2", true, "thing2.thingy", true);
-
- // all "TableOperation" commands can take a namespace
- ts.exec("offline -ns thing2", true);
- ts.exec("online -ns thing2", true);
- ts.exec("flush -ns thing2", true);
- ts.exec("compact -ns thing2", true);
- ts.exec("createnamespace testers3", true);
- ts.exec("createtable testers3.1", true);
- ts.exec("createtable testers3.2", true);
- ts.exec("deletetable -ns testers3 -f", true);
- ts.exec("tables", true, "testers3.1", false);
- ts.exec("namespaces", true, "testers3", true);
- ts.exec("deletenamespace testers3 -f", true);
- ts.input.set("true\n\n\nSTRING\n");
- ts.exec("setiter -ns thing2 -scan -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -n name", true);
- ts.exec("listiter -ns thing2 -scan", true, "Summing", true);
- ts.exec("deleteiter -ns thing2 -n name -scan", true);
- ts.exec("createuser dude");
- ts.exec("pass");
- ts.exec("pass");
- ts.exec("grant Namespace.CREATE_TABLE -ns thing2 -u dude", true);
- ts.exec("revoke Namespace.CREATE_TABLE -ns thing2 -u dude", true);
-
- // properties override and such
- ts.exec("config -ns thing2 -s table.file.max=44444", true);
- ts.exec("config -ns thing2", true, "44444", true);
- ts.exec("config -t thing2.thingy", true, "44444", true);
- ts.exec("config -t thing2.thingy -s table.file.max=55555", true);
- ts.exec("config -t thing2.thingy", true, "55555", true);
-
- // can copy properties when creating
- ts.exec("createnamespace thing3 -cc thing2", true);
- ts.exec("config -ns thing3", true, "44444", true);
-
- ts.exec("deletenamespace -f thing2", true);
- ts.exec("namespaces", true, "thing2", false);
- ts.exec("tables", true, "thing2.thingy", false);
-
- // put constraints on a namespace
- ts.exec("constraint -ns thing3 -a org.apache.accumulo.examples.simple.constraints.NumericValueConstraint", true);
- ts.exec("createtable thing3.constrained", true);
- ts.exec("table thing3.constrained", true);
- ts.exec("constraint -d 1");
- // should fail
- ts.exec("constraint -l", true, "NumericValueConstraint", true);
- ts.exec("insert r cf cq abc", false);
- ts.exec("constraint -ns thing3 -d 1");
- ts.exec("sleep 1");
- ts.exec("insert r cf cq abc", true);
- }
-
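- // scan output has one line per key/value plus one extra line, hence the -1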
- private int countkeys(String table) throws IOException {
- ts.exec("scan -np -t " + table);
- return ts.output.get().split("\n").length - 1;
- }
-
- @Test
- public void scans() throws Exception {
- ts.exec("createtable t");
- make10();
- String result = ts.exec("scan -np -b row1 -e row1");
- assertEquals(2, result.split("\n").length);
- result = ts.exec("scan -np -b row3 -e row5");
- assertEquals(4, result.split("\n").length);
- result = ts.exec("scan -np -r row3");
- assertEquals(2, result.split("\n").length);
- result = ts.exec("scan -np -b row:");
- assertEquals(1, result.split("\n").length);
- result = ts.exec("scan -np -b row");
- assertEquals(11, result.split("\n").length);
- result = ts.exec("scan -np -e row:");
- assertEquals(11, result.split("\n").length);
- ts.exec("deletetable -f t");
- }
-
- @Test
- public void whoami() throws Exception {
- AuthenticationToken token = getToken();
- assertTrue(ts.exec("whoami", true).contains(getPrincipal()));
- // Unnecessary with Kerberos enabled, won't prompt for a password
- if (token instanceof PasswordToken) {
- ts.input.set("secret\nsecret\n");
- }
- ts.exec("createuser test_user");
- ts.exec("setauths -u test_user -s 12,3,4");
- String auths = ts.exec("getauths -u test_user");
- assertTrue(auths.contains("3") && auths.contains("12") && auths.contains("4"));
- // No support to switch users within the shell with Kerberos
- if (token instanceof PasswordToken) {
- ts.input.set("secret\n");
- ts.exec("user test_user", true);
- assertTrue(ts.exec("whoami", true).contains("test_user"));
- ts.input.set(getRootPassword() + "\n");
- ts.exec("user root", true);
- }
- }
-
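- // seed the current table with ten entries, row0..row9, one column apiece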
- private void make10() throws IOException {
- for (int i = 0; i < 10; i++) {
- ts.exec(String.format("insert row%d cf col%d value", i, i));
- }
- }
-
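- // list the RFiles backing a table by scanning the file column family of its
- // metadata rows; the first output line is not a file entry, so it is skipped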
- private List<String> getFiles(String tableId) throws IOException {
- ts.output.clear();
-
- ts.exec("scan -t " + MetadataTable.NAME + " -np -c file -b " + tableId + " -e " + tableId + "~");
-
- log.debug("countFiles(): " + ts.output.get());
-
- String[] lines = StringUtils.split(ts.output.get(), "\n");
- ts.output.clear();
-
- if (0 == lines.length) {
- return Collections.emptyList();
- }
-
- return Arrays.asList(Arrays.copyOfRange(lines, 1, lines.length));
- }
-
- private int countFiles(String tableId) throws IOException {
- return getFiles(tableId).size();
- }
-
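- // table ids propagate asynchronously after createtable, so poll the id map for up to ~5 seconds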
- private String getTableId(String tableName) throws Exception {
- Connector conn = getConnector();
-
- for (int i = 0; i < 5; i++) {
- Map<String,String> nameToId = conn.tableOperations().tableIdMap();
- if (nameToId.containsKey(tableName)) {
- return nameToId.get(tableName);
- } else {
- Thread.sleep(1000);
- }
- }
-
- fail("Could not find ID for table: " + tableName);
- // Will never get here
- return null;
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/SplitCancelsMajCIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/SplitCancelsMajCIT.java b/test/src/test/java/org/apache/accumulo/test/SplitCancelsMajCIT.java
deleted file mode 100644
index 4cad3a7..0000000
--- a/test/src/test/java/org/apache/accumulo/test/SplitCancelsMajCIT.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertTrue;
-
-import java.util.EnumSet;
-import java.util.SortedSet;
-import java.util.TreeSet;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.SharedMiniClusterBase;
-import org.apache.accumulo.test.functional.SlowIterator;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-// ACCUMULO-2862
-public class SplitCancelsMajCIT extends SharedMiniClusterBase {
-
- @Override
- public int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Test
- public void test() throws Exception {
- final String tableName = getUniqueNames(1)[0];
- final Connector c = getConnector();
- c.tableOperations().create(tableName);
- // majc should take 100 * .5 secs
- IteratorSetting it = new IteratorSetting(100, SlowIterator.class);
- SlowIterator.setSleepTime(it, 500);
- c.tableOperations().attachIterator(tableName, it, EnumSet.of(IteratorScope.majc));
- BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
- for (int i = 0; i < 100; i++) {
- Mutation m = new Mutation("" + i);
- m.put("", "", new Value());
- bw.addMutation(m);
- }
- bw.flush();
- // start majc
- final AtomicReference<Exception> ex = new AtomicReference<Exception>();
- Thread thread = new Thread() {
- @Override
- public void run() {
- try {
- c.tableOperations().compact(tableName, null, null, true, true);
- } catch (Exception e) {
- ex.set(e);
- }
- }
- };
- thread.start();
-
- long now = System.currentTimeMillis();
- UtilWaitThread.sleep(10 * 1000);
- // split the table, interrupts the compaction
- SortedSet<Text> partitionKeys = new TreeSet<Text>();
- partitionKeys.add(new Text("10"));
- c.tableOperations().addSplits(tableName, partitionKeys);
- thread.join();
- // wait for the restarted compaction
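- // 10s head start plus a full ~50s re-compaction after the split comes to ~60s;
- // finishing much sooner would mean the split never cancelled the majc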
- assertTrue(System.currentTimeMillis() - now > 59 * 1000);
- if (ex.get() != null)
- throw ex.get();
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/SplitRecoveryIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/SplitRecoveryIT.java b/test/src/test/java/org/apache/accumulo/test/SplitRecoveryIT.java
deleted file mode 100644
index 298c761..0000000
--- a/test/src/test/java/org/apache/accumulo/test/SplitRecoveryIT.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertEquals;
-
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class SplitRecoveryIT extends AccumuloClusterHarness {
-
- private Mutation m(String row) {
- Mutation result = new Mutation(row);
- result.put("cf", "cq", new Value("value".getBytes()));
- return result;
- }
-
- boolean isOffline(String tablename, Connector connector) throws TableNotFoundException {
- String tableId = connector.tableOperations().tableIdMap().get(tablename);
- Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- scanner.setRange(new Range(new Text(tableId + ";"), new Text(tableId + "<")));
- scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
- return Iterators.size(scanner.iterator()) == 0;
- }
-
- @Override
- public int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Test
- public void test() throws Exception {
-
- String tableName = getUniqueNames(1)[0];
-
- for (int tn = 0; tn < 2; tn++) {
-
- Connector connector = getConnector();
- // create a table and put some data in it
- connector.tableOperations().create(tableName);
- BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
- bw.addMutation(m("a"));
- bw.addMutation(m("b"));
- bw.addMutation(m("c"));
- bw.close();
- // take the table offline
- connector.tableOperations().offline(tableName);
- while (!isOffline(tableName, connector))
- UtilWaitThread.sleep(200);
-
- // poke a partial split into the metadata table
- connector.securityOperations().grantTablePermission(getAdminPrincipal(), MetadataTable.NAME, TablePermission.WRITE);
- String tableId = connector.tableOperations().tableIdMap().get(tableName);
-
- KeyExtent extent = new KeyExtent(new Text(tableId), null, new Text("b"));
- Mutation m = extent.getPrevRowUpdateMutation();
-
- TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value(Double.toString(0.5).getBytes()));
- TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(null));
- bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
- bw.addMutation(m);
-
- if (tn == 1) {
-
- bw.flush();
-
- Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- scanner.setRange(extent.toMetadataRange());
- scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
-
- KeyExtent extent2 = new KeyExtent(new Text(tableId), new Text("b"), null);
- m = extent2.getPrevRowUpdateMutation();
- TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t2".getBytes()));
- TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value("M0".getBytes()));
-
- for (Entry<Key,Value> entry : scanner) {
- m.put(DataFileColumnFamily.NAME, entry.getKey().getColumnQualifier(), entry.getValue());
- }
-
- bw.addMutation(m);
- }
-
- bw.close();
- // bring the table online
- connector.tableOperations().online(tableName);
-
- // verify the tablets went online
- Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
- int i = 0;
- String expected[] = {"a", "b", "c"};
- for (Entry<Key,Value> entry : scanner) {
- assertEquals(expected[i], entry.getKey().getRow().toString());
- i++;
- }
- assertEquals(3, i);
-
- connector.tableOperations().delete(tableName);
-
- }
- }
-
-}
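
For context on the metadata surgery in the deleted test above: an in-progress split is recorded by writing a split ratio and the old previous end row into the low tablet's metadata entry, and a tablet server finishes the split when it next loads that tablet. A minimal sketch of that partial-split mutation, reusing the client classes imported in the deleted file and assuming a String tableId obtained as in the test (the end row "b" and ratio 0.5 mirror the test's values):

  KeyExtent extent = new KeyExtent(new Text(tableId), null, new Text("b")); // low tablet, end row "b"
  Mutation m = extent.getPrevRowUpdateMutation();
  // These two columns mark the split as incomplete; recovery completes it on load.
  TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value("0.5".getBytes()));
  TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(null));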
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/TableConfigurationUpdateIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/TableConfigurationUpdateIT.java b/test/src/test/java/org/apache/accumulo/test/TableConfigurationUpdateIT.java
deleted file mode 100644
index 1dd964c..0000000
--- a/test/src/test/java/org/apache/accumulo/test/TableConfigurationUpdateIT.java
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import java.util.ArrayList;
-import java.util.Random;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.impl.Namespaces;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.server.conf.NamespaceConfiguration;
-import org.apache.accumulo.server.conf.TableConfiguration;
-import org.junit.Assert;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class TableConfigurationUpdateIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(TableConfigurationUpdateIT.class);
-
- @Override
- public int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Test
- public void test() throws Exception {
- Connector conn = getConnector();
- Instance inst = conn.getInstance();
-
- String table = getUniqueNames(1)[0];
- conn.tableOperations().create(table);
-
- final NamespaceConfiguration defaultConf = new NamespaceConfiguration(Namespaces.DEFAULT_NAMESPACE_ID, inst,
- AccumuloConfiguration.getDefaultConfiguration());
-
- // Cache invalidates 25% of the time
- int randomMax = 4;
- // Number of threads
- int numThreads = 2;
- // Number of iterations per thread
- int iterations = 100000;
- AccumuloConfiguration tableConf = new TableConfiguration(inst, table, defaultConf);
-
- long start = System.currentTimeMillis();
- ExecutorService svc = Executors.newFixedThreadPool(numThreads);
- CountDownLatch countDown = new CountDownLatch(numThreads);
- ArrayList<Future<Exception>> futures = new ArrayList<Future<Exception>>(numThreads);
-
- for (int i = 0; i < numThreads; i++) {
- futures.add(svc.submit(new TableConfRunner(randomMax, iterations, tableConf, countDown)));
- }
-
- svc.shutdown();
- Assert.assertTrue(svc.awaitTermination(60, TimeUnit.MINUTES));
-
- for (Future<Exception> fut : futures) {
- Exception e = fut.get();
- if (null != e) {
- Assert.fail("Thread failed with exception " + e);
- }
- }
-
- long end = System.currentTimeMillis();
- log.debug(tableConf + " with " + iterations + " iterations and " + numThreads + " threads and cache invalidates " + ((1. / randomMax) * 100.) + "% took "
- + (end - start) / 1000 + " second(s)");
- }
-
- public static class TableConfRunner implements Callable<Exception> {
- private static final Property prop = Property.TABLE_SPLIT_THRESHOLD;
- private AccumuloConfiguration tableConf;
- private CountDownLatch countDown;
- private int iterations, randMax;
-
- public TableConfRunner(int randMax, int iterations, AccumuloConfiguration tableConf, CountDownLatch countDown) {
- this.randMax = randMax;
- this.iterations = iterations;
- this.tableConf = tableConf;
- this.countDown = countDown;
- }
-
- @Override
- public Exception call() {
- Random r = new Random();
- countDown.countDown();
- try {
- countDown.await();
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- return e;
- }
-
- String t = Thread.currentThread().getName() + " ";
- try {
- for (int i = 0; i < iterations; i++) {
- // if (i % 10000 == 0) {
- // log.info(t + " " + i);
- // }
- int choice = r.nextInt(randMax);
- if (choice < 1) {
- tableConf.invalidateCache();
- } else {
- tableConf.get(prop);
- }
- }
- } catch (Exception e) {
- log.error(t, e);
- return e;
- }
-
- return null;
- }
-
- }
-
-}
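
The CountDownLatch in TableConfRunner above is a start barrier: each worker counts down and then awaits, so no thread begins its timed loop until every thread is ready. A standalone sketch of the same pattern, assuming nothing beyond java.util.concurrent:

  import java.util.concurrent.CountDownLatch;
  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;

  public class StartBarrierSketch {
    public static void main(String[] args) throws Exception {
      final int n = 4;
      final CountDownLatch ready = new CountDownLatch(n);
      ExecutorService svc = Executors.newFixedThreadPool(n);
      for (int i = 0; i < n; i++) {
        svc.submit(new Runnable() {
          @Override
          public void run() {
            ready.countDown(); // announce this worker is ready
            try {
              ready.await(); // block until all n workers are ready
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
              return;
            }
            // the timed work would begin here, as in TableConfRunner
          }
        });
      }
      svc.shutdown();
    }
  }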
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/KerberosIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/KerberosIT.java b/test/src/main/java/org/apache/accumulo/test/functional/KerberosIT.java
new file mode 100644
index 0000000..aa8313e
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/KerberosIT.java
@@ -0,0 +1,573 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.admin.CompactionConfig;
+import org.apache.accumulo.core.client.admin.DelegationTokenConfig;
+import org.apache.accumulo.core.client.impl.AuthenticationTokenIdentifier;
+import org.apache.accumulo.core.client.impl.DelegationTokenImpl;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.KerberosToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.core.security.SystemPermission;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.harness.AccumuloITBase;
+import org.apache.accumulo.harness.MiniClusterConfigurationCallback;
+import org.apache.accumulo.harness.MiniClusterHarness;
+import org.apache.accumulo.harness.TestingKdc;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Sets;
+
+/**
+ * MAC test which uses {@link MiniKdc} to simulate a secure environment. Can be used as a sanity check for Kerberos/SASL testing.
+ */
+public class KerberosIT extends AccumuloITBase {
+ private static final Logger log = LoggerFactory.getLogger(KerberosIT.class);
+
+ private static TestingKdc kdc;
+ private static String krbEnabledForITs = null;
+ private static ClusterUser rootUser;
+
+ @BeforeClass
+ public static void startKdc() throws Exception {
+ kdc = new TestingKdc();
+ kdc.start();
+ krbEnabledForITs = System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION);
+ if (null == krbEnabledForITs || !Boolean.parseBoolean(krbEnabledForITs)) {
+ System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, "true");
+ }
+ rootUser = kdc.getRootUser();
+ }
+
+ @AfterClass
+ public static void stopKdc() throws Exception {
+ if (null != kdc) {
+ kdc.stop();
+ }
+ if (null != krbEnabledForITs) {
+ System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, krbEnabledForITs);
+ }
+ }
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 60 * 5;
+ }
+
+ private MiniAccumuloClusterImpl mac;
+
+ @Before
+ public void startMac() throws Exception {
+ MiniClusterHarness harness = new MiniClusterHarness();
+ mac = harness.create(this, new PasswordToken("unused"), kdc, new MiniClusterConfigurationCallback() {
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
+ Map<String,String> site = cfg.getSiteConfig();
+ site.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "10s");
+ cfg.setSiteConfig(site);
+ }
+
+ });
+
+ mac.getConfig().setNumTservers(1);
+ mac.start();
+ // Enable Kerberos auth
+ Configuration conf = new Configuration(false);
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+ UserGroupInformation.setConfiguration(conf);
+ }
+
+ @After
+ public void stopMac() throws Exception {
+ if (null != mac) {
+ mac.stop();
+ }
+ }
+
+ @Test
+ public void testAdminUser() throws Exception {
+ // Login as the client (provided to `accumulo init` as the "root" user)
+ UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+
+ final Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+
+ // The "root" user should have all system permissions
+ for (SystemPermission perm : SystemPermission.values()) {
+ assertTrue("Expected user to have permission: " + perm, conn.securityOperations().hasSystemPermission(conn.whoami(), perm));
+ }
+
+ // and the ability to modify the root and metadata tables
+ for (String table : Arrays.asList(RootTable.NAME, MetadataTable.NAME)) {
+ assertTrue(conn.securityOperations().hasTablePermission(conn.whoami(), table, TablePermission.ALTER_TABLE));
+ }
+ }
+
+ @Test
+ public void testNewUser() throws Exception {
+ String newUser = testName.getMethodName();
+ final File newUserKeytab = new File(kdc.getKeytabDir(), newUser + ".keytab");
+ if (newUserKeytab.exists() && !newUserKeytab.delete()) {
+ log.warn("Unable to delete {}", newUserKeytab);
+ }
+
+ // Create a new user
+ kdc.createPrincipal(newUserKeytab, newUser);
+
+ newUser = kdc.qualifyUser(newUser);
+
+ // Login as the "root" user
+ UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+ log.info("Logged in as {}", rootUser.getPrincipal());
+
+ Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+ log.info("Created connector as {}", rootUser.getPrincipal());
+ assertEquals(rootUser.getPrincipal(), conn.whoami());
+
+ // Make sure the system user exists -- this will force some RPC to happen server-side
+ createTableWithDataAndCompact(conn);
+
+ HashSet<String> users = Sets.newHashSet(rootUser.getPrincipal());
+ assertEquals(users, conn.securityOperations().listLocalUsers());
+
+ // Switch to a new user
+ UserGroupInformation.loginUserFromKeytab(newUser, newUserKeytab.getAbsolutePath());
+ log.info("Logged in as {}", newUser);
+
+ conn = mac.getConnector(newUser, new KerberosToken());
+ log.info("Created connector as {}", newUser);
+ assertEquals(newUser, conn.whoami());
+
+ // The new user should have no system permissions
+ for (SystemPermission perm : SystemPermission.values()) {
+ assertFalse(conn.securityOperations().hasSystemPermission(newUser, perm));
+ }
+
+ users.add(newUser);
+
+ // Same users as before, plus the new user we just created
+ assertEquals(users, conn.securityOperations().listLocalUsers());
+ }
+
+ @Test
+ public void testUserPrivilegesThroughGrant() throws Exception {
+ String user1 = testName.getMethodName();
+ final File user1Keytab = new File(kdc.getKeytabDir(), user1 + ".keytab");
+ if (user1Keytab.exists() && !user1Keytab.delete()) {
+ log.warn("Unable to delete {}", user1Keytab);
+ }
+
+ // Create a new user
+ kdc.createPrincipal(user1Keytab, user1);
+
+ user1 = kdc.qualifyUser(user1);
+
+ // Log in as user1
+ UserGroupInformation.loginUserFromKeytab(user1, user1Keytab.getAbsolutePath());
+ log.info("Logged in as {}", user1);
+
+ // Indirectly creates this user when we use it
+ Connector conn = mac.getConnector(user1, new KerberosToken());
+ log.info("Created connector as {}", user1);
+
+ // The new user should have no system permissions
+ for (SystemPermission perm : SystemPermission.values()) {
+ assertFalse(conn.securityOperations().hasSystemPermission(user1, perm));
+ }
+
+ UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+ conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+
+ conn.securityOperations().grantSystemPermission(user1, SystemPermission.CREATE_TABLE);
+
+ // Switch back to the original user
+ UserGroupInformation.loginUserFromKeytab(user1, user1Keytab.getAbsolutePath());
+ conn = mac.getConnector(user1, new KerberosToken());
+
+ // Shouldn't throw an exception since we granted the create table permission
+ final String table = testName.getMethodName() + "_user_table";
+ conn.tableOperations().create(table);
+
+ // Make sure we can actually use the table we made
+ BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+ Mutation m = new Mutation("a");
+ m.put("b", "c", "d");
+ bw.addMutation(m);
+ bw.close();
+
+ conn.tableOperations().compact(table, new CompactionConfig().setWait(true).setFlush(true));
+ }
+
+ @Test
+ public void testUserPrivilegesForTable() throws Exception {
+ String user1 = testName.getMethodName();
+ final File user1Keytab = new File(kdc.getKeytabDir(), user1 + ".keytab");
+ if (user1Keytab.exists() && !user1Keytab.delete()) {
+ log.warn("Unable to delete {}", user1Keytab);
+ }
+
+ // Create a new user -- the principal name here cannot contain the realm
+ kdc.createPrincipal(user1Keytab, user1);
+
+ user1 = kdc.qualifyUser(user1);
+
+ // Log in as user1
+ UserGroupInformation.loginUserFromKeytab(user1, user1Keytab.getAbsolutePath());
+ log.info("Logged in as {}", user1);
+
+ // Indirectly creates this user when we use it
+ Connector conn = mac.getConnector(user1, new KerberosToken());
+ log.info("Created connector as {}", user1);
+
+ // The new user should have no system permissions
+ for (SystemPermission perm : SystemPermission.values()) {
+ assertFalse(conn.securityOperations().hasSystemPermission(user1, perm));
+ }
+
+ UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+ conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+
+ final String table = testName.getMethodName() + "_user_table";
+ conn.tableOperations().create(table);
+
+ final String viz = "viz";
+
+ // Give our unprivileged user permission on the table we made for them
+ conn.securityOperations().grantTablePermission(user1, table, TablePermission.READ);
+ conn.securityOperations().grantTablePermission(user1, table, TablePermission.WRITE);
+ conn.securityOperations().grantTablePermission(user1, table, TablePermission.ALTER_TABLE);
+ conn.securityOperations().grantTablePermission(user1, table, TablePermission.DROP_TABLE);
+ conn.securityOperations().changeUserAuthorizations(user1, new Authorizations(viz));
+
+ // Switch back to the original user
+ UserGroupInformation.loginUserFromKeytab(user1, user1Keytab.getAbsolutePath());
+ conn = mac.getConnector(user1, new KerberosToken());
+
+ // Make sure we can actually use the table we made
+
+ // Write data
+ final long ts = 1000L;
+ BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+ Mutation m = new Mutation("a");
+ m.put("b", "c", new ColumnVisibility(viz.getBytes()), ts, "d");
+ bw.addMutation(m);
+ bw.close();
+
+ // Compact
+ conn.tableOperations().compact(table, new CompactionConfig().setWait(true).setFlush(true));
+
+ // Alter
+ conn.tableOperations().setProperty(table, Property.TABLE_BLOOM_ENABLED.getKey(), "true");
+
+ // Read (and proper authorizations)
+ Scanner s = conn.createScanner(table, new Authorizations(viz));
+ Iterator<Entry<Key,Value>> iter = s.iterator();
+ assertTrue("No results from iterator", iter.hasNext());
+ Entry<Key,Value> entry = iter.next();
+ assertEquals(new Key("a", "b", "c", viz, ts), entry.getKey());
+ assertEquals(new Value("d".getBytes()), entry.getValue());
+ assertFalse("Had more results from iterator", iter.hasNext());
+ }
+
+ @Test
+ public void testDelegationToken() throws Exception {
+ final String tableName = getUniqueNames(1)[0];
+
+ // Login as the "root" user
+ UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+ log.info("Logged in as {}", rootUser.getPrincipal());
+
+ final int numRows = 100, numColumns = 10;
+
+ // As the "root" user, open up the connection and get a delegation token
+ final AuthenticationToken delegationToken = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
+ @Override
+ public AuthenticationToken run() throws Exception {
+ Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+ log.info("Created connector as {}", rootUser.getPrincipal());
+ assertEquals(rootUser.getPrincipal(), conn.whoami());
+
+ conn.tableOperations().create(tableName);
+ BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+ for (int r = 0; r < numRows; r++) {
+ Mutation m = new Mutation(Integer.toString(r));
+ for (int c = 0; c < numColumns; c++) {
+ String col = Integer.toString(c);
+ m.put(col, col, col);
+ }
+ bw.addMutation(m);
+ }
+ bw.close();
+
+ return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
+ }
+ });
+
+ // The above keytab login has no way to log out, so make a fake user that won't have Kerberos credentials
+ UserGroupInformation userWithoutPrivs = UserGroupInformation.createUserForTesting("fake_user", new String[0]);
+ int recordsSeen = userWithoutPrivs.doAs(new PrivilegedExceptionAction<Integer>() {
+ @Override
+ public Integer run() throws Exception {
+ Connector conn = mac.getConnector(rootUser.getPrincipal(), delegationToken);
+
+ BatchScanner bs = conn.createBatchScanner(tableName, Authorizations.EMPTY, 2);
+ bs.setRanges(Collections.singleton(new Range()));
+ int recordsSeen = Iterables.size(bs);
+ bs.close();
+ return recordsSeen;
+ }
+ });
+
+ assertEquals(numRows * numColumns, recordsSeen);
+ }
+
+ @Test
+ public void testDelegationTokenAsDifferentUser() throws Exception {
+ // Login as the "root" user
+ UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+ log.info("Logged in as {}", rootUser.getPrincipal());
+
+ // As the "root" user, open up the connection and get a delegation token
+ Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+ log.info("Created connector as {}", rootUser.getPrincipal());
+ assertEquals(rootUser.getPrincipal(), conn.whoami());
+ final AuthenticationToken delegationToken = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
+
+ // The above keytab login has no way to log out, so make a fake user that won't have Kerberos credentials
+ UserGroupInformation userWithoutPrivs = UserGroupInformation.createUserForTesting("fake_user", new String[0]);
+ try {
+ // Use the delegation token to try to log in as a different user
+ userWithoutPrivs.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ mac.getConnector("some_other_user", delegationToken);
+ return null;
+ }
+ });
+ fail("Using a delegation token as a different user should throw an exception");
+ } catch (UndeclaredThrowableException e) {
+ Throwable cause = e.getCause();
+ assertNotNull(cause);
+ // We should get an AccumuloSecurityException from trying to use a delegation token for the wrong user
+ assertTrue("Expected cause to be AccumuloSecurityException, but was " + cause.getClass(), cause instanceof AccumuloSecurityException);
+ }
+ }
+
+ @Test(expected = AccumuloSecurityException.class)
+ public void testGetDelegationTokenDenied() throws Exception {
+ String newUser = testName.getMethodName();
+ final File newUserKeytab = new File(kdc.getKeytabDir(), newUser + ".keytab");
+ if (newUserKeytab.exists() && !newUserKeytab.delete()) {
+ log.warn("Unable to delete {}", newUserKeytab);
+ }
+
+ // Create a new user
+ kdc.createPrincipal(newUserKeytab, newUser);
+
+ newUser = kdc.qualifyUser(newUser);
+
+ // Login as a normal user
+ UserGroupInformation.loginUserFromKeytab(newUser, newUserKeytab.getAbsolutePath());
+
+ // As the "root" user, open up the connection and get a delegation token
+ Connector conn = mac.getConnector(newUser, new KerberosToken());
+ log.info("Created connector as {}", newUser);
+ assertEquals(newUser, conn.whoami());
+
+ conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
+ }
+
+ @Test
+ public void testRestartedMasterReusesSecretKey() throws Exception {
+ // Login as the "root" user
+ UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+ log.info("Logged in as {}", rootUser.getPrincipal());
+
+ // As the "root" user, open up the connection and get a delegation token
+ final AuthenticationToken delegationToken1 = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
+ @Override
+ public AuthenticationToken run() throws Exception {
+ Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+ log.info("Created connector as {}", rootUser.getPrincipal());
+ assertEquals(rootUser.getPrincipal(), conn.whoami());
+
+ AuthenticationToken token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
+
+ assertTrue("Could not get tables with delegation token", mac.getConnector(rootUser.getPrincipal(), token).tableOperations().list().size() > 0);
+
+ return token;
+ }
+ });
+
+ log.info("Stopping master");
+ mac.getClusterControl().stop(ServerType.MASTER);
+ Thread.sleep(5000);
+ log.info("Restarting master");
+ mac.getClusterControl().start(ServerType.MASTER);
+
+ // Make sure our original token is still good
+ root.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ Connector conn = mac.getConnector(rootUser.getPrincipal(), delegationToken1);
+
+ assertTrue("Could not get tables with delegation token", conn.tableOperations().list().size() > 0);
+
+ return null;
+ }
+ });
+
+ // Get a new token, so we can compare the keyId on the second to the first
+ final AuthenticationToken delegationToken2 = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
+ @Override
+ public AuthenticationToken run() throws Exception {
+ Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+ log.info("Created connector as {}", rootUser.getPrincipal());
+ assertEquals(rootUser.getPrincipal(), conn.whoami());
+
+ AuthenticationToken token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
+
+ assertTrue("Could not get tables with delegation token", mac.getConnector(rootUser.getPrincipal(), token).tableOperations().list().size() > 0);
+
+ return token;
+ }
+ });
+
+ // A restarted master should reuse the same secret key if it hasn't expired (1 day by default)
+ DelegationTokenImpl dt1 = (DelegationTokenImpl) delegationToken1;
+ DelegationTokenImpl dt2 = (DelegationTokenImpl) delegationToken2;
+ assertEquals(dt1.getIdentifier().getKeyId(), dt2.getIdentifier().getKeyId());
+ }
+
+ @Test(expected = AccumuloException.class)
+ public void testDelegationTokenWithInvalidLifetime() throws Throwable {
+ // Login as the "root" user
+ UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+ log.info("Logged in as {}", rootUser.getPrincipal());
+
+ // As the "root" user, open up the connection and get a delegation token
+ try {
+ root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
+ @Override
+ public AuthenticationToken run() throws Exception {
+ Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+ log.info("Created connector as {}", rootUser.getPrincipal());
+ assertEquals(rootUser.getPrincipal(), conn.whoami());
+
+ // Should fail
+ return conn.securityOperations().getDelegationToken(new DelegationTokenConfig().setTokenLifetime(Long.MAX_VALUE, TimeUnit.MILLISECONDS));
+ }
+ });
+ } catch (UndeclaredThrowableException e) {
+ Throwable cause = e.getCause();
+ if (null != cause) {
+ throw cause;
+ } else {
+ throw e;
+ }
+ }
+ }
+
+ @Test
+ public void testDelegationTokenWithReducedLifetime() throws Throwable {
+ // Login as the "root" user
+ UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+ log.info("Logged in as {}", rootUser.getPrincipal());
+
+ // As the "root" user, open up the connection and get a delegation token
+ final AuthenticationToken dt = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
+ @Override
+ public AuthenticationToken run() throws Exception {
+ Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
+ log.info("Created connector as {}", rootUser.getPrincipal());
+ assertEquals(rootUser.getPrincipal(), conn.whoami());
+
+ return conn.securityOperations().getDelegationToken(new DelegationTokenConfig().setTokenLifetime(5, TimeUnit.MINUTES));
+ }
+ });
+
+ AuthenticationTokenIdentifier identifier = ((DelegationTokenImpl) dt).getIdentifier();
+ assertTrue("Expected identifier to expire in no more than 5 minutes: " + identifier,
+ identifier.getExpirationDate() - identifier.getIssueDate() <= (5 * 60 * 1000));
+ }
+
+ /**
+ * Creates a table, adds a record to it, and then compacts the table. A simple way to make sure that the system user exists (since the master does an RPC to
+ * the tserver which will create the system user if it doesn't already exist).
+ */
+ private void createTableWithDataAndCompact(Connector conn) throws TableNotFoundException, AccumuloSecurityException, AccumuloException, TableExistsException {
+ final String table = testName.getMethodName() + "_table";
+ conn.tableOperations().create(table);
+ BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+ Mutation m = new Mutation("a");
+ m.put("b", "c", "d");
+ bw.addMutation(m);
+ bw.close();
+ conn.tableOperations().compact(table, new CompactionConfig().setFlush(true).setWait(true));
+ }
+}
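
All of the delegation-token tests above share one shape: log in from a keytab, fetch a DelegationToken over the Kerberos-authenticated connection, then use that token in place of a KerberosToken (possibly from a UGI holding no Kerberos credentials at all). A condensed sketch of that flow, reusing the mac and rootUser fixtures from the test:

  UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
      rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());

  AuthenticationToken delegation = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
    @Override
    public AuthenticationToken run() throws Exception {
      Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
      return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
    }
  });

  // The token is bound to the principal that requested it; presenting it for any
  // other principal fails, as testDelegationTokenAsDifferentUser demonstrates.
  Connector viaToken = mac.getConnector(rootUser.getPrincipal(), delegation);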
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/KerberosProxyIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/KerberosProxyIT.java b/test/src/main/java/org/apache/accumulo/test/functional/KerberosProxyIT.java
new file mode 100644
index 0000000..31d1329
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/KerberosProxyIT.java
@@ -0,0 +1,426 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.net.ConnectException;
+import java.net.InetAddress;
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.core.client.security.tokens.KerberosToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.rpc.UGIAssumingTransport;
+import org.apache.accumulo.harness.AccumuloITBase;
+import org.apache.accumulo.harness.MiniClusterConfigurationCallback;
+import org.apache.accumulo.harness.MiniClusterHarness;
+import org.apache.accumulo.harness.TestingKdc;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.proxy.Proxy;
+import org.apache.accumulo.proxy.ProxyServer;
+import org.apache.accumulo.proxy.thrift.AccumuloProxy;
+import org.apache.accumulo.proxy.thrift.AccumuloProxy.Client;
+import org.apache.accumulo.proxy.thrift.AccumuloSecurityException;
+import org.apache.accumulo.proxy.thrift.ColumnUpdate;
+import org.apache.accumulo.proxy.thrift.Key;
+import org.apache.accumulo.proxy.thrift.KeyValue;
+import org.apache.accumulo.proxy.thrift.ScanOptions;
+import org.apache.accumulo.proxy.thrift.ScanResult;
+import org.apache.accumulo.proxy.thrift.TimeType;
+import org.apache.accumulo.proxy.thrift.WriterOptions;
+import org.apache.accumulo.server.util.PortUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.thrift.protocol.TCompactProtocol;
+import org.apache.thrift.transport.TSaslClientTransport;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransportException;
+import org.hamcrest.Description;
+import org.hamcrest.TypeSafeMatcher;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tests impersonation of clients by the proxy over SASL
+ */
+public class KerberosProxyIT extends AccumuloITBase {
+ private static final Logger log = LoggerFactory.getLogger(KerberosProxyIT.class);
+
+ @Rule
+ public ExpectedException thrown = ExpectedException.none();
+
+ private static TestingKdc kdc;
+ private static String krbEnabledForITs = null;
+ private static File proxyKeytab;
+ private static String hostname, proxyPrimary, proxyPrincipal;
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60 * 5;
+ }
+
+ @BeforeClass
+ public static void startKdc() throws Exception {
+ kdc = new TestingKdc();
+ kdc.start();
+ krbEnabledForITs = System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION);
+ if (null == krbEnabledForITs || !Boolean.parseBoolean(krbEnabledForITs)) {
+ System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, "true");
+ }
+
+ // Create a principal+keytab for the proxy
+ proxyKeytab = new File(kdc.getKeytabDir(), "proxy.keytab");
+ hostname = InetAddress.getLocalHost().getCanonicalHostName();
+ // Set the primary because the client needs to know it
+ proxyPrimary = "proxy";
+ // Qualify with an instance
+ proxyPrincipal = proxyPrimary + "/" + hostname;
+ kdc.createPrincipal(proxyKeytab, proxyPrincipal);
+ // Tack on the realm too
+ proxyPrincipal = kdc.qualifyUser(proxyPrincipal);
+ }
+
+ @AfterClass
+ public static void stopKdc() throws Exception {
+ if (null != kdc) {
+ kdc.stop();
+ }
+ if (null != krbEnabledForITs) {
+ System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, krbEnabledForITs);
+ }
+ }
+
+ private MiniAccumuloClusterImpl mac;
+ private Process proxyProcess;
+ private int proxyPort;
+
+ @Before
+ public void startMac() throws Exception {
+ MiniClusterHarness harness = new MiniClusterHarness();
+ mac = harness.create(getClass().getName(), testName.getMethodName(), new PasswordToken("unused"), new MiniClusterConfigurationCallback() {
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
+ cfg.setNumTservers(1);
+ Map<String,String> siteCfg = cfg.getSiteConfig();
+ // Allow the proxy to impersonate the client user, but no one else
+ siteCfg.put(Property.INSTANCE_RPC_SASL_PROXYUSERS.getKey() + proxyPrincipal + ".users", kdc.getRootUser().getPrincipal());
+ siteCfg.put(Property.INSTANCE_RPC_SASL_PROXYUSERS.getKey() + proxyPrincipal + ".hosts", "*");
+ cfg.setSiteConfig(siteCfg);
+ }
+
+ }, kdc);
+
+ mac.start();
+ MiniAccumuloConfigImpl cfg = mac.getConfig();
+
+ // Proxy configuration
+ proxyPort = PortUtils.getRandomFreePort();
+ File proxyPropertiesFile = new File(cfg.getConfDir(), "proxy.properties");
+ Properties proxyProperties = new Properties();
+ proxyProperties.setProperty("useMockInstance", "false");
+ proxyProperties.setProperty("useMiniAccumulo", "false");
+ proxyProperties.setProperty("protocolFactory", TCompactProtocol.Factory.class.getName());
+ proxyProperties.setProperty("tokenClass", KerberosToken.class.getName());
+ proxyProperties.setProperty("port", Integer.toString(proxyPort));
+ proxyProperties.setProperty("maxFrameSize", "16M");
+ proxyProperties.setProperty("instance", mac.getInstanceName());
+ proxyProperties.setProperty("zookeepers", mac.getZooKeepers());
+ proxyProperties.setProperty("thriftServerType", "sasl");
+ proxyProperties.setProperty("kerberosPrincipal", proxyPrincipal);
+ proxyProperties.setProperty("kerberosKeytab", proxyKeytab.getCanonicalPath());
+
+ // Write out the proxy.properties file
+ FileWriter writer = new FileWriter(proxyPropertiesFile);
+ proxyProperties.store(writer, "Configuration for Accumulo proxy");
+ writer.close();
+
+ proxyProcess = mac.exec(Proxy.class, "-p", proxyPropertiesFile.getCanonicalPath());
+
+ // Enable Kerberos auth
+ Configuration conf = new Configuration(false);
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+ UserGroupInformation.setConfiguration(conf);
+
+ boolean success = false;
+ ClusterUser rootUser = kdc.getRootUser();
+ for (int i = 0; i < 10 && !success; i++) {
+
+ UserGroupInformation ugi;
+ try {
+ UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+ ugi = UserGroupInformation.getCurrentUser();
+ } catch (IOException ex) {
+ log.info("Login as root is failing", ex);
+ Thread.sleep(1000);
+ continue;
+ }
+
+ TSocket socket = new TSocket(hostname, proxyPort);
+ log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
+ TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop",
+ "auth"), null, socket);
+
+ final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);
+
+ try {
+ // UGI transport will perform the doAs for us
+ ugiTransport.open();
+ success = true;
+ } catch (TTransportException e) {
+ Throwable cause = e.getCause();
+ if (null != cause && cause instanceof ConnectException) {
+ log.info("Proxy not yet up, waiting");
+ Thread.sleep(1000);
+ continue;
+ }
+ } finally {
+ if (null != ugiTransport) {
+ ugiTransport.close();
+ }
+ }
+ }
+
+ assertTrue("Failed to connect to the proxy repeatedly", success);
+ }
+
+ @After
+ public void stopMac() throws Exception {
+ if (null != proxyProcess) {
+ log.info("Destroying proxy process");
+ proxyProcess.destroy();
+ log.info("Waiting for proxy termination");
+ proxyProcess.waitFor();
+ log.info("Proxy terminated");
+ }
+ if (null != mac) {
+ mac.stop();
+ }
+ }
+
+ @Test
+ public void testProxyClient() throws Exception {
+ ClusterUser rootUser = kdc.getRootUser();
+ UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+
+ TSocket socket = new TSocket(hostname, proxyPort);
+ log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
+ TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop",
+ "auth"), null, socket);
+
+ final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);
+
+ // UGI transport will perform the doAs for us
+ ugiTransport.open();
+
+ AccumuloProxy.Client.Factory factory = new AccumuloProxy.Client.Factory();
+ Client client = factory.getClient(new TCompactProtocol(ugiTransport), new TCompactProtocol(ugiTransport));
+
+ // Will fail if the proxy can impersonate the client
+ ByteBuffer login = client.login(rootUser.getPrincipal(), Collections.<String,String> emptyMap());
+
+ // For all of the below actions, the proxy user doesn't have permission to do any of them, but the client user does.
+ // The fact that any of them actually run tells us that impersonation is working.
+
+ // Create a table
+ String table = "table";
+ if (!client.tableExists(login, table)) {
+ client.createTable(login, table, true, TimeType.MILLIS);
+ }
+
+ // Write two records to the table
+ String writer = client.createWriter(login, table, new WriterOptions());
+ Map<ByteBuffer,List<ColumnUpdate>> updates = new HashMap<>();
+ ColumnUpdate update = new ColumnUpdate(ByteBuffer.wrap("cf1".getBytes(UTF_8)), ByteBuffer.wrap("cq1".getBytes(UTF_8)));
+ update.setValue(ByteBuffer.wrap("value1".getBytes(UTF_8)));
+ updates.put(ByteBuffer.wrap("row1".getBytes(UTF_8)), Collections.<ColumnUpdate> singletonList(update));
+ update = new ColumnUpdate(ByteBuffer.wrap("cf2".getBytes(UTF_8)), ByteBuffer.wrap("cq2".getBytes(UTF_8)));
+ update.setValue(ByteBuffer.wrap("value2".getBytes(UTF_8)));
+ updates.put(ByteBuffer.wrap("row2".getBytes(UTF_8)), Collections.<ColumnUpdate> singletonList(update));
+ client.update(writer, updates);
+
+ // Flush and close the writer
+ client.flush(writer);
+ client.closeWriter(writer);
+
+ // Open a scanner to the table
+ String scanner = client.createScanner(login, table, new ScanOptions());
+ ScanResult results = client.nextK(scanner, 10);
+ assertEquals(2, results.getResults().size());
+
+ // Check the first key-value
+ KeyValue kv = results.getResults().get(0);
+ Key k = kv.key;
+ ByteBuffer v = kv.value;
+ assertEquals(ByteBuffer.wrap("row1".getBytes(UTF_8)), k.row);
+ assertEquals(ByteBuffer.wrap("cf1".getBytes(UTF_8)), k.colFamily);
+ assertEquals(ByteBuffer.wrap("cq1".getBytes(UTF_8)), k.colQualifier);
+ assertEquals(ByteBuffer.wrap(new byte[0]), k.colVisibility);
+ assertEquals(ByteBuffer.wrap("value1".getBytes(UTF_8)), v);
+
+ // And then the second
+ kv = results.getResults().get(1);
+ k = kv.key;
+ v = kv.value;
+ assertEquals(ByteBuffer.wrap("row2".getBytes(UTF_8)), k.row);
+ assertEquals(ByteBuffer.wrap("cf2".getBytes(UTF_8)), k.colFamily);
+ assertEquals(ByteBuffer.wrap("cq2".getBytes(UTF_8)), k.colQualifier);
+ assertEquals(ByteBuffer.wrap(new byte[0]), k.colVisibility);
+ assertEquals(ByteBuffer.wrap("value2".getBytes(UTF_8)), v);
+
+ // Close the scanner
+ client.closeScanner(scanner);
+
+ ugiTransport.close();
+ }
+
+ @Test
+ public void testDisallowedClientForImpersonation() throws Exception {
+ String user = testName.getMethodName();
+ File keytab = new File(kdc.getKeytabDir(), user + ".keytab");
+ kdc.createPrincipal(keytab, user);
+
+ // Login as the new user
+ UserGroupInformation.loginUserFromKeytab(user, keytab.getAbsolutePath());
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+
+ log.info("Logged in as " + ugi);
+
+ // Expect an AccumuloSecurityException
+ thrown.expect(AccumuloSecurityException.class);
+ // Error msg would look like:
+ //
+ // org.apache.accumulo.core.client.AccumuloSecurityException: Error BAD_CREDENTIALS for user Principal in credentials object should match kerberos
+ // principal.
+ // Expected 'proxy/hw10447.local@EXAMPLE.COM' but was 'testDisallowedClientForImpersonation@EXAMPLE.COM' - Username or Password is Invalid)
+ thrown.expect(new ThriftExceptionMatchesPattern(".*Error BAD_CREDENTIALS.*"));
+ thrown.expect(new ThriftExceptionMatchesPattern(".*Expected '" + proxyPrincipal + "' but was '" + kdc.qualifyUser(user) + "'.*"));
+
+ TSocket socket = new TSocket(hostname, proxyPort);
+ log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
+
+ // Should fail to open the transport
+ TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop",
+ "auth"), null, socket);
+
+ final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);
+
+ // UGI transport will perform the doAs for us
+ ugiTransport.open();
+
+ AccumuloProxy.Client.Factory factory = new AccumuloProxy.Client.Factory();
+ Client client = factory.getClient(new TCompactProtocol(ugiTransport), new TCompactProtocol(ugiTransport));
+
+ // Will fail because the proxy can't impersonate this user (per the site configuration)
+ try {
+ client.login(kdc.qualifyUser(user), Collections.<String,String> emptyMap());
+ } finally {
+ if (null != ugiTransport) {
+ ugiTransport.close();
+ }
+ }
+ }
+
+ @Test
+ public void testMismatchPrincipals() throws Exception {
+ ClusterUser rootUser = kdc.getRootUser();
+ // Should get an AccumuloSecurityException and the given message
+ thrown.expect(AccumuloSecurityException.class);
+ thrown.expect(new ThriftExceptionMatchesPattern(ProxyServer.RPC_ACCUMULO_PRINCIPAL_MISMATCH_MSG));
+
+ // Make a new user
+ String user = testName.getMethodName();
+ File keytab = new File(kdc.getKeytabDir(), user + ".keytab");
+ kdc.createPrincipal(keytab, user);
+
+ // Login as the new user
+ UserGroupInformation.loginUserFromKeytab(user, keytab.getAbsolutePath());
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+
+ log.info("Logged in as " + ugi);
+
+ TSocket socket = new TSocket(hostname, proxyPort);
+ log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
+
+ // Should fail to open the transport
+ TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname, Collections.singletonMap("javax.security.sasl.qop",
+ "auth"), null, socket);
+
+ final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);
+
+ // UGI transport will perform the doAs for us
+ ugiTransport.open();
+
+ AccumuloProxy.Client.Factory factory = new AccumuloProxy.Client.Factory();
+ Client client = factory.getClient(new TCompactProtocol(ugiTransport), new TCompactProtocol(ugiTransport));
+
+ // The proxy needs to recognize that the requested principal isn't the same as the SASL principal and fail
+ // Accumulo should let this through -- we need to rely on the proxy to reject us before talking to accumulo
+ try {
+ client.login(rootUser.getPrincipal(), Collections.<String,String> emptyMap());
+ } finally {
+ if (null != ugiTransport) {
+ ugiTransport.close();
+ }
+ }
+ }
+
+ private static class ThriftExceptionMatchesPattern extends TypeSafeMatcher<AccumuloSecurityException> {
+ private String pattern;
+
+ public ThriftExceptionMatchesPattern(String pattern) {
+ this.pattern = pattern;
+ }
+
+ @Override
+ protected boolean matchesSafely(AccumuloSecurityException item) {
+ return item.isSetMsg() && item.msg.matches(pattern);
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText("matches pattern ").appendValue(pattern);
+ }
+
+ @Override
+ protected void describeMismatchSafely(AccumuloSecurityException item, Description mismatchDescription) {
+ mismatchDescription.appendText("does not match");
+ }
+ }
+}
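
Both proxy tests build the same three-layer Thrift client stack: a raw TSocket, wrapped in a TSaslClientTransport that negotiates GSSAPI against the proxy's server primary, wrapped in a UGIAssumingTransport so that open() and every subsequent call run inside ugi.doAs(). A minimal sketch of that stack, using the hostname, proxyPort, and proxyPrimary values prepared in startKdc() and a logged-in UserGroupInformation ugi:

  TSocket socket = new TSocket(hostname, proxyPort);
  TSaslClientTransport sasl = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname,
      Collections.singletonMap("javax.security.sasl.qop", "auth"), null, socket);
  UGIAssumingTransport ugiTransport = new UGIAssumingTransport(sasl, ugi);
  ugiTransport.open(); // the SASL handshake happens here, as the UGI's principal

  AccumuloProxy.Client client = new AccumuloProxy.Client.Factory()
      .getClient(new TCompactProtocol(ugiTransport), new TCompactProtocol(ugiTransport));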
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/LargeRowIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/LargeRowIT.java b/test/src/main/java/org/apache/accumulo/test/functional/LargeRowIT.java
new file mode 100644
index 0000000..72b51eb
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/LargeRowIT.java
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.MemoryUnit;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class LargeRowIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(LargeRowIT.class);
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setMemory(ServerType.TABLET_SERVER, cfg.getMemory(ServerType.TABLET_SERVER) * 2, MemoryUnit.BYTE);
+ Map<String,String> siteConfig = cfg.getSiteConfig();
+ siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "10ms");
+ cfg.setSiteConfig(siteConfig);
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 4 * 60;
+ }
+
+ private static final int SEED = 42;
+ private static final int NUM_ROWS = 100;
+ private static final int ROW_SIZE = 1 << 17;
+ private static final int NUM_PRE_SPLITS = 9;
+ private static final int SPLIT_THRESH = ROW_SIZE * NUM_ROWS / NUM_PRE_SPLITS;
+
+ private String REG_TABLE_NAME;
+ private String PRE_SPLIT_TABLE_NAME;
+ private int timeoutFactor = 1;
+ private String tservMajcDelay;
+
+ @Before
+ public void getTimeoutFactor() throws Exception {
+ try {
+ timeoutFactor = Integer.parseInt(System.getProperty("timeout.factor"));
+ } catch (NumberFormatException e) {
+ log.warn("Could not parse property value for 'timeout.factor' as integer: " + System.getProperty("timeout.factor"));
+ }
+
+ Assert.assertTrue("Timeout factor must be greater than or equal to 1", timeoutFactor >= 1);
+
+ String[] names = getUniqueNames(2);
+ REG_TABLE_NAME = names[0];
+ PRE_SPLIT_TABLE_NAME = names[1];
+
+ Connector c = getConnector();
+ tservMajcDelay = c.instanceOperations().getSystemConfiguration().get(Property.TSERV_MAJC_DELAY.getKey());
+ c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), "10ms");
+ }
+
+ @After
+ public void resetMajcDelay() throws Exception {
+ if (null != tservMajcDelay) {
+ Connector conn = getConnector();
+ conn.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), tservMajcDelay);
+ }
+ }
+
+ @Test
+ public void run() throws Exception {
+ Random r = new Random();
+ byte[] rowData = new byte[ROW_SIZE];
+ r.setSeed(SEED + 1);
+ TreeSet<Text> splitPoints = new TreeSet<Text>();
+ for (int i = 0; i < NUM_PRE_SPLITS; i++) {
+ r.nextBytes(rowData);
+ TestIngest.toPrintableChars(rowData);
+ splitPoints.add(new Text(rowData));
+ }
+ Connector c = getConnector();
+ c.tableOperations().create(REG_TABLE_NAME);
+ c.tableOperations().create(PRE_SPLIT_TABLE_NAME);
+ c.tableOperations().setProperty(PRE_SPLIT_TABLE_NAME, Property.TABLE_MAX_END_ROW_SIZE.getKey(), "256K");
+ UtilWaitThread.sleep(3 * 1000);
+ c.tableOperations().addSplits(PRE_SPLIT_TABLE_NAME, splitPoints);
+ test1(c);
+ test2(c);
+ }
+
+ private void test1(Connector c) throws Exception {
+
+ basicTest(c, REG_TABLE_NAME, 0);
+
+ c.tableOperations().setProperty(REG_TABLE_NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "" + SPLIT_THRESH);
+
+ UtilWaitThread.sleep(timeoutFactor * 12000);
+ log.info("checking splits");
+ FunctionalTestUtils.checkSplits(c, REG_TABLE_NAME, NUM_PRE_SPLITS / 2, NUM_PRE_SPLITS * 4);
+
+ verify(c, REG_TABLE_NAME);
+ }
+
+ private void test2(Connector c) throws Exception {
+ basicTest(c, PRE_SPLIT_TABLE_NAME, NUM_PRE_SPLITS);
+ }
+
+ private void basicTest(Connector c, String table, int expectedSplits) throws Exception {
+ BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
+
+ Random r = new Random();
+ byte[] rowData = new byte[ROW_SIZE];
+
+ r.setSeed(SEED);
+
+ for (int i = 0; i < NUM_ROWS; i++) {
+
+ r.nextBytes(rowData);
+ TestIngest.toPrintableChars(rowData);
+
+ Mutation mut = new Mutation(new Text(rowData));
+ mut.put(new Text(""), new Text(""), new Value(Integer.toString(i).getBytes(UTF_8)));
+ bw.addMutation(mut);
+ }
+
+ bw.close();
+
+ FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
+
+ verify(c, table);
+
+ FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
+
+ c.tableOperations().flush(table, null, null, false);
+
+ // verify while table flush is running
+ verify(c, table);
+
+ // give split time to complete
+ c.tableOperations().flush(table, null, null, true);
+
+ FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
+
+ verify(c, table);
+
+ FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
+ }
+
+ private void verify(Connector c, String table) throws Exception {
+ Random r = new Random();
+ byte[] rowData = new byte[ROW_SIZE];
+
+ r.setSeed(SEED);
+
+ Scanner scanner = c.createScanner(table, Authorizations.EMPTY);
+
+ for (int i = 0; i < NUM_ROWS; i++) {
+
+ r.nextBytes(rowData);
+ TestIngest.toPrintableChars(rowData);
+
+ scanner.setRange(new Range(new Text(rowData)));
+
+ int count = 0;
+
+ for (Entry<Key,Value> entry : scanner) {
+ if (!entry.getKey().getRow().equals(new Text(rowData))) {
+ throw new Exception("verification failed, unexpected row i =" + i);
+ }
+ if (!entry.getValue().equals(Integer.toString(i).getBytes(UTF_8))) {
+ throw new Exception("verification failed, unexpected value i =" + i + " value = " + entry.getValue());
+ }
+ count++;
+ }
+
+ if (count != 1) {
+ throw new Exception("verification failed, unexpected count i =" + i + " count=" + count);
+ }
+
+ }
+
+ }
+
+}
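
As a worked check of the constants above: ROW_SIZE is 1 << 17 = 131072 bytes, so the 100 ingested rows total 13107200 bytes (exactly 12.5 MiB), and SPLIT_THRESH = 131072 * 100 / 9 is about 1.39 MiB per tablet. Lowering TABLE_SPLIT_THRESHOLD to that value should therefore carve the unsplit table into roughly NUM_PRE_SPLITS tablets, which is why test1 accepts anything between NUM_PRE_SPLITS / 2 = 4 and NUM_PRE_SPLITS * 4 = 36 splits.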
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/LateLastContactIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/LateLastContactIT.java b/test/src/main/java/org/apache/accumulo/test/functional/LateLastContactIT.java
new file mode 100644
index 0000000..9c310f0
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/LateLastContactIT.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.Collections;
+
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+/**
+ * Fake the "tablet stops talking but holds its lock" problem we see when hard drives and NFS fail. Start a ZombieTServer, and see that master stops it.
+ */
+public class LateLastContactIT extends ConfigurableMacBase {
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setSiteConfig(Collections.singletonMap(Property.GENERAL_RPC_TIMEOUT.getKey(), "2s"));
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Test
+ public void test() throws Exception {
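+ // ZombieTServer stops responding while holding its lock; it is expected to exit 0 once the master halts it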
+ Process zombie = cluster.exec(ZombieTServer.class);
+ assertEquals(0, zombie.waitFor());
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeIT.java b/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
new file mode 100644
index 0000000..1e7fef0
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
+import org.apache.accumulo.core.client.admin.TimeType;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class LogicalTimeIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(LogicalTimeIT.class);
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 4 * 60;
+ }
+
+ @Test
+ public void run() throws Exception {
+ int tc = 0;
+ String tableName = getUniqueNames(1)[0];
+ Connector c = getConnector();
+ runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"a"}, null, null, "b", 2l);
+ runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"z"}, null, null, "b", 2l);
+ runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"a", "z"}, null, null, "b", 2l);
+ runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"a", "c", "z"}, null, null, "b", 3l);
+ runMergeTest(c, tableName + tc++, new String[] {"m"}, new String[] {"a", "y", "z"}, null, null, "b", 3l);
+
+ runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a"}, null, null, "b", 2l);
+ runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"h"}, null, null, "b", 2l);
+ runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"s"}, null, null, "b", 2l);
+ runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s"}, null, null, "b", 2l);
+ runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "c", "h", "s"}, null, null, "b", 3l);
+ runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s", "i"}, null, null, "b", 3l);
+ runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"t", "a", "h", "s"}, null, null, "b", 3l);
+
+ runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a"}, null, "h", "b", 2l);
+ runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"h"}, null, "h", "b", 2l);
+ runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"s"}, null, "h", "b", 1l);
+ runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s"}, null, "h", "b", 2l);
+ runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "c", "h", "s"}, null, "h", "b", 3l);
+ runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s", "i"}, null, "h", "b", 3l);
+ runMergeTest(c, tableName + tc++, new String[] {"g", "r"}, new String[] {"t", "a", "h", "s"}, null, "h", "b", 2l);
+
+ }
+
+ private void runMergeTest(Connector conn, String table, String[] splits, String[] inserts, String start, String end, String last, long expected)
+ throws Exception {
+ log.info("table " + table);
+ conn.tableOperations().create(table, new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
+ TreeSet<Text> splitSet = new TreeSet<Text>();
+ for (String split : splits) {
+ splitSet.add(new Text(split));
+ }
+ conn.tableOperations().addSplits(table, splitSet);
+
+ BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+ for (String row : inserts) {
+ Mutation m = new Mutation(row);
+ m.put("cf", "cq", "v");
+ bw.addMutation(m);
+ }
+
+ bw.flush();
+
+ conn.tableOperations().merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end));
+
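+ // with logical time, an insert after the merge should receive the expected merged timestamp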
+ Mutation m = new Mutation(last);
+ m.put("cf", "cq", "v");
+ bw.addMutation(m);
+ bw.flush();
+
+ Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
+ scanner.setRange(new Range(last));
+
+ bw.close();
+
+ long time = scanner.iterator().next().getKey().getTimestamp();
+ if (time != expected)
+ throw new RuntimeException("unexpected time " + time + " " + expected);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/MapReduceIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MapReduceIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MapReduceIT.java
new file mode 100644
index 0000000..8c4666c
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MapReduceIT.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.Collections;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.Base64;
+import org.apache.accumulo.examples.simple.mapreduce.RowHash;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class MapReduceIT extends ConfigurableMacBase {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ public static final String hadoopTmpDirArg = "-Dhadoop.tmp.dir=" + System.getProperty("user.dir") + "/target/hadoop-tmp";
+
+ static final String tablename = "mapredf";
+ static final String input_cf = "cf-HASHTYPE";
+ static final String input_cq = "cq-NOTHASHED";
+ static final String input_cfcq = input_cf + ":" + input_cq;
+ static final String output_cq = "cq-MD4BASE64";
+ static final String output_cfcq = input_cf + ":" + output_cq;
+
+ @Test
+ public void test() throws Exception {
+ runTest(getConnector(), getCluster());
+ }
+
+ static void runTest(Connector c, MiniAccumuloClusterImpl cluster) throws AccumuloException, AccumuloSecurityException, TableExistsException,
+ TableNotFoundException, MutationsRejectedException, IOException, InterruptedException, NoSuchAlgorithmException {
+ c.tableOperations().create(tablename);
+ BatchWriter bw = c.createBatchWriter(tablename, new BatchWriterConfig());
+ for (int i = 0; i < 10; i++) {
+ Mutation m = new Mutation("" + i);
+ m.put(input_cf, input_cq, "row" + i);
+ bw.addMutation(m);
+ }
+ bw.close();
+ Process hash = cluster.exec(RowHash.class, Collections.singletonList(hadoopTmpDirArg), "-i", c.getInstance().getInstanceName(), "-z", c.getInstance()
+ .getZooKeepers(), "-u", "root", "-p", ROOT_PASSWORD, "-t", tablename, "--column", input_cfcq);
+ assertEquals(0, hash.waitFor());
+
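+ // RowHash should have written the Base64-encoded MD5 of each input value under the output column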
+ Scanner s = c.createScanner(tablename, Authorizations.EMPTY);
+ s.fetchColumn(new Text(input_cf), new Text(output_cq));
+ int i = 0;
+ for (Entry<Key,Value> entry : s) {
+ MessageDigest md = MessageDigest.getInstance("MD5");
+ byte[] check = Base64.encodeBase64(md.digest(("row" + i).getBytes()));
+ assertEquals(new String(check), entry.getValue().toString());
+ i++;
+ }
+
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/MasterAssignmentIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MasterAssignmentIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MasterAssignmentIT.java
new file mode 100644
index 0000000..72f8ce7
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MasterAssignmentIT.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+
+import java.io.FileNotFoundException;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.impl.ClientContext;
+import org.apache.accumulo.core.client.impl.Credentials;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.fate.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.server.master.state.MetaDataTableScanner;
+import org.apache.accumulo.server.master.state.TabletLocationState;
+import org.apache.commons.configuration.ConfigurationException;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class MasterAssignmentIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector c = getConnector();
+ String tableName = super.getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ String tableId = c.tableOperations().tableIdMap().get(tableName);
+ // wait for the table to be online
+ TabletLocationState newTablet;
+ do {
+ UtilWaitThread.sleep(250);
+ newTablet = getTabletLocationState(c, tableId);
+ } while (newTablet.current == null);
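+ // a new tablet should have a current location but no last or future location yet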
+ assertNull(newTablet.last);
+ assertNull(newTablet.future);
+
+ // put something in it
+ BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+ Mutation m = new Mutation("a");
+ m.put("b", "c", "d");
+ bw.addMutation(m);
+ bw.close();
+ // give it a last location
+ c.tableOperations().flush(tableName, null, null, true);
+
+ TabletLocationState flushed = getTabletLocationState(c, tableId);
+ assertEquals(newTablet.current, flushed.current);
+ assertEquals(flushed.current, flushed.last);
+ assertNull(newTablet.future);
+
+ // take the tablet offline
+ c.tableOperations().offline(tableName, true);
+ TabletLocationState offline = getTabletLocationState(c, tableId);
+ assertNull(offline.future);
+ assertNull(offline.current);
+ assertEquals(flushed.current, offline.last);
+
+ // put it back online
+ c.tableOperations().online(tableName, true);
+ TabletLocationState online = getTabletLocationState(c, tableId);
+ assertNull(online.future);
+ assertNotNull(online.current);
+ assertEquals(online.current, online.last);
+ }
+
+ private TabletLocationState getTabletLocationState(Connector c, String tableId) throws FileNotFoundException, ConfigurationException {
+ Credentials creds = new Credentials(getAdminPrincipal(), getAdminToken());
+ ClientContext context = new ClientContext(c.getInstance(), creds, getCluster().getClientConfig());
+ MetaDataTableScanner s = new MetaDataTableScanner(context, new Range(KeyExtent.getMetadataEntry(new Text(tableId), null)));
+ TabletLocationState tlState = s.next();
+ s.close();
+ return tlState;
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MasterFailoverIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
new file mode 100644
index 0000000..3489c26
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Map;
+
+import org.apache.accumulo.cluster.ClusterControl;
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+public class MasterFailoverIT extends AccumuloClusterHarness {
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ Map<String,String> siteConfig = cfg.getSiteConfig();
+ siteConfig.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "5s");
+ cfg.setSiteConfig(siteConfig);
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 90;
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector c = getConnector();
+ String[] names = getUniqueNames(2);
+ c.tableOperations().create(names[0]);
+ TestIngest.Opts opts = new TestIngest.Opts();
+ opts.setTableName(names[0]);
+ ClientConfiguration clientConf = cluster.getClientConfig();
+ if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ opts.updateKerberosCredentials(clientConf);
+ } else {
+ opts.setPrincipal(getAdminPrincipal());
+ }
+ TestIngest.ingest(c, opts, new BatchWriterOpts());
+
+ ClusterControl control = cluster.getClusterControl();
+ control.stopAllServers(ServerType.MASTER);
+ // start up a new one
+ control.startAllServers(ServerType.MASTER);
+ // talk to it
+ c.tableOperations().rename(names[0], names[1]);
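+ // verify the data ingested before the failover survived the master restart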
+ VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+ vopts.setTableName(names[1]);
+ if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ vopts.updateKerberosCredentials(clientConf);
+ } else {
+ vopts.setPrincipal(getAdminPrincipal());
+ }
+ VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/MaxOpenIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MaxOpenIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MaxOpenIT.java
new file mode 100644
index 0000000..6f08c1f
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MaxOpenIT.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.admin.InstanceOperations;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * A functional test that exercises hitting the max open file limit on a tablet server. This test assumes there are one or two tablet servers.
+ */
+
+public class MaxOpenIT extends AccumuloClusterHarness {
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ Map<String,String> conf = cfg.getSiteConfig();
+ conf.put(Property.TSERV_SCAN_MAX_OPENFILES.getKey(), "4");
+ conf.put(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "1");
+ conf.put(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey(), "2");
+ cfg.setSiteConfig(conf);
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 3 * 60;
+ }
+
+ private String scanMaxOpenFiles, majcConcurrent, majcThreadMaxOpen;
+
+ @Before
+ public void alterConfig() throws Exception {
+ InstanceOperations iops = getConnector().instanceOperations();
+ Map<String,String> sysConfig = iops.getSystemConfiguration();
+ scanMaxOpenFiles = sysConfig.get(Property.TSERV_SCAN_MAX_OPENFILES.getKey());
+ majcConcurrent = sysConfig.get(Property.TSERV_MAJC_MAXCONCURRENT.getKey());
+ majcThreadMaxOpen = sysConfig.get(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey());
+ }
+
+ @After
+ public void restoreConfig() throws Exception {
+ InstanceOperations iops = getConnector().instanceOperations();
+ if (null != scanMaxOpenFiles) {
+ iops.setProperty(Property.TSERV_SCAN_MAX_OPENFILES.getKey(), scanMaxOpenFiles);
+ }
+ if (null != majcConcurrent) {
+ iops.setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), majcConcurrent);
+ }
+ if (null != majcThreadMaxOpen) {
+ iops.setProperty(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey(), majcThreadMaxOpen);
+ }
+ }
+
+ private static final int NUM_TABLETS = 16;
+ private static final int NUM_TO_INGEST = 10000;
+
+ @Test
+ public void run() throws Exception {
+ final Connector c = getConnector();
+ final String tableName = getUniqueNames(1)[0];
+ final ClientConfiguration clientConf = cluster.getClientConfig();
+ c.tableOperations().create(tableName);
+ c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "10");
+ c.tableOperations().addSplits(tableName, TestIngest.getSplitPoints(0, NUM_TO_INGEST, NUM_TABLETS));
+
+ // the following loop should create three map files in each tablet
+ for (int i = 0; i < 3; i++) {
+ TestIngest.Opts opts = new TestIngest.Opts();
+ opts.timestamp = i;
+ opts.dataSize = 50;
+ opts.rows = NUM_TO_INGEST;
+ opts.cols = 1;
+ opts.random = i;
+ opts.setTableName(tableName);
+ if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ opts.updateKerberosCredentials(clientConf);
+ } else {
+ opts.setPrincipal(getAdminPrincipal());
+ }
+ TestIngest.ingest(c, opts, new BatchWriterOpts());
+
+ c.tableOperations().flush(tableName, null, null, true);
+ FunctionalTestUtils.checkRFiles(c, tableName, NUM_TABLETS, NUM_TABLETS, i + 1, i + 1);
+ }
+
+ List<Range> ranges = new ArrayList<Range>(NUM_TO_INGEST);
+
+ for (int i = 0; i < NUM_TO_INGEST; i++) {
+ ranges.add(new Range(TestIngest.generateRow(i, 0)));
+ }
+
+ long time1 = batchScan(c, tableName, ranges, 1);
+ // run it again, now that tablet locations and file data are cached on the client and server
+ time1 = batchScan(c, tableName, ranges, 1);
+ long time2 = batchScan(c, tableName, ranges, NUM_TABLETS);
+
+ System.out.printf("Single thread scan time %6.2f %n", time1 / 1000.0);
+ System.out.printf("Multiple thread scan time %6.2f %n", time2 / 1000.0);
+
+ }
+
+ private long batchScan(Connector c, String tableName, List<Range> ranges, int threads) throws Exception {
+ BatchScanner bs = c.createBatchScanner(tableName, TestIngest.AUTHS, threads);
+
+ bs.setRanges(ranges);
+
+ int count = 0;
+
+ long t1 = System.currentTimeMillis();
+
+ byte rval[] = new byte[50];
+ Random random = new Random();
+
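+ // check every returned entry against the deterministic ingest data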
+ for (Entry<Key,Value> entry : bs) {
+ count++;
+ int row = VerifyIngest.getRow(entry.getKey());
+ int col = VerifyIngest.getCol(entry.getKey());
+
+ if (row < 0 || row >= NUM_TO_INGEST) {
+ throw new Exception("unexcepted row " + row);
+ }
+
+ rval = TestIngest.genRandomValue(random, rval, 2, row, col);
+
+ if (entry.getValue().compareTo(rval) != 0) {
+ throw new Exception("unexcepted value row=" + row + " col=" + col);
+ }
+ }
+
+ long t2 = System.currentTimeMillis();
+
+ bs.close();
+
+ if (count != NUM_TO_INGEST) {
+ throw new Exception("Batch Scan did not return expected number of values " + count);
+ }
+
+ return t2 - t1;
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
new file mode 100644
index 0000000..9e3e8b6
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MergeIT.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.NewTableConfiguration;
+import org.apache.accumulo.core.client.admin.TimeType;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.Merge;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class MergeIT extends AccumuloClusterHarness {
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 8 * 60;
+ }
+
+ SortedSet<Text> splits(String[] points) {
+ SortedSet<Text> result = new TreeSet<Text>();
+ for (String point : points)
+ result.add(new Text(point));
+ return result;
+ }
+
+ @Test
+ public void merge() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ c.tableOperations().addSplits(tableName, splits("a b c d e f g h i j k".split(" ")));
+ BatchWriter bw = c.createBatchWriter(tableName, null);
+ for (String row : "a b c d e f g h i j k".split(" ")) {
+ Mutation m = new Mutation(row);
+ m.put("cf", "cq", "value");
+ bw.addMutation(m);
+ }
+ bw.close();
+ c.tableOperations().flush(tableName, null, null, true);
+ c.tableOperations().merge(tableName, new Text("c1"), new Text("f1"));
+ assertEquals(8, c.tableOperations().listSplits(tableName).size());
+ }
+
+ @Test
+ public void mergeSize() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ c.tableOperations().addSplits(tableName, splits("a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")));
+ BatchWriter bw = c.createBatchWriter(tableName, null);
+ for (String row : "c e f y".split(" ")) {
+ Mutation m = new Mutation(row);
+ m.put("cf", "cq", "mersydotesanddozeydotesanlittolamsiedives");
+ bw.addMutation(m);
+ }
+ bw.close();
+ c.tableOperations().flush(tableName, null, null, true);
+ Merge merge = new Merge();
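+ // mergomatic merges runs of small tablets toward the 100 byte goal size; the second pass (force=true) merges even further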
+ merge.mergomatic(c, tableName, null, null, 100, false);
+ assertArrayEquals("b c d e f x y".split(" "), toStrings(c.tableOperations().listSplits(tableName)));
+ merge.mergomatic(c, tableName, null, null, 100, true);
+ assertArrayEquals("c e f y".split(" "), toStrings(c.tableOperations().listSplits(tableName)));
+ }
+
+ private String[] toStrings(Collection<Text> listSplits) {
+ String[] result = new String[listSplits.size()];
+ int i = 0;
+ for (Text t : listSplits) {
+ result[i++] = t.toString();
+ }
+ return result;
+ }
+
+ private String[] ns(String... strings) {
+ return strings;
+ }
+
+ @Test
+ public void mergeTest() throws Exception {
+ int tc = 0;
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ runMergeTest(c, tableName + tc++, ns(), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
+
+ runMergeTest(c, tableName + tc++, ns("m"), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
+ runMergeTest(c, tableName + tc++, ns("m"), ns("m"), ns("l", "m", "n"), ns("m", "n"), ns(null, "z"));
+ runMergeTest(c, tableName + tc++, ns("m"), ns("m"), ns("l", "m", "n"), ns(null, "b"), ns("l", "m"));
+
+ runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns(), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns(null, "s"));
+ runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("c", "m"));
+ runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("n", "r"));
+ runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns(null, "s"));
+ runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns(null, "s"));
+ runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("q", "r"));
+ runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("aa", "b"));
+ runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("r", "s"), ns(null, "z"));
+ runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("l", "m"));
+ runMergeTest(c, tableName + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns("q", "r"));
+
+ }
+
+ private void runMergeTest(Connector c, String table, String[] splits, String[] expectedSplits, String[] inserts, String[] start, String[] end)
+ throws Exception {
+ int count = 0;
+
+ for (String s : start) {
+ for (String e : end) {
+ runMergeTest(c, table + "_" + count++, splits, expectedSplits, inserts, s, e);
+ }
+ }
+ }
+
+ private void runMergeTest(Connector conn, String table, String[] splits, String[] expectedSplits, String[] inserts, String start, String end)
+ throws Exception {
+ System.out.println("Running merge test " + table + " " + Arrays.asList(splits) + " " + start + " " + end);
+
+ conn.tableOperations().create(table, new NewTableConfiguration().setTimeType(TimeType.LOGICAL));
+ TreeSet<Text> splitSet = new TreeSet<Text>();
+ for (String split : splits) {
+ splitSet.add(new Text(split));
+ }
+ conn.tableOperations().addSplits(table, splitSet);
+
+ BatchWriter bw = conn.createBatchWriter(table, null);
+ HashSet<String> expected = new HashSet<String>();
+ for (String row : inserts) {
+ Mutation m = new Mutation(row);
+ m.put("cf", "cq", row);
+ bw.addMutation(m);
+ expected.add(row);
+ }
+
+ bw.close();
+
+ conn.tableOperations().merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end));
+
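+ // after the merge, every inserted row must still be readable exactly once and the expected splits remain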
+ Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
+
+ HashSet<String> observed = new HashSet<String>();
+ for (Entry<Key,Value> entry : scanner) {
+ String row = entry.getKey().getRowData().toString();
+ if (!observed.add(row)) {
+ throw new Exception("Saw data twice " + table + " " + row);
+ }
+ }
+
+ if (!observed.equals(expected)) {
+ throw new Exception("data inconsistency " + table + " " + observed + " != " + expected);
+ }
+
+ HashSet<Text> currentSplits = new HashSet<Text>(conn.tableOperations().listSplits(table));
+ HashSet<Text> ess = new HashSet<Text>();
+ for (String es : expectedSplits) {
+ ess.add(new Text(es));
+ }
+
+ if (!currentSplits.equals(ess)) {
+ throw new Exception("split inconsistency " + table + " " + currentSplits + " != " + ess);
+ }
+
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/resources/conf/monitor_logger.xml
----------------------------------------------------------------------
diff --git a/test/src/main/resources/conf/monitor_logger.xml b/test/src/main/resources/conf/monitor_logger.xml
new file mode 100644
index 0000000..91a7671
--- /dev/null
+++ b/test/src/main/resources/conf/monitor_logger.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
+<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
+
+ <!-- Write out everything at the DEBUG level to the debug log -->
+ <appender name="A2" class="org.apache.log4j.RollingFileAppender">
+ <param name="File" value="${org.apache.accumulo.core.dir.log}/${org.apache.accumulo.core.application}_${org.apache.accumulo.core.ip.localhost.hostname}.debug.log"/>
+ <param name="MaxFileSize" value="100MB"/>
+ <param name="MaxBackupIndex" value="10"/>
+ <param name="Threshold" value="DEBUG"/>
+ <layout class="org.apache.log4j.PatternLayout">
+ <param name="ConversionPattern" value="%d{ISO8601} [%-8c{2}] %-5p: %X{application} %m%n"/>
+ </layout>
+ </appender>
+
+ <!-- Write out INFO and higher to the regular log -->
+ <appender name="A3" class="org.apache.log4j.RollingFileAppender">
+ <param name="File" value="${org.apache.accumulo.core.dir.log}/${org.apache.accumulo.core.application}_${org.apache.accumulo.core.ip.localhost.hostname}.log"/>
+ <param name="MaxFileSize" value="100MB"/>
+ <param name="MaxBackupIndex" value="10"/>
+ <param name="Threshold" value="INFO"/>
+ <layout class="org.apache.log4j.PatternLayout">
+ <param name="ConversionPattern" value="%d{ISO8601} [%-8c{2}] %-5p: %X{application} %m%n"/>
+ </layout>
+ </appender>
+
+ <!-- Keep the last few log messages for display to the user -->
+ <appender name="GUI" class="org.apache.accumulo.server.monitor.LogService">
+ <param name="keep" value="40"/>
+ <param name="Threshold" value="WARN"/>
+ </appender>
+
+ <!-- Log accumulo messages to debug, normal and GUI -->
+ <logger name="org.apache.accumulo" additivity="false">
+ <level value="DEBUG"/>
+ <appender-ref ref="A2" />
+ <appender-ref ref="A3" />
+ <appender-ref ref="GUI" />
+ </logger>
+
+ <!-- Log non-accumulo messages to debug, normal logs. -->
+ <root>
+ <level value="INFO"/>
+ <appender-ref ref="A2" />
+ <appender-ref ref="A3" />
+ </root>
+
+</log4j:configuration>
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/test/src/main/resources/log4j.properties b/test/src/main/resources/log4j.properties
new file mode 100644
index 0000000..26ea762
--- /dev/null
+++ b/test/src/main/resources/log4j.properties
@@ -0,0 +1,55 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+log4j.rootLogger=DEBUG, CA
+log4j.appender.CA=org.apache.log4j.ConsoleAppender
+log4j.appender.CA.layout=org.apache.log4j.PatternLayout
+log4j.appender.CA.layout.ConversionPattern=%d{ISO8601} [%c{2}] %-5p: %m%n
+
+log4j.logger.org.apache.accumulo.core=DEBUG
+log4j.logger.org.apache.accumulo.core.client.impl.MasterClient=INFO
+log4j.logger.org.apache.accumulo.core.client.impl.ServerClient=ERROR
+log4j.logger.org.apache.accumulo.core.util.shell.Shell.audit=OFF
+log4j.logger.org.apache.accumulo.core.util.shell.Shell=FATAL
+log4j.logger.org.apache.commons.vfs2.impl.DefaultFileSystemManager=WARN
+log4j.logger.org.apache.hadoop.io.compress.CodecPool=WARN
+log4j.logger.org.apache.hadoop.mapred=ERROR
+log4j.logger.org.apache.hadoop.tools.DistCp=WARN
+log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
+log4j.logger.org.apache.hadoop.util.ProcessTree=WARN
+log4j.logger.org.apache.zookeeper.ClientCnxn=FATAL
+log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=INFO
+log4j.logger.org.apache.zookeeper.ZooKeeper=WARN
+log4j.logger.org.apache.accumulo.core.file.rfile.bcfile=INFO
+log4j.logger.org.apache.accumulo.server.util.ReplicationTableUtil=TRACE
+log4j.logger.org.apache.accumulo.core.client.impl.ThriftScanner=INFO
+log4j.logger.org.apache.accumulo.fate.zookeeper.DistributedReadWriteLock=WARN
+log4j.logger.org.mortbay.log=WARN
+log4j.logger.org.apache.hadoop=WARN
+log4j.logger.org.apache.jasper=INFO
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=WARN
+log4j.logger.org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace=WARN
+log4j.logger.BlockStateChange=WARN
+log4j.logger.org.apache.accumulo.core.client.impl.TabletServerBatchReaderIterator=INFO
+log4j.logger.org.apache.hadoop.security=DEBUG
+log4j.logger.org.apache.hadoop.minikdc=DEBUG
+log4j.logger.org.apache.directory=INFO
+log4j.logger.org.apache.directory.api.ldap=WARN
+# This is really spammy at debug
+log4j.logger.org.apache.thrift.transport.TSaslTransport=INFO
+# From apache-ds/minikdc
+log4j.logger.org.apache.mina=INFO
+log4j.logger.org.apache.accumulo.server.thrift.UGIAssumingProcessor=TRACE
+log4j.logger.org.apache.hadoop.security.UserGroupInformation=INFO
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/resources/randomwalk/Basic.xml
----------------------------------------------------------------------
diff --git a/test/src/main/resources/randomwalk/Basic.xml b/test/src/main/resources/randomwalk/Basic.xml
new file mode 100644
index 0000000..2dead02
--- /dev/null
+++ b/test/src/main/resources/randomwalk/Basic.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<module>
+
+<package prefix="test" value="org.apache.accumulo.test.randomwalk.unit"/>
+
+<init id="test.CreateTable"/>
+
+<node id="test.CreateTable">
+ <edge id="unit/Simple.xml" weight="1"/>
+</node>
+
+<node id="unit/Simple.xml">
+ <edge id="unit/Simple.xml" weight="3"/>
+ <edge id="test.DeleteTable" weight="1"/>
+</node>
+
+<node id="test.DeleteTable">
+ <edge id="END" weight="1"/>
+</node>
+
+</module>
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/resources/randomwalk/Simple.xml
----------------------------------------------------------------------
diff --git a/test/src/main/resources/randomwalk/Simple.xml b/test/src/main/resources/randomwalk/Simple.xml
new file mode 100644
index 0000000..cad940e
--- /dev/null
+++ b/test/src/main/resources/randomwalk/Simple.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<module>
+
+<package prefix="test" value="org.apache.accumulo.test.randomwalk.unit"/>
+
+<init id="dummy.all"/>
+
+<node id="dummy.all">
+ <edge id="test.Ingest" weight="1"/>
+ <edge id="test.Verify" weight="1"/>
+ <edge id="test.Scan" weight="1"/>
+ <edge id="END" weight="1"/>
+</node>
+
+<node id="test.Ingest">
+ <edge id="dummy.all" weight="1"/>
+</node>
+
+<node id="test.Verify">
+ <edge id="dummy.all" weight="1"/>
+</node>
+
+<node id="test.Scan">
+ <edge id="dummy.all" weight="1"/>
+</node>
+
+</module>
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/resources/unit/Basic.xml
----------------------------------------------------------------------
diff --git a/test/src/main/resources/unit/Basic.xml b/test/src/main/resources/unit/Basic.xml
new file mode 100644
index 0000000..2dead02
--- /dev/null
+++ b/test/src/main/resources/unit/Basic.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<module>
+
+<package prefix="test" value="org.apache.accumulo.test.randomwalk.unit"/>
+
+<init id="test.CreateTable"/>
+
+<node id="test.CreateTable">
+ <edge id="unit/Simple.xml" weight="1"/>
+</node>
+
+<node id="unit/Simple.xml">
+ <edge id="unit/Simple.xml" weight="3"/>
+ <edge id="test.DeleteTable" weight="1"/>
+</node>
+
+<node id="test.DeleteTable">
+ <edge id="END" weight="1"/>
+</node>
+
+</module>
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/resources/unit/Simple.xml
----------------------------------------------------------------------
diff --git a/test/src/main/resources/unit/Simple.xml b/test/src/main/resources/unit/Simple.xml
new file mode 100644
index 0000000..cad940e
--- /dev/null
+++ b/test/src/main/resources/unit/Simple.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<module>
+
+<package prefix="test" value="org.apache.accumulo.test.randomwalk.unit"/>
+
+<init id="dummy.all"/>
+
+<node id="dummy.all">
+ <edge id="test.Ingest" weight="1"/>
+ <edge id="test.Verify" weight="1"/>
+ <edge id="test.Scan" weight="1"/>
+ <edge id="END" weight="1"/>
+</node>
+
+<node id="test.Ingest">
+ <edge id="dummy.all" weight="1"/>
+</node>
+
+<node id="test.Verify">
+ <edge id="dummy.all" weight="1"/>
+</node>
+
+<node id="test.Scan">
+ <edge id="dummy.all" weight="1"/>
+</node>
+
+</module>
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/harness/AccumuloClusterHarness.java b/test/src/test/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
deleted file mode 100644
index 30058db..0000000
--- a/test/src/test/java/org/apache/accumulo/harness/AccumuloClusterHarness.java
+++ /dev/null
@@ -1,338 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.harness;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-
-import org.apache.accumulo.cluster.AccumuloCluster;
-import org.apache.accumulo.cluster.ClusterControl;
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.cluster.ClusterUsers;
-import org.apache.accumulo.cluster.standalone.StandaloneAccumuloCluster;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.admin.SecurityOperations;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.harness.conf.AccumuloClusterConfiguration;
-import org.apache.accumulo.harness.conf.AccumuloClusterPropertyConfiguration;
-import org.apache.accumulo.harness.conf.AccumuloMiniClusterConfiguration;
-import org.apache.accumulo.harness.conf.StandaloneAccumuloClusterConfiguration;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-
-/**
- * General Integration-Test base class that provides access to an Accumulo instance for testing. This instance could be MAC or a standalone instance.
- */
-public abstract class AccumuloClusterHarness extends AccumuloITBase implements MiniClusterConfigurationCallback, ClusterUsers {
- private static final Logger log = LoggerFactory.getLogger(AccumuloClusterHarness.class);
- private static final String TRUE = Boolean.toString(true);
-
- public static enum ClusterType {
- MINI, STANDALONE;
-
- public boolean isDynamic() {
- return this == MINI;
- }
- }
-
- private static boolean initialized = false;
-
- protected static AccumuloCluster cluster;
- protected static ClusterType type;
- protected static AccumuloClusterPropertyConfiguration clusterConf;
- protected static TestingKdc krb;
-
- @BeforeClass
- public static void setUp() throws Exception {
- clusterConf = AccumuloClusterPropertyConfiguration.get();
- type = clusterConf.getClusterType();
-
- if (ClusterType.MINI == type && TRUE.equals(System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION))) {
- krb = new TestingKdc();
- krb.start();
- log.info("MiniKdc started");
- }
-
- initialized = true;
- }
-
- @AfterClass
- public static void tearDownKdc() throws Exception {
- if (null != krb) {
- krb.stop();
- }
- }
-
- /**
- * The {@link TestingKdc} used for this {@link AccumuloCluster}. Might be null.
- */
- public static TestingKdc getKdc() {
- return krb;
- }
-
- @Before
- public void setupCluster() throws Exception {
- // Before we try to instantiate the cluster, check to see if the test even wants to run against this type of cluster
- Assume.assumeTrue(canRunTest(type));
-
- switch (type) {
- case MINI:
- MiniClusterHarness miniClusterHarness = new MiniClusterHarness();
- // Intrinsically performs the callback to let tests alter MiniAccumuloConfig and core-site.xml
- MiniAccumuloClusterImpl impl = miniClusterHarness.create(this, getAdminToken(), krb);
- cluster = impl;
- // MAC makes a ClientConf for us, just set it
- ((AccumuloMiniClusterConfiguration) clusterConf).setClientConf(impl.getClientConfig());
- // Login as the "root" user
- if (null != krb) {
- ClusterUser rootUser = krb.getRootUser();
- // Log in the 'client' user
- UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
- }
- break;
- case STANDALONE:
- StandaloneAccumuloClusterConfiguration conf = (StandaloneAccumuloClusterConfiguration) clusterConf;
- ClientConfiguration clientConf = conf.getClientConf();
- StandaloneAccumuloCluster standaloneCluster = new StandaloneAccumuloCluster(conf.getInstance(), clientConf, conf.getTmpDirectory(), conf.getUsers(),
- conf.getAccumuloServerUser());
- // If these are provided in the configuration, pass them into the cluster
- standaloneCluster.setAccumuloHome(conf.getAccumuloHome());
- standaloneCluster.setClientAccumuloConfDir(conf.getClientAccumuloConfDir());
- standaloneCluster.setServerAccumuloConfDir(conf.getServerAccumuloConfDir());
- standaloneCluster.setHadoopConfDir(conf.getHadoopConfDir());
-
- // For SASL, we need to get the Hadoop configuration files as well otherwise UGI will log in as SIMPLE instead of KERBEROS
- Configuration hadoopConfiguration = standaloneCluster.getHadoopConfiguration();
- if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- UserGroupInformation.setConfiguration(hadoopConfiguration);
- // Login as the admin user to start the tests
- UserGroupInformation.loginUserFromKeytab(conf.getAdminPrincipal(), conf.getAdminKeytab().getAbsolutePath());
- }
-
- // Set the implementation
- cluster = standaloneCluster;
- break;
- default:
- throw new RuntimeException("Unhandled type");
- }
-
- if (type.isDynamic()) {
- cluster.start();
- } else {
- log.info("Removing tables which appear to be from a previous test run");
- cleanupTables();
- log.info("Removing users which appear to be from a previous test run");
- cleanupUsers();
- }
-
- switch (type) {
- case MINI:
- if (null != krb) {
- final String traceTable = Property.TRACE_TABLE.getDefaultValue();
- final ClusterUser systemUser = krb.getAccumuloServerUser(), rootUser = krb.getRootUser();
-
- // Login as the trace user
- UserGroupInformation.loginUserFromKeytab(systemUser.getPrincipal(), systemUser.getKeytab().getAbsolutePath());
-
- // Open a connector as the system user (ensures the user will exist for us to assign permissions to)
- Connector conn = cluster.getConnector(systemUser.getPrincipal(), new KerberosToken(systemUser.getPrincipal(), systemUser.getKeytab(), true));
-
- // Then, log back in as the "root" user and do the grant
- UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
- conn = getConnector();
-
- // Create the trace table
- conn.tableOperations().create(traceTable);
-
- // Trace user (which is the same kerberos principal as the system user, but using a normal KerberosToken) needs
- // to have the ability to read, write and alter the trace table
- conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.READ);
- conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.WRITE);
- conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.ALTER_TABLE);
- }
- break;
- default:
- // do nothing
- }
- }
-
- public void cleanupTables() throws Exception {
- final String tablePrefix = this.getClass().getSimpleName() + "_";
- final TableOperations tops = getConnector().tableOperations();
- for (String table : tops.list()) {
- if (table.startsWith(tablePrefix)) {
- log.debug("Removing table {}", table);
- tops.delete(table);
- }
- }
- }
-
- public void cleanupUsers() throws Exception {
- final String userPrefix = this.getClass().getSimpleName();
- final SecurityOperations secOps = getConnector().securityOperations();
- for (String user : secOps.listLocalUsers()) {
- if (user.startsWith(userPrefix)) {
- log.info("Dropping local user {}", user);
- secOps.dropLocalUser(user);
- }
- }
- }
-
- @After
- public void teardownCluster() throws Exception {
- if (null != cluster) {
- if (type.isDynamic()) {
- cluster.stop();
- } else {
- log.info("Removing tables which appear to be from the current test");
- cleanupTables();
- log.info("Removing users which appear to be from the current test");
- cleanupUsers();
- }
- }
- }
-
- public static AccumuloCluster getCluster() {
- Preconditions.checkState(initialized);
- return cluster;
- }
-
- public static ClusterControl getClusterControl() {
- Preconditions.checkState(initialized);
- return cluster.getClusterControl();
- }
-
- public static ClusterType getClusterType() {
- Preconditions.checkState(initialized);
- return type;
- }
-
- public static String getAdminPrincipal() {
- Preconditions.checkState(initialized);
- return clusterConf.getAdminPrincipal();
- }
-
- public static AuthenticationToken getAdminToken() {
- Preconditions.checkState(initialized);
- return clusterConf.getAdminToken();
- }
-
- @Override
- public ClusterUser getAdminUser() {
- switch (type) {
- case MINI:
- if (null == krb) {
- PasswordToken passwordToken = (PasswordToken) getAdminToken();
- return new ClusterUser(getAdminPrincipal(), new String(passwordToken.getPassword(), UTF_8));
- }
- return krb.getRootUser();
- case STANDALONE:
- return new ClusterUser(getAdminPrincipal(), ((StandaloneAccumuloClusterConfiguration) clusterConf).getAdminKeytab());
- default:
- throw new RuntimeException("Unknown cluster type");
- }
- }
-
- @Override
- public ClusterUser getUser(int offset) {
- switch (type) {
- case MINI:
- if (null != krb) {
- // Defer to the TestingKdc when kerberos is on so we can get the keytab instead of a password
- return krb.getClientPrincipal(offset);
- } else {
- // Come up with a mostly unique name
- String principal = getClass().getSimpleName() + "_" + testName.getMethodName() + "_" + offset;
- // Username and password are the same
- return new ClusterUser(principal, principal);
- }
- case STANDALONE:
- return ((StandaloneAccumuloCluster) cluster).getUser(offset);
- default:
- throw new RuntimeException("Unknown cluster type");
- }
- }
-
- public static FileSystem getFileSystem() throws IOException {
- Preconditions.checkState(initialized);
- return cluster.getFileSystem();
- }
-
- public static AccumuloClusterConfiguration getClusterConfiguration() {
- Preconditions.checkState(initialized);
- return clusterConf;
- }
-
- public Connector getConnector() {
- try {
- String princ = getAdminPrincipal();
- AuthenticationToken token = getAdminToken();
- log.debug("Creating connector as {} with {}", princ, token);
- return cluster.getConnector(princ, token);
- } catch (Exception e) {
- log.error("Could not connect to Accumulo", e);
- fail("Could not connect to Accumulo: " + e.getMessage());
-
- throw new RuntimeException("Could not connect to Accumulo", e);
- }
- }
-
- // TODO Really don't want this here. Will ultimately need to abstract configuration method away from MAConfig
- // and change over to something more generic
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {}
-
- /**
- * A test may not be capable of running against a given AccumuloCluster. Implementations can override this method to advertise that they cannot run (or do
- * not want to run) the test against the given cluster type.
- */
- public boolean canRunTest(ClusterType type) {
- return true;
- }
-
- /**
- * Tries to give a reasonable directory which can be used to create temporary files for the test. Makes a basic attempt to create the directory if it does not
- * already exist.
- *
- * @return A directory which can be expected to exist on the Cluster's FileSystem
- */
- public Path getUsableDir() throws IllegalArgumentException, IOException {
- return cluster.getTemporaryPath();
- }
-}
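
As a minimal sketch of how a test would build on the harness removed above (the class, method, and table names are hypothetical; the API calls are the ones shown in the deleted file):

    package org.apache.accumulo.test;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.harness.AccumuloClusterHarness;
    import org.junit.Test;

    // Hypothetical IT built on the harness lifecycle above.
    public class ExampleClusterIT extends AccumuloClusterHarness {

      @Override
      public boolean canRunTest(ClusterType type) {
        // Advertise that this test only supports MiniAccumuloCluster
        return ClusterType.MINI == type;
      }

      @Test
      public void createAndDeleteTable() throws Exception {
        Connector conn = getConnector();
        // getUniqueNames (from AccumuloITBase) keeps table names per-test-unique
        String table = getUniqueNames(1)[0];
        conn.tableOperations().create(table);
        conn.tableOperations().delete(table);
      }
    }
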
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/harness/AccumuloITBase.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/harness/AccumuloITBase.java b/test/src/test/java/org/apache/accumulo/harness/AccumuloITBase.java
deleted file mode 100644
index 8e2f6e0..0000000
--- a/test/src/test/java/org/apache/accumulo/harness/AccumuloITBase.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.harness;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-
-import org.apache.commons.io.FileUtils;
-import org.junit.Rule;
-import org.junit.rules.TestName;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Methods, setup and/or infrastructure which are common to any Accumulo integration test.
- */
-public class AccumuloITBase {
- private static final Logger log = LoggerFactory.getLogger(AccumuloITBase.class);
-
- @Rule
- public TestName testName = new TestName();
-
- public String[] getUniqueNames(int num) {
- String[] names = new String[num];
- for (int i = 0; i < num; i++)
- names[i] = this.getClass().getSimpleName() + "_" + testName.getMethodName() + i;
- return names;
- }
-
- /**
- * Determines an appropriate directory name for holding generated ssl files for a test. The directory returned will have the same name as the provided
- * directory, but with the suffix "-ssl" appended. This new directory is not created here, but is expected to be created as needed.
- *
- * @param baseDir
- * the original directory, which the new directory will be created next to; it should exist
- * @return the new directory (is not created)
- */
- public static File getSslDir(File baseDir) {
- assertTrue(baseDir.exists() && baseDir.isDirectory());
- return new File(baseDir.getParentFile(), baseDir.getName() + "-ssl");
- }
-
- public static File createTestDir(String name) {
- File baseDir = new File(System.getProperty("user.dir") + "/target/mini-tests");
- assertTrue(baseDir.mkdirs() || baseDir.isDirectory());
- if (name == null)
- return baseDir;
- File testDir = new File(baseDir, name);
- FileUtils.deleteQuietly(testDir);
- assertTrue(testDir.mkdir());
- return testDir;
- }
-
- /**
- * If a given IT test has a method that takes longer than the class-set default timeout, declare it failed.
- *
- * Note that this provides an upper bound on test times, even in the presence of Test annotations with a timeout. That is, the Test annotation can make the
- * timing tighter, but cannot extend the timeout beyond this bound.
- *
- * Defaults to no timeout and can be changed via two mechanisms:
- *
- * 1) A given IT class can override the defaultTimeoutSeconds method if test methods in that class should have a timeout.
- * 2) The system property "timeout.factor" is used as a multiplier for the class-provided default.
- *
- * Note that if either of these values is '0', tests will run with no timeout. The default class-level timeout is 0.
- *
- */
- @Rule
- public Timeout testsShouldTimeout() {
- int waitLonger = 0;
- try {
- String timeoutString = System.getProperty("timeout.factor");
- if (timeoutString != null && !timeoutString.isEmpty()) {
- waitLonger = Integer.parseInt(timeoutString);
- }
- } catch (NumberFormatException exception) {
- log.warn("Could not parse timeout.factor, defaulting to no timeout.");
- }
- return new Timeout(waitLonger * defaultTimeoutSeconds() * 1000);
- }
-
- /**
- * Time to wait per test method before declaring a timeout, in seconds.
- */
- protected int defaultTimeoutSeconds() {
- return 0;
- }
-}
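
A short, hypothetical illustration of the timeout rule described above: a subclass opts in by overriding defaultTimeoutSeconds, and the bound is then scaled at run time via the timeout.factor system property (e.g. -Dtimeout.factor=2 doubles it; leaving it unset, or 0, disables the timeout):

    import org.apache.accumulo.harness.AccumuloITBase;
    import org.junit.Test;

    // Hypothetical subclass; each @Test method fails after
    // defaultTimeoutSeconds() * timeout.factor seconds.
    public class SlowOperationIT extends AccumuloITBase {

      @Override
      protected int defaultTimeoutSeconds() {
        return 60;
      }

      @Test
      public void slowScan() throws Exception {
        // test body; interrupted by the Timeout rule if it runs too long
      }
    }
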
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/harness/MiniClusterConfigurationCallback.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/harness/MiniClusterConfigurationCallback.java b/test/src/test/java/org/apache/accumulo/harness/MiniClusterConfigurationCallback.java
deleted file mode 100644
index 5fa6eb5..0000000
--- a/test/src/test/java/org/apache/accumulo/harness/MiniClusterConfigurationCallback.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.harness;
-
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * Callback interface to inject configuration into the MiniAccumuloCluster or the Hadoop core-site.xml file used by the MiniAccumuloCluster.
- */
-public interface MiniClusterConfigurationCallback {
-
- public static class NoCallback implements MiniClusterConfigurationCallback {
-
- private NoCallback() {}
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
- return;
- }
- }
-
- public static final MiniClusterConfigurationCallback NO_CALLBACK = new NoCallback();
-
- void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite);
-
-}
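
A brief, hypothetical implementation of this callback, using only calls already shown in these files (setProperty on the MAC config and set on the Hadoop Configuration); the chosen values are illustrative:

    import org.apache.accumulo.core.conf.Property;
    import org.apache.accumulo.harness.MiniClusterConfigurationCallback;
    import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
    import org.apache.hadoop.conf.Configuration;

    // Hypothetical callback: tweak the MAC site config and core-site.xml before startup.
    public class ExampleConfigCallback implements MiniClusterConfigurationCallback {
      @Override
      public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
        // Disable native maps for this test (illustrative)
        cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, "false");
        // Hypothetical Hadoop property written into the generated core-site.xml
        coreSite.set("io.file.buffer.size", "8192");
      }
    }
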
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/harness/MiniClusterHarness.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/harness/MiniClusterHarness.java b/test/src/test/java/org/apache/accumulo/harness/MiniClusterHarness.java
deleted file mode 100644
index d923593..0000000
--- a/test/src/test/java/org/apache/accumulo/harness/MiniClusterHarness.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.harness;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.BufferedOutputStream;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.OutputStream;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.security.handler.KerberosAuthenticator;
-import org.apache.accumulo.server.security.handler.KerberosAuthorizor;
-import org.apache.accumulo.server.security.handler.KerberosPermissionHandler;
-import org.apache.accumulo.test.functional.NativeMapIT;
-import org.apache.accumulo.test.util.CertUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Charsets;
-import com.google.common.base.Preconditions;
-
-/**
- * Harness that sets up a MiniAccumuloCluster in a manner expected for Accumulo integration tests.
- */
-public class MiniClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(MiniClusterHarness.class);
-
- private static final AtomicLong COUNTER = new AtomicLong(0);
-
- public static final String USE_SSL_FOR_IT_OPTION = "org.apache.accumulo.test.functional.useSslForIT",
- USE_CRED_PROVIDER_FOR_IT_OPTION = "org.apache.accumulo.test.functional.useCredProviderForIT",
- USE_KERBEROS_FOR_IT_OPTION = "org.apache.accumulo.test.functional.useKrbForIT", TRUE = Boolean.toString(true);
-
- // TODO These are defined in MiniKdc >= 2.6.0. Can be removed when minimum Hadoop dependency is increased to that.
- public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf", SUN_SECURITY_KRB5_DEBUG = "sun.security.krb5.debug";
-
- /**
- * Create a MiniAccumuloCluster using the given Token as the credentials for the root user.
- */
- public MiniAccumuloClusterImpl create(AuthenticationToken token) throws Exception {
- return create(MiniClusterHarness.class.getName(), Long.toString(COUNTER.incrementAndGet()), token);
- }
-
- public MiniAccumuloClusterImpl create(AuthenticationToken token, TestingKdc kdc) throws Exception {
- return create(MiniClusterHarness.class.getName(), Long.toString(COUNTER.incrementAndGet()), token, kdc);
- }
-
- public MiniAccumuloClusterImpl create(AccumuloITBase testBase, AuthenticationToken token) throws Exception {
- return create(testBase.getClass().getName(), testBase.testName.getMethodName(), token);
- }
-
- public MiniAccumuloClusterImpl create(AccumuloITBase testBase, AuthenticationToken token, TestingKdc kdc) throws Exception {
- return create(testBase, token, kdc, MiniClusterConfigurationCallback.NO_CALLBACK);
- }
-
- public MiniAccumuloClusterImpl create(AccumuloITBase testBase, AuthenticationToken token, TestingKdc kdc, MiniClusterConfigurationCallback configCallback)
- throws Exception {
- return create(testBase.getClass().getName(), testBase.testName.getMethodName(), token, configCallback, kdc);
- }
-
- public MiniAccumuloClusterImpl create(AccumuloClusterHarness testBase, AuthenticationToken token, TestingKdc kdc) throws Exception {
- return create(testBase.getClass().getName(), testBase.testName.getMethodName(), token, testBase, kdc);
- }
-
- public MiniAccumuloClusterImpl create(AccumuloClusterHarness testBase, AuthenticationToken token, MiniClusterConfigurationCallback callback) throws Exception {
- return create(testBase.getClass().getName(), testBase.testName.getMethodName(), token, callback);
- }
-
- public MiniAccumuloClusterImpl create(String testClassName, String testMethodName, AuthenticationToken token) throws Exception {
- return create(testClassName, testMethodName, token, MiniClusterConfigurationCallback.NO_CALLBACK);
- }
-
- public MiniAccumuloClusterImpl create(String testClassName, String testMethodName, AuthenticationToken token, TestingKdc kdc) throws Exception {
- return create(testClassName, testMethodName, token, MiniClusterConfigurationCallback.NO_CALLBACK, kdc);
- }
-
- public MiniAccumuloClusterImpl create(String testClassName, String testMethodName, AuthenticationToken token, MiniClusterConfigurationCallback configCallback)
- throws Exception {
- return create(testClassName, testMethodName, token, configCallback, null);
- }
-
- public MiniAccumuloClusterImpl create(String testClassName, String testMethodName, AuthenticationToken token,
- MiniClusterConfigurationCallback configCallback, TestingKdc kdc) throws Exception {
- Preconditions.checkNotNull(token);
- Preconditions.checkArgument(token instanceof PasswordToken || token instanceof KerberosToken, "A PasswordToken or KerberosToken is required");
-
- String rootPasswd;
- if (token instanceof PasswordToken) {
- rootPasswd = new String(((PasswordToken) token).getPassword(), Charsets.UTF_8);
- } else {
- rootPasswd = UUID.randomUUID().toString();
- }
-
- File baseDir = AccumuloClusterHarness.createTestDir(testClassName + "_" + testMethodName);
- MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(baseDir, rootPasswd);
-
- // Enable native maps by default
- cfg.setNativeLibPaths(NativeMapIT.nativeMapLocation().getAbsolutePath());
- cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, Boolean.TRUE.toString());
-
- Configuration coreSite = new Configuration(false);
-
- // Setup SSL and credential providers if the properties request such
- configureForEnvironment(cfg, getClass(), AccumuloClusterHarness.getSslDir(baseDir), coreSite, kdc);
-
- // Invoke the callback for tests to configure MAC before it starts
- configCallback.configureMiniCluster(cfg, coreSite);
-
- MiniAccumuloClusterImpl miniCluster = new MiniAccumuloClusterImpl(cfg);
-
- // Write out any configuration items to a file so HDFS will pick them up automatically (from the classpath)
- if (coreSite.size() > 0) {
- File csFile = new File(miniCluster.getConfig().getConfDir(), "core-site.xml");
- if (csFile.exists())
-        throw new RuntimeException(csFile + " already exists");
-
- OutputStream out = new BufferedOutputStream(new FileOutputStream(new File(miniCluster.getConfig().getConfDir(), "core-site.xml")));
- coreSite.writeXml(out);
- out.close();
- }
-
- return miniCluster;
- }
-
- protected void configureForEnvironment(MiniAccumuloConfigImpl cfg, Class<?> testClass, File folder, Configuration coreSite, TestingKdc kdc) {
- if (TRUE.equals(System.getProperty(USE_SSL_FOR_IT_OPTION))) {
- configureForSsl(cfg, folder);
- }
- if (TRUE.equals(System.getProperty(USE_CRED_PROVIDER_FOR_IT_OPTION))) {
- cfg.setUseCredentialProvider(true);
- }
-
- if (TRUE.equals(System.getProperty(USE_KERBEROS_FOR_IT_OPTION))) {
- if (TRUE.equals(System.getProperty(USE_SSL_FOR_IT_OPTION))) {
- throw new RuntimeException("Cannot use both SSL and Kerberos");
- }
-
- try {
- configureForKerberos(cfg, folder, coreSite, kdc);
- } catch (Exception e) {
- throw new RuntimeException("Failed to initialize KDC", e);
- }
- }
- }
-
- protected void configureForSsl(MiniAccumuloConfigImpl cfg, File folder) {
- Map<String,String> siteConfig = cfg.getSiteConfig();
- if (TRUE.equals(siteConfig.get(Property.INSTANCE_RPC_SSL_ENABLED.getKey()))) {
- // already enabled; don't mess with it
- return;
- }
-
- File sslDir = new File(folder, "ssl");
- assertTrue(sslDir.mkdirs() || sslDir.isDirectory());
- File rootKeystoreFile = new File(sslDir, "root-" + cfg.getInstanceName() + ".jks");
- File localKeystoreFile = new File(sslDir, "local-" + cfg.getInstanceName() + ".jks");
- File publicTruststoreFile = new File(sslDir, "public-" + cfg.getInstanceName() + ".jks");
- final String rootKeystorePassword = "root_keystore_password", truststorePassword = "truststore_password";
- try {
- new CertUtils(Property.RPC_SSL_KEYSTORE_TYPE.getDefaultValue(), "o=Apache Accumulo,cn=MiniAccumuloCluster", "RSA", 2048, "sha1WithRSAEncryption")
- .createAll(rootKeystoreFile, localKeystoreFile, publicTruststoreFile, cfg.getInstanceName(), rootKeystorePassword, cfg.getRootPassword(),
- truststorePassword);
- } catch (Exception e) {
- throw new RuntimeException("error creating MAC keystore", e);
- }
-
- siteConfig.put(Property.INSTANCE_RPC_SSL_ENABLED.getKey(), "true");
- siteConfig.put(Property.RPC_SSL_KEYSTORE_PATH.getKey(), localKeystoreFile.getAbsolutePath());
- siteConfig.put(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey(), cfg.getRootPassword());
- siteConfig.put(Property.RPC_SSL_TRUSTSTORE_PATH.getKey(), publicTruststoreFile.getAbsolutePath());
- siteConfig.put(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey(), truststorePassword);
- cfg.setSiteConfig(siteConfig);
- }
-
- protected void configureForKerberos(MiniAccumuloConfigImpl cfg, File folder, Configuration coreSite, TestingKdc kdc) throws Exception {
- Map<String,String> siteConfig = cfg.getSiteConfig();
- if (TRUE.equals(siteConfig.get(Property.INSTANCE_RPC_SSL_ENABLED.getKey()))) {
- throw new RuntimeException("Cannot use both SSL and SASL/Kerberos");
- }
-
- if (TRUE.equals(siteConfig.get(Property.INSTANCE_RPC_SASL_ENABLED.getKey()))) {
- // already enabled
- return;
- }
-
- if (null == kdc) {
-      throw new IllegalStateException("TestingKdc was null");
- }
-
- log.info("Enabling Kerberos/SASL for minicluster");
-
- // Turn on SASL and set the keytab/principal information
- cfg.setProperty(Property.INSTANCE_RPC_SASL_ENABLED, "true");
- ClusterUser serverUser = kdc.getAccumuloServerUser();
- cfg.setProperty(Property.GENERAL_KERBEROS_KEYTAB, serverUser.getKeytab().getAbsolutePath());
- cfg.setProperty(Property.GENERAL_KERBEROS_PRINCIPAL, serverUser.getPrincipal());
- cfg.setProperty(Property.INSTANCE_SECURITY_AUTHENTICATOR, KerberosAuthenticator.class.getName());
- cfg.setProperty(Property.INSTANCE_SECURITY_AUTHORIZOR, KerberosAuthorizor.class.getName());
- cfg.setProperty(Property.INSTANCE_SECURITY_PERMISSION_HANDLER, KerberosPermissionHandler.class.getName());
- // Piggy-back on the "system user" credential, but use it as a normal KerberosToken, not the SystemToken.
- cfg.setProperty(Property.TRACE_USER, serverUser.getPrincipal());
- cfg.setProperty(Property.TRACE_TOKEN_TYPE, KerberosToken.CLASS_NAME);
-
- // Pass down some KRB5 debug properties
- Map<String,String> systemProperties = cfg.getSystemProperties();
- systemProperties.put(JAVA_SECURITY_KRB5_CONF, System.getProperty(JAVA_SECURITY_KRB5_CONF, ""));
- systemProperties.put(SUN_SECURITY_KRB5_DEBUG, System.getProperty(SUN_SECURITY_KRB5_DEBUG, "false"));
- cfg.setSystemProperties(systemProperties);
-
- // Make sure UserGroupInformation will do the correct login
- coreSite.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
-
- cfg.setRootUserName(kdc.getRootUser().getPrincipal());
- }
-}
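
A hedged sketch of using the harness above directly (the class name and password are illustrative). The SSL, credential-provider, and Kerberos variants are selected by the system properties defined at the top of the file, e.g. -Dorg.apache.accumulo.test.functional.useSslForIT=true:

    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
    import org.apache.accumulo.harness.MiniClusterHarness;
    import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;

    public class MiniClusterHarnessExample {
      public static void main(String[] args) throws Exception {
        MiniClusterHarness harness = new MiniClusterHarness();
        // Creates a MAC under target/mini-tests with "secret" as the root password
        MiniAccumuloClusterImpl mac = harness.create(new PasswordToken("secret"));
        mac.start();
        try {
          // exercise the cluster, e.g. mac.getConnector("root", new PasswordToken("secret"))
        } finally {
          mac.stop();
        }
      }
    }
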
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/harness/SharedMiniClusterBase.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/harness/SharedMiniClusterBase.java b/test/src/test/java/org/apache/accumulo/harness/SharedMiniClusterBase.java
deleted file mode 100644
index 433e035..0000000
--- a/test/src/test/java/org/apache/accumulo/harness/SharedMiniClusterBase.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.harness;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Random;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.cluster.ClusterUsers;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Convenience class which starts a single MAC instance for a test to leverage.
- *
- * There isn't a good way to build this on top of {@link AccumuloClusterHarness} (as would be the logical place) because we need to start the MiniAccumuloCluster
- * in a static BeforeClass-annotated method. Because that method is static and invoked before any other BeforeClass methods in the implementation, the actual
- * test classes can't expose any information to tell the base class to perform the one-MAC-per-class semantics.
- */
-public abstract class SharedMiniClusterBase extends AccumuloITBase implements ClusterUsers {
- private static final Logger log = LoggerFactory.getLogger(SharedMiniClusterBase.class);
- public static final String TRUE = Boolean.toString(true);
-
- private static String principal = "root";
- private static String rootPassword;
- private static AuthenticationToken token;
- private static MiniAccumuloClusterImpl cluster;
- private static TestingKdc krb;
-
- @BeforeClass
- public static void startMiniCluster() throws Exception {
- File baseDir = new File(System.getProperty("user.dir") + "/target/mini-tests");
- assertTrue(baseDir.mkdirs() || baseDir.isDirectory());
-
- // Make a shared MAC instance instead of spinning up one per test method
- MiniClusterHarness harness = new MiniClusterHarness();
-
- if (TRUE.equals(System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION))) {
- krb = new TestingKdc();
- krb.start();
-      // Enable krb auth
- Configuration conf = new Configuration(false);
- conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
- UserGroupInformation.setConfiguration(conf);
- // Login as the client
- ClusterUser rootUser = krb.getRootUser();
- // Get the krb token
- principal = rootUser.getPrincipal();
- token = new KerberosToken(principal, rootUser.getKeytab(), true);
- } else {
- rootPassword = "rootPasswordShared1";
- token = new PasswordToken(rootPassword);
- }
-
- cluster = harness.create(SharedMiniClusterBase.class.getName(), System.currentTimeMillis() + "_" + new Random().nextInt(Short.MAX_VALUE), token, krb);
- cluster.start();
-
- if (null != krb) {
- final String traceTable = Property.TRACE_TABLE.getDefaultValue();
- final ClusterUser systemUser = krb.getAccumuloServerUser(), rootUser = krb.getRootUser();
-      // Log in as, and open a connector for, the system user (ensures the user will exist for us to assign permissions to)
- Connector conn = cluster.getConnector(systemUser.getPrincipal(), new KerberosToken(systemUser.getPrincipal(), systemUser.getKeytab(), true));
-
- // Then, log back in as the "root" user and do the grant
- UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
- conn = cluster.getConnector(principal, token);
-
- // Create the trace table
- conn.tableOperations().create(traceTable);
-
- // Trace user (which is the same kerberos principal as the system user, but using a normal KerberosToken) needs
- // to have the ability to read, write and alter the trace table
- conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.READ);
- conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.WRITE);
- conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable, TablePermission.ALTER_TABLE);
- }
- }
-
- @AfterClass
- public static void stopMiniCluster() throws Exception {
- if (null != cluster) {
- try {
- cluster.stop();
- } catch (Exception e) {
- log.error("Failed to stop minicluster", e);
- }
- }
- if (null != krb) {
- try {
- krb.stop();
- } catch (Exception e) {
- log.error("Failed to stop KDC", e);
- }
- }
- }
-
- public static String getRootPassword() {
- return rootPassword;
- }
-
- public static AuthenticationToken getToken() {
- if (token instanceof KerberosToken) {
- try {
- UserGroupInformation.loginUserFromKeytab(getPrincipal(), krb.getRootUser().getKeytab().getAbsolutePath());
- } catch (IOException e) {
- throw new RuntimeException("Failed to login", e);
- }
- }
- return token;
- }
-
- public static String getPrincipal() {
- return principal;
- }
-
- public static MiniAccumuloClusterImpl getCluster() {
- return cluster;
- }
-
- public static File getMiniClusterDir() {
- return cluster.getConfig().getDir();
- }
-
- public static Connector getConnector() {
- try {
- return getCluster().getConnector(principal, getToken());
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- }
-
- public static TestingKdc getKdc() {
- return krb;
- }
-
- @Override
- public ClusterUser getAdminUser() {
- if (null == krb) {
- return new ClusterUser(getPrincipal(), getRootPassword());
- } else {
- return krb.getRootUser();
- }
- }
-
- @Override
- public ClusterUser getUser(int offset) {
- if (null == krb) {
- String user = SharedMiniClusterBase.class.getName() + "_" + testName.getMethodName() + "_" + offset;
- // Password is the username
- return new ClusterUser(user, user);
- } else {
- return krb.getClientPrincipal(offset);
- }
- }
-}
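
A minimal, hypothetical test built on this base class; the single shared MAC is started in the static BeforeClass method above, and every test method in the class runs against it:

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.harness.SharedMiniClusterBase;
    import org.junit.Test;

    // Hypothetical IT sharing one MAC across all of its test methods.
    public class ExampleSharedMacIT extends SharedMiniClusterBase {

      @Test
      public void listTables() throws Exception {
        Connector conn = getConnector();
        // Accumulo's system tables are always present
        System.out.println(conn.tableOperations().list());
      }
    }
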
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/harness/TestingKdc.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/harness/TestingKdc.java b/test/src/test/java/org/apache/accumulo/harness/TestingKdc.java
deleted file mode 100644
index 9471274..0000000
--- a/test/src/test/java/org/apache/accumulo/harness/TestingKdc.java
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.harness;
-
-import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.net.InetAddress;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Properties;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.hadoop.minikdc.MiniKdc;
-import org.junit.Assert;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Creates a {@link MiniKdc} for tests to use to exercise secure Accumulo
- */
-public class TestingKdc {
- private static final Logger log = LoggerFactory.getLogger(TestingKdc.class);
-
- public static final int NUM_USERS = 10;
-
- protected MiniKdc kdc = null;
- protected ClusterUser accumuloServerUser = null, accumuloAdmin = null;
- protected List<ClusterUser> clientPrincipals = null;
-
- public final String ORG_NAME = "EXAMPLE", ORG_DOMAIN = "COM";
-
- private String hostname;
- private File keytabDir;
- private boolean started = false;
-
- public TestingKdc() throws Exception {
- this(computeKdcDir(), computeKeytabDir());
- }
-
- private static File computeKdcDir() {
- File targetDir = new File(System.getProperty("user.dir"), "target");
- Assert.assertTrue("Could not find Maven target directory: " + targetDir, targetDir.exists() && targetDir.isDirectory());
-
- // Create the directories: target/kerberos/minikdc
- File kdcDir = new File(new File(targetDir, "kerberos"), "minikdc");
-
- assertTrue(kdcDir.mkdirs() || kdcDir.isDirectory());
-
- return kdcDir;
- }
-
- private static File computeKeytabDir() {
- File targetDir = new File(System.getProperty("user.dir"), "target");
- Assert.assertTrue("Could not find Maven target directory: " + targetDir, targetDir.exists() && targetDir.isDirectory());
-
- // Create the directories: target/kerberos/keytabs
- File keytabDir = new File(new File(targetDir, "kerberos"), "keytabs");
-
- assertTrue(keytabDir.mkdirs() || keytabDir.isDirectory());
-
- return keytabDir;
- }
-
- public TestingKdc(File kdcDir, File keytabDir) throws Exception {
- checkNotNull(kdcDir, "KDC directory was null");
- checkNotNull(keytabDir, "Keytab directory was null");
-
- this.keytabDir = keytabDir;
- this.hostname = InetAddress.getLocalHost().getCanonicalHostName();
-
- log.debug("Starting MiniKdc in {} with keytabs in {}", kdcDir, keytabDir);
-
- Properties kdcConf = MiniKdc.createConf();
- kdcConf.setProperty(MiniKdc.ORG_NAME, ORG_NAME);
- kdcConf.setProperty(MiniKdc.ORG_DOMAIN, ORG_DOMAIN);
- // kdcConf.setProperty(MiniKdc.DEBUG, "true");
- kdc = new MiniKdc(kdcConf, kdcDir);
- }
-
- /**
- * Starts the KDC and creates the principals and their keytabs
- */
- public synchronized void start() throws Exception {
- checkArgument(!started, "KDC was already started");
- kdc.start();
-    Thread.sleep(1000); // brief pause to let the KDC finish starting before principals are created
-
- // Create the identity for accumulo servers
- File accumuloKeytab = new File(keytabDir, "accumulo.keytab");
- String accumuloPrincipal = String.format("accumulo/%s", hostname);
-
- log.info("Creating Kerberos principal {} with keytab {}", accumuloPrincipal, accumuloKeytab);
- kdc.createPrincipal(accumuloKeytab, accumuloPrincipal);
-
- accumuloServerUser = new ClusterUser(qualifyUser(accumuloPrincipal), accumuloKeytab);
-
- // Create the identity for the "root" user
- String rootPrincipal = "root";
- File rootKeytab = new File(keytabDir, rootPrincipal + ".keytab");
-
- log.info("Creating Kerberos principal {} with keytab {}", rootPrincipal, rootKeytab);
- kdc.createPrincipal(rootKeytab, rootPrincipal);
-
- accumuloAdmin = new ClusterUser(qualifyUser(rootPrincipal), rootKeytab);
-
- clientPrincipals = new ArrayList<>(NUM_USERS);
- // Create a number of unprivileged users for tests to use
- for (int i = 1; i <= NUM_USERS; i++) {
- String clientPrincipal = "client" + i;
- File clientKeytab = new File(keytabDir, clientPrincipal + ".keytab");
-
- log.info("Creating Kerberos principal {} with keytab {}", clientPrincipal, clientKeytab);
- kdc.createPrincipal(clientKeytab, clientPrincipal);
-
- clientPrincipals.add(new ClusterUser(qualifyUser(clientPrincipal), clientKeytab));
- }
-
- started = true;
- }
-
- public synchronized void stop() throws Exception {
- checkArgument(started, "KDC is not started");
- kdc.stop();
- started = false;
- }
-
- /**
- * A directory where the automatically-created keytab files are written
- */
- public File getKeytabDir() {
- return keytabDir;
- }
-
- /**
- * A {@link ClusterUser} for Accumulo server processes to use
- */
- public ClusterUser getAccumuloServerUser() {
- checkArgument(started, "The KDC is not started");
- return accumuloServerUser;
- }
-
- /**
- * A {@link ClusterUser} which is the Accumulo "root" user
- */
- public ClusterUser getRootUser() {
- checkArgument(started, "The KDC is not started");
- return accumuloAdmin;
- }
-
- /**
- * The {@link ClusterUser} corresponding to the given offset. Represents an unprivileged user.
- *
- * @param offset
- *          The offset to fetch credentials for; valid from 0 (inclusive) up to {@link #NUM_USERS} (exclusive)
- */
- public ClusterUser getClientPrincipal(int offset) {
- checkArgument(started, "Client principal is not initialized, is the KDC started?");
- checkArgument(offset >= 0 && offset < NUM_USERS, "Offset is invalid, must be non-negative and less than " + NUM_USERS);
- return clientPrincipals.get(offset);
- }
-
- /**
- * @see MiniKdc#createPrincipal(File, String...)
- */
- public void createPrincipal(File keytabFile, String... principals) throws Exception {
- checkArgument(started, "KDC is not started");
- kdc.createPrincipal(keytabFile, principals);
- }
-
- /**
- * @return the name for the realm
- */
- public String getOrgName() {
- return ORG_NAME;
- }
-
- /**
- * @return the domain for the realm
- */
- public String getOrgDomain() {
- return ORG_DOMAIN;
- }
-
- /**
- * Qualify a username (only the primary from the kerberos principal) with the proper realm
- *
- * @param primary
- * The primary or primary and instance
- */
- public String qualifyUser(String primary) {
- return String.format("%s@%s.%s", primary, getOrgName(), getOrgDomain());
- }
-}
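
For reference, a hedged sketch of the KDC lifecycle enforced by the checkArgument calls above; note that qualifyUser appends the realm, so qualifyUser("client1") yields "client1@EXAMPLE.COM":

    import org.apache.accumulo.cluster.ClusterUser;
    import org.apache.accumulo.harness.TestingKdc;

    public class TestingKdcExample {
      public static void main(String[] args) throws Exception {
        TestingKdc kdc = new TestingKdc(); // keytabs land under target/kerberos/keytabs
        kdc.start(); // creates the accumulo, root, and client1..client10 principals
        try {
          ClusterUser root = kdc.getRootUser();
          System.out.println(root.getPrincipal() + " -> " + root.getKeytab());
        } finally {
          kdc.stop();
        }
      }
    }
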
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/harness/conf/AccumuloClusterConfiguration.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/harness/conf/AccumuloClusterConfiguration.java b/test/src/test/java/org/apache/accumulo/harness/conf/AccumuloClusterConfiguration.java
deleted file mode 100644
index 31ed94a..0000000
--- a/test/src/test/java/org/apache/accumulo/harness/conf/AccumuloClusterConfiguration.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.harness.conf;
-
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.harness.AccumuloClusterHarness.ClusterType;
-
-/**
- * Base functionality that must be provided as configuration to the test
- */
-public interface AccumuloClusterConfiguration {
-
- ClusterType getClusterType();
-
- String getAdminPrincipal();
-
- AuthenticationToken getAdminToken();
-
- ClientConfiguration getClientConf();
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/harness/conf/AccumuloClusterPropertyConfiguration.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/harness/conf/AccumuloClusterPropertyConfiguration.java b/test/src/test/java/org/apache/accumulo/harness/conf/AccumuloClusterPropertyConfiguration.java
deleted file mode 100644
index 2300da3..0000000
--- a/test/src/test/java/org/apache/accumulo/harness/conf/AccumuloClusterPropertyConfiguration.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.harness.conf;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileReader;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Properties;
-
-import org.apache.accumulo.harness.AccumuloClusterHarness.ClusterType;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Base class for extracting configuration values from Java Properties
- */
-public abstract class AccumuloClusterPropertyConfiguration implements AccumuloClusterConfiguration {
- private static final Logger log = LoggerFactory.getLogger(AccumuloClusterPropertyConfiguration.class);
-
- public static final String ACCUMULO_IT_PROPERTIES_FILE = "accumulo.it.properties";
-
- public static final String ACCUMULO_CLUSTER_TYPE_KEY = "accumulo.it.cluster.type";
-
- public static final String ACCUMULO_MINI_PREFIX = "accumulo.it.cluster.mini.";
- public static final String ACCUMULO_STANDALONE_PREFIX = "accumulo.it.cluster.standalone.";
-
- public static final String ACCUMULO_CLUSTER_CLIENT_CONF_KEY = "accumulo.it.cluster.clientconf";
-
- protected ClusterType clusterType;
-
- public static AccumuloClusterPropertyConfiguration get() {
- Properties systemProperties = System.getProperties();
-
- String clusterTypeValue = null, clientConf = null;
- String propertyFile = systemProperties.getProperty(ACCUMULO_IT_PROPERTIES_FILE);
-
- if (null != propertyFile) {
- // Check for properties provided in a file
- File f = new File(propertyFile);
- if (f.exists() && f.isFile() && f.canRead()) {
- Properties fileProperties = new Properties();
- FileReader reader = null;
- try {
- reader = new FileReader(f);
- } catch (FileNotFoundException e) {
- log.warn("Could not read properties from specified file: {}", propertyFile, e);
- }
-
- if (null != reader) {
- try {
- fileProperties.load(reader);
- } catch (IOException e) {
- log.warn("Could not load properties from specified file: {}", propertyFile, e);
- } finally {
- try {
- reader.close();
- } catch (IOException e) {
- log.warn("Could not close reader", e);
- }
- }
-
- clusterTypeValue = fileProperties.getProperty(ACCUMULO_CLUSTER_TYPE_KEY);
- clientConf = fileProperties.getProperty(ACCUMULO_CLUSTER_CLIENT_CONF_KEY);
- }
- } else {
- log.debug("Property file ({}) is not a readable file", propertyFile);
- }
- } else {
- log.debug("No properties file found in {}", ACCUMULO_IT_PROPERTIES_FILE);
- }
-
- if (null == clusterTypeValue) {
- clusterTypeValue = systemProperties.getProperty(ACCUMULO_CLUSTER_TYPE_KEY);
- }
-
- if (null == clientConf) {
- clientConf = systemProperties.getProperty(ACCUMULO_CLUSTER_CLIENT_CONF_KEY);
- }
-
- ClusterType type;
- if (null == clusterTypeValue) {
- type = ClusterType.MINI;
- } else {
- type = ClusterType.valueOf(clusterTypeValue);
- }
-
- log.info("Using {} cluster type from system properties", type);
-
- switch (type) {
- case MINI:
-        // we'll let a null client conf pass through and expect the caller to set it after MAC is started
- return new AccumuloMiniClusterConfiguration();
- case STANDALONE:
- if (null == clientConf) {
- throw new RuntimeException("Expected client configuration to be provided: " + ACCUMULO_CLUSTER_CLIENT_CONF_KEY);
- }
- File clientConfFile = new File(clientConf);
- if (!clientConfFile.exists() || !clientConfFile.isFile()) {
- throw new RuntimeException("Client configuration should be a normal file: " + clientConfFile);
- }
- return new StandaloneAccumuloClusterConfiguration(clientConfFile);
- default:
- throw new RuntimeException("Clusters other than MiniAccumuloCluster are not yet implemented");
- }
- }
-
- public Map<String,String> getConfiguration(ClusterType type) {
- Preconditions.checkNotNull(type);
-
- String prefix;
- switch (type) {
- case MINI:
- prefix = ACCUMULO_MINI_PREFIX;
- break;
- case STANDALONE:
- prefix = ACCUMULO_STANDALONE_PREFIX;
- break;
- default:
- throw new IllegalArgumentException("Unknown ClusterType: " + type);
- }
-
- Map<String,String> configuration = new HashMap<String,String>();
-
- Properties systemProperties = System.getProperties();
-
- String propertyFile = systemProperties.getProperty(ACCUMULO_IT_PROPERTIES_FILE);
-
- // Check for properties provided in a file
- if (null != propertyFile) {
- File f = new File(propertyFile);
- if (f.exists() && f.isFile() && f.canRead()) {
- Properties fileProperties = new Properties();
- FileReader reader = null;
- try {
- reader = new FileReader(f);
- } catch (FileNotFoundException e) {
- log.warn("Could not read properties from specified file: {}", propertyFile, e);
- }
-
- if (null != reader) {
- try {
- fileProperties.load(reader);
- loadFromProperties(prefix, fileProperties, configuration);
- } catch (IOException e) {
- log.warn("Could not load properties from specified file: {}", propertyFile, e);
- } finally {
- try {
- reader.close();
- } catch (IOException e) {
- log.warn("Could not close reader", e);
- }
- }
- }
- }
- }
-
- // Load any properties specified directly in the system properties
- loadFromProperties(prefix, systemProperties, configuration);
-
- return configuration;
- }
-
- protected void loadFromProperties(String desiredPrefix, Properties properties, Map<String,String> configuration) {
- for (Entry<Object,Object> entry : properties.entrySet()) {
- if (!(entry.getKey() instanceof String)) {
- continue;
- }
-
- String key = (String) entry.getKey();
- if (key.startsWith(desiredPrefix)) {
- configuration.put(key, (String) entry.getValue());
- }
- }
- }
-}
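
To make the lookup order above concrete (a file named by the accumulo.it.properties system property is consulted first, then bare system properties), a hypothetical properties file selecting a standalone cluster could look like this, with placeholder paths:

    # passed as -Daccumulo.it.properties=/path/to/accumulo-it.properties
    accumulo.it.cluster.type=STANDALONE
    accumulo.it.cluster.clientconf=/path/to/client.conf
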
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/harness/conf/AccumuloMiniClusterConfiguration.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/harness/conf/AccumuloMiniClusterConfiguration.java b/test/src/test/java/org/apache/accumulo/harness/conf/AccumuloMiniClusterConfiguration.java
deleted file mode 100644
index 4d233a5..0000000
--- a/test/src/test/java/org/apache/accumulo/harness/conf/AccumuloMiniClusterConfiguration.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.harness.conf;
-
-import java.io.IOException;
-import java.util.Map;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.harness.AccumuloClusterHarness.ClusterType;
-import org.apache.accumulo.harness.MiniClusterHarness;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Extract configuration properties for a MiniAccumuloCluster from Java properties
- */
-public class AccumuloMiniClusterConfiguration extends AccumuloClusterPropertyConfiguration {
- private static final Logger log = LoggerFactory.getLogger(AccumuloMiniClusterConfiguration.class);
- private static final String TRUE = Boolean.toString(true);
-
- public static final String ACCUMULO_MINI_PRINCIPAL_KEY = ACCUMULO_MINI_PREFIX + "principal";
- public static final String ACCUMULO_MINI_PRINCIPAL_DEFAULT = "root";
- public static final String ACCUMULO_MINI_PASSWORD_KEY = ACCUMULO_MINI_PREFIX + "password";
- public static final String ACCUMULO_MINI_PASSWORD_DEFAULT = "rootPassword1";
-
- private final Map<String,String> conf;
- private final boolean saslEnabled;
- private ClientConfiguration clientConf;
-
- public AccumuloMiniClusterConfiguration() {
- ClusterType type = getClusterType();
- if (ClusterType.MINI != type) {
- throw new IllegalStateException("Expected only to see mini cluster state");
- }
-
- this.conf = getConfiguration(type);
- this.saslEnabled = TRUE.equals(System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION));
- log.debug("SASL is {}enabled", (saslEnabled ? "" : "not "));
- }
-
- @Override
- public String getAdminPrincipal() {
- if (saslEnabled) {
- return AccumuloClusterHarness.getKdc().getRootUser().getPrincipal();
- } else {
- String principal = conf.get(ACCUMULO_MINI_PRINCIPAL_KEY);
- if (null == principal) {
- principal = ACCUMULO_MINI_PRINCIPAL_DEFAULT;
- }
-
- return principal;
- }
- }
-
- @Override
- public AuthenticationToken getAdminToken() {
- if (saslEnabled) {
- // Turn on Kerberos authentication so UGI acts properly
- final Configuration conf = new Configuration(false);
- conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
- UserGroupInformation.setConfiguration(conf);
-
- ClusterUser rootUser = AccumuloClusterHarness.getKdc().getRootUser();
- try {
- return new KerberosToken(rootUser.getPrincipal(), rootUser.getKeytab(), true);
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- } else {
- String password = conf.get(ACCUMULO_MINI_PASSWORD_KEY);
- if (null == password) {
- password = ACCUMULO_MINI_PASSWORD_DEFAULT;
- }
-
- return new PasswordToken(password);
- }
- }
-
- @Override
- public ClusterType getClusterType() {
- return ClusterType.MINI;
- }
-
- @Override
- public ClientConfiguration getClientConf() {
- return clientConf;
- }
-
- public void setClientConf(ClientConfiguration conf) {
- Preconditions.checkNotNull(conf, "Client configuration was null");
- this.clientConf = conf;
- }
-}
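
The admin credentials above default to root/rootPassword1; both can be overridden with the two keys this class defines, for example (illustrative Maven invocation):

    mvn verify -Daccumulo.it.cluster.mini.principal=admin \
               -Daccumulo.it.cluster.mini.password=someOtherPassword
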
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/harness/conf/StandaloneAccumuloClusterConfiguration.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/harness/conf/StandaloneAccumuloClusterConfiguration.java b/test/src/test/java/org/apache/accumulo/harness/conf/StandaloneAccumuloClusterConfiguration.java
deleted file mode 100644
index ba9dcef..0000000
--- a/test/src/test/java/org/apache/accumulo/harness/conf/StandaloneAccumuloClusterConfiguration.java
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.harness.conf;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.harness.AccumuloClusterHarness.ClusterType;
-import org.apache.commons.configuration.ConfigurationException;
-import org.apache.hadoop.fs.Path;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Extract connection information to a standalone Accumulo instance from Java properties
- */
-public class StandaloneAccumuloClusterConfiguration extends AccumuloClusterPropertyConfiguration {
- private static final Logger log = LoggerFactory.getLogger(StandaloneAccumuloClusterConfiguration.class);
-
- public static final String ACCUMULO_STANDALONE_ADMIN_PRINCIPAL_KEY = ACCUMULO_STANDALONE_PREFIX + "admin.principal";
- public static final String ACCUMULO_STANDALONE_ADMIN_PRINCIPAL_DEFAULT = "root";
- public static final String ACCUMULO_STANDALONE_PASSWORD_KEY = ACCUMULO_STANDALONE_PREFIX + "admin.password";
- public static final String ACCUMULO_STANDALONE_PASSWORD_DEFAULT = "rootPassword1";
- public static final String ACCUMULO_STANDALONE_ADMIN_KEYTAB_KEY = ACCUMULO_STANDALONE_PREFIX + "admin.keytab";
- public static final String ACCUMULO_STANDALONE_ZOOKEEPERS_KEY = ACCUMULO_STANDALONE_PREFIX + "zookeepers";
- public static final String ACCUMULO_STANDALONE_ZOOKEEPERS_DEFAULT = "localhost";
- public static final String ACCUMULO_STANDALONE_INSTANCE_NAME_KEY = ACCUMULO_STANDALONE_PREFIX + "instance.name";
- public static final String ACCUMULO_STANDALONE_INSTANCE_NAME_DEFAULT = "accumulo";
- public static final String ACCUMULO_STANDALONE_TMP_DIR_KEY = ACCUMULO_STANDALONE_PREFIX + "tmpdir";
- public static final String ACCUMULO_STANDALONE_TMP_DIR_DEFAULT = "/tmp";
- public static final String ACCUMULO_STANDALONE_SERVER_USER = ACCUMULO_STANDALONE_PREFIX + "server.user";
- public static final String ACCUMULO_STANDALONE_SERVER_USER_DEFAULT = "accumulo";
-
- // A set of users we can use to connect to this instance
- public static final String ACCUMULO_STANDALONE_USER_KEY = ACCUMULO_STANDALONE_PREFIX + "users.";
- // Keytabs for the users
- public static final String ACCUMULO_STANDALONE_USER_KEYTABS_KEY = ACCUMULO_STANDALONE_PREFIX + "keytabs.";
- // Passwords for the users
- public static final String ACCUMULO_STANDALONE_USER_PASSWORDS_KEY = ACCUMULO_STANDALONE_PREFIX + "passwords.";
-
- public static final String ACCUMULO_STANDALONE_HOME = ACCUMULO_STANDALONE_PREFIX + "home";
- public static final String ACCUMULO_STANDALONE_CLIENT_CONF = ACCUMULO_STANDALONE_PREFIX + "client.conf";
- public static final String ACCUMULO_STANDALONE_SERVER_CONF = ACCUMULO_STANDALONE_PREFIX + "server.conf";
- public static final String ACCUMULO_STANDALONE_HADOOP_CONF = ACCUMULO_STANDALONE_PREFIX + "hadoop.conf";
-
- private Map<String,String> conf;
- private String serverUser;
- private File clientConfFile;
- private ClientConfiguration clientConf;
- private List<ClusterUser> clusterUsers;
-
- public StandaloneAccumuloClusterConfiguration(File clientConfFile) {
- ClusterType type = getClusterType();
- if (ClusterType.STANDALONE != type) {
- throw new IllegalStateException("Expected only to see standalone cluster state");
- }
-
- this.conf = getConfiguration(type);
- this.clientConfFile = clientConfFile;
- try {
- this.clientConf = new ClientConfiguration(clientConfFile);
- } catch (ConfigurationException e) {
- throw new RuntimeException("Failed to load client configuration from " + clientConfFile);
- }
- // Update instance name if not already set
- if (!clientConf.containsKey(ClientProperty.INSTANCE_NAME.getKey())) {
- clientConf.withInstance(getInstanceName());
- }
- // Update zookeeper hosts if not already set
- if (!clientConf.containsKey(ClientProperty.INSTANCE_ZK_HOST.getKey())) {
- clientConf.withZkHosts(getZooKeepers());
- }
-
- // The user Accumulo is running as
- serverUser = conf.get(ACCUMULO_STANDALONE_SERVER_USER);
- if (null == serverUser) {
- serverUser = ACCUMULO_STANDALONE_SERVER_USER_DEFAULT;
- }
-
- clusterUsers = new ArrayList<>();
- for (Entry<String,String> entry : conf.entrySet()) {
- String key = entry.getKey();
- if (key.startsWith(ACCUMULO_STANDALONE_USER_KEY)) {
- String suffix = key.substring(ACCUMULO_STANDALONE_USER_KEY.length());
- String keytab = conf.get(ACCUMULO_STANDALONE_USER_KEYTABS_KEY + suffix);
- if (null != keytab) {
- File keytabFile = new File(keytab);
- assertTrue("Keytab doesn't exist: " + keytabFile, keytabFile.exists() && keytabFile.isFile());
- clusterUsers.add(new ClusterUser(entry.getValue(), keytabFile));
- } else {
- String password = conf.get(ACCUMULO_STANDALONE_USER_PASSWORDS_KEY + suffix);
- if (null == password) {
- throw new IllegalArgumentException("Missing password or keytab configuration for user with offset " + suffix);
- }
- clusterUsers.add(new ClusterUser(entry.getValue(), password));
- }
- }
- }
- log.info("Initialized Accumulo users with Kerberos keytabs: {}", clusterUsers);
- }
-
- @Override
- public String getAdminPrincipal() {
- String principal = conf.get(ACCUMULO_STANDALONE_ADMIN_PRINCIPAL_KEY);
- if (null == principal) {
- principal = ACCUMULO_STANDALONE_ADMIN_PRINCIPAL_DEFAULT;
- }
- return principal;
- }
-
- public String getPassword() {
- String password = conf.get(ACCUMULO_STANDALONE_PASSWORD_KEY);
- if (null == password) {
- password = ACCUMULO_STANDALONE_PASSWORD_DEFAULT;
- }
- return password;
- }
-
- public File getAdminKeytab() {
- String keytabPath = conf.get(ACCUMULO_STANDALONE_ADMIN_KEYTAB_KEY);
- if (null == keytabPath) {
- throw new RuntimeException("SASL is enabled, but " + ACCUMULO_STANDALONE_ADMIN_KEYTAB_KEY + " was not provided");
- }
- File keytab = new File(keytabPath);
- if (!keytab.exists() || !keytab.isFile()) {
- throw new RuntimeException(keytabPath + " should be a regular file");
- }
- return keytab;
- }
-
- @Override
- public AuthenticationToken getAdminToken() {
- if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- File keytab = getAdminKeytab();
- try {
- return new KerberosToken(getAdminPrincipal(), keytab, true);
- } catch (IOException e) {
- // Login via the keytab failed
- throw new RuntimeException("Failed to create KerberosToken", e);
- }
- } else {
- return new PasswordToken(getPassword());
- }
- }
-
- public String getZooKeepers() {
- if (clientConf.containsKey(ClientProperty.INSTANCE_ZK_HOST.getKey())) {
- return clientConf.get(ClientProperty.INSTANCE_ZK_HOST);
- }
-
- String zookeepers = conf.get(ACCUMULO_STANDALONE_ZOOKEEPERS_KEY);
- if (null == zookeepers) {
- zookeepers = ACCUMULO_STANDALONE_ZOOKEEPERS_DEFAULT;
- }
- return zookeepers;
- }
-
- public String getInstanceName() {
- if (clientConf.containsKey(ClientProperty.INSTANCE_NAME.getKey())) {
- return clientConf.get(ClientProperty.INSTANCE_NAME);
- }
-
- String instanceName = conf.get(ACCUMULO_STANDALONE_INSTANCE_NAME_KEY);
- if (null == instanceName) {
- instanceName = ACCUMULO_STANDALONE_INSTANCE_NAME_DEFAULT;
- }
- return instanceName;
- }
-
- public Instance getInstance() {
- // Make sure the ZooKeeperInstance is created with the ClientConfiguration so settings like SASL are passed through to the Connector
- return new ZooKeeperInstance(clientConf);
- }
-
- @Override
- public ClusterType getClusterType() {
- return ClusterType.STANDALONE;
- }
-
- public String getHadoopConfDir() {
- return conf.get(ACCUMULO_STANDALONE_HADOOP_CONF);
- }
-
- public String getAccumuloHome() {
- return conf.get(ACCUMULO_STANDALONE_HOME);
- }
-
- public String getClientAccumuloConfDir() {
- return conf.get(ACCUMULO_STANDALONE_CLIENT_CONF);
- }
-
- public String getServerAccumuloConfDir() {
- return conf.get(ACCUMULO_STANDALONE_SERVER_CONF);
- }
-
- @Override
- public ClientConfiguration getClientConf() {
- return clientConf;
- }
-
- public File getClientConfFile() {
- return clientConfFile;
- }
-
- public Path getTmpDirectory() {
- String tmpDir = conf.get(ACCUMULO_STANDALONE_TMP_DIR_KEY);
- if (null == tmpDir) {
- tmpDir = ACCUMULO_STANDALONE_TMP_DIR_DEFAULT;
- }
- return new Path(tmpDir);
- }
-
- public List<ClusterUser> getUsers() {
- return Collections.unmodifiableList(clusterUsers);
- }
-
- /**
- * @return The user Accumulo is running as
- */
- public String getAccumuloServerUser() {
- return serverUser;
- }
-}
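
For reference, the deleted class paired each users.N entry with a keytabs.N or passwords.N credential by its numeric suffix. A self-contained sketch of that pairing scheme (the property names and values below are illustrative; the real keys carry the harness's standalone prefix, which is defined outside this diff):

    import java.util.HashMap;
    import java.util.Map;

    public class UserCredentialPairing {
      public static void main(String[] args) {
        Map<String,String> conf = new HashMap<>();
        conf.put("users.0", "testuser1");
        conf.put("keytabs.0", "/path/to/testuser1.keytab"); // hypothetical path
        conf.put("users.1", "testuser2");
        conf.put("passwords.1", "testuser2password");       // hypothetical password

        for (Map.Entry<String,String> e : conf.entrySet()) {
          if (!e.getKey().startsWith("users."))
            continue;
          String suffix = e.getKey().substring("users.".length());
          String keytab = conf.get("keytabs." + suffix);
          String password = conf.get("passwords." + suffix);
          if (keytab == null && password == null)
            throw new IllegalArgumentException("Missing credential for user " + suffix);
          System.out.println(e.getValue() + " -> " + (keytab != null ? "keytab " + keytab : "password"));
        }
      }
    }
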
[34/43] accumulo git commit: ACCUMULO-3871 move ITs into distro jar, stop building test jar
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/ConcurrencyIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ConcurrencyIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ConcurrencyIT.java
new file mode 100644
index 0000000..75eecfd
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ConcurrencyIT.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.util.EnumSet;
+import java.util.Map;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class ConcurrencyIT extends AccumuloClusterHarness {
+
+ static class ScanTask extends Thread {
+
+ int count = 0;
+ Scanner scanner;
+
+ ScanTask(Connector conn, String tableName, long time) throws Exception {
+ scanner = conn.createScanner(tableName, Authorizations.EMPTY);
+ IteratorSetting slow = new IteratorSetting(30, "slow", SlowIterator.class);
+ SlowIterator.setSleepTime(slow, time);
+ scanner.addScanIterator(slow);
+ }
+
+ @Override
+ public void run() {
+ count = Iterators.size(scanner.iterator());
+ }
+
+ }
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ Map<String,String> siteConfig = cfg.getSiteConfig();
+ siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1");
+ cfg.setSiteConfig(siteConfig);
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ // @formatter:off
+ // Below is a diagram of the operations in this test over time.
+ //
+ // Scan 0 |------------------------------|
+ // Scan 1 |----------|
+ // Minc 1 |-----|
+ // Scan 2 |----------|
+ // Scan 3 |---------------|
+ // Minc 2 |-----|
+ // Majc 1 |-----|
+ // @formatter:on
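+ // Every scan should count exactly 50 entries: the second batch of mutations
+ // rewrites the same 50 row/cf/cq keys, so flushes and compactions change the
+ // file layout but never the number of live key-value pairs a scan sees.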
+ @Test
+ public void run() throws Exception {
+ Connector c = getConnector();
+ runTest(c, getUniqueNames(1)[0]);
+ }
+
+ static void runTest(Connector c, String tableName) throws Exception {
+ c.tableOperations().create(tableName);
+ IteratorSetting is = new IteratorSetting(10, SlowIterator.class);
+ SlowIterator.setSleepTime(is, 50);
+ c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.minc, IteratorScope.majc));
+ c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1.0");
+
+ BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+ for (int i = 0; i < 50; i++) {
+ Mutation m = new Mutation(new Text(String.format("%06d", i)));
+ m.put(new Text("cf1"), new Text("cq1"), new Value("foo".getBytes(UTF_8)));
+ bw.addMutation(m);
+ }
+ bw.flush();
+
+ ScanTask st0 = new ScanTask(c, tableName, 300);
+ st0.start();
+
+ ScanTask st1 = new ScanTask(c, tableName, 100);
+ st1.start();
+
+ UtilWaitThread.sleep(50);
+ c.tableOperations().flush(tableName, null, null, true);
+
+ for (int i = 0; i < 50; i++) {
+ Mutation m = new Mutation(new Text(String.format("%06d", i)));
+ m.put(new Text("cf1"), new Text("cq1"), new Value("foo".getBytes(UTF_8)));
+ bw.addMutation(m);
+ }
+
+ bw.flush();
+
+ ScanTask st2 = new ScanTask(c, tableName, 100);
+ st2.start();
+
+ st1.join();
+ st2.join();
+ if (st1.count != 50)
+ throw new Exception("Thread 1 did not see 50, saw " + st1.count);
+
+ if (st2.count != 50)
+ throw new Exception("Thread 2 did not see 50, saw " + st2.count);
+
+ ScanTask st3 = new ScanTask(c, tableName, 150);
+ st3.start();
+
+ UtilWaitThread.sleep(50);
+ c.tableOperations().flush(tableName, null, null, false);
+
+ st3.join();
+ if (st3.count != 50)
+ throw new Exception("Thread 3 did not see 50, saw " + st3.count);
+
+ st0.join();
+ if (st0.count != 50)
+ throw new Exception("Thread 0 did not see 50, saw " + st0.count);
+
+ bw.close();
+ }
+}
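
The overlap in the diagram comes from attaching SlowIterator in two places: at minc/majc scope so compactions take a predictable amount of time, and per scan so each ScanTask stays open across them. A minimal sketch of the per-scan wiring, assuming a Connector conn and a table name supplied by the harness:

    Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
    IteratorSetting slow = new IteratorSetting(30, "slow", SlowIterator.class);
    SlowIterator.setSleepTime(slow, 300); // sleep applied as the iterator advances
    scanner.addScanIterator(slow);
    int seen = Iterators.size(scanner.iterator()); // drives the slow scan to completion
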
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/ConfigurableCompactionIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ConfigurableCompactionIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ConfigurableCompactionIT.java
new file mode 100644
index 0000000..66695e0
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ConfigurableCompactionIT.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Random;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.fate.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.tserver.compaction.CompactionPlan;
+import org.apache.accumulo.tserver.compaction.CompactionStrategy;
+import org.apache.accumulo.tserver.compaction.MajorCompactionRequest;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class ConfigurableCompactionIT extends ConfigurableMacBase {
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_MAJC_DELAY.getKey(), "1s"));
+ }
+
+ public static class SimpleCompactionStrategy extends CompactionStrategy {
+
+ @Override
+ public void init(Map<String,String> options) {
+ String countString = options.get("count");
+ if (countString != null)
+ count = Integer.parseInt(countString);
+ }
+
+ int count = 3;
+
+ @Override
+ public boolean shouldCompact(MajorCompactionRequest request) throws IOException {
+ return request.getFiles().size() == count;
+ }
+
+ @Override
+ public CompactionPlan getCompactionPlan(MajorCompactionRequest request) throws IOException {
+ CompactionPlan result = new CompactionPlan();
+ result.inputFiles.addAll(request.getFiles().keySet());
+ return result;
+ }
+
+ }
+
+ @Test
+ public void test() throws Exception {
+ final Connector c = getConnector();
+ final String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ c.tableOperations().setProperty(tableName, Property.TABLE_COMPACTION_STRATEGY.getKey(), SimpleCompactionStrategy.class.getName());
+ runTest(c, tableName, 3);
+ c.tableOperations().setProperty(tableName, Property.TABLE_COMPACTION_STRATEGY_PREFIX.getKey() + "count", "5");
+ runTest(c, tableName, 5);
+ }
+
+ @Test
+ public void testPerTableClasspath() throws Exception {
+ final Connector c = getConnector();
+ final String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ c.instanceOperations().setProperty(Property.VFS_CONTEXT_CLASSPATH_PROPERTY.getKey() + "context1",
+ System.getProperty("user.dir") + "/src/test/resources/TestCompactionStrat.jar");
+ c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "10");
+ c.tableOperations().setProperty(tableName, Property.TABLE_CLASSPATH.getKey(), "context1");
+ // EfgCompactionStrat will only compact a tablet w/ end row of 'efg'. No other tablets are compacted.
+ c.tableOperations().setProperty(tableName, Property.TABLE_COMPACTION_STRATEGY.getKey(), "org.apache.accumulo.test.EfgCompactionStrat");
+
+ c.tableOperations().addSplits(tableName, new TreeSet<Text>(Arrays.asList(new Text("efg"))));
+
+ for (char ch = 'a'; ch < 'l'; ch++)
+ writeFlush(c, tableName, ch + "");
+
+ while (countFiles(c, tableName) != 7) {
+ UtilWaitThread.sleep(200);
+ }
+ }
+
+ private void writeFlush(Connector conn, String tablename, String row) throws Exception {
+ BatchWriter bw = conn.createBatchWriter(tablename, new BatchWriterConfig());
+ Mutation m = new Mutation(row);
+ m.put("", "", "");
+ bw.addMutation(m);
+ bw.close();
+ conn.tableOperations().flush(tablename, null, null, true);
+ }
+
+ final static Random r = new Random();
+
+ private void makeFile(Connector conn, String tablename) throws Exception {
+ BatchWriter bw = conn.createBatchWriter(tablename, new BatchWriterConfig());
+ byte[] empty = {};
+ byte[] row = new byte[10];
+ r.nextBytes(row);
+ Mutation m = new Mutation(row, 0, 10);
+ m.put(empty, empty, empty);
+ bw.addMutation(m);
+ bw.flush();
+ bw.close();
+ conn.tableOperations().flush(tablename, null, null, true);
+ }
+
+ private void runTest(final Connector c, final String tableName, final int n) throws Exception {
+ for (int i = countFiles(c, tableName); i < n - 1; i++)
+ makeFile(c, tableName);
+ Assert.assertEquals(n - 1, countFiles(c, tableName));
+ makeFile(c, tableName);
+ for (int i = 0; i < 10; i++) {
+ int count = countFiles(c, tableName);
+ assertTrue(count == 1 || count == n);
+ if (count == 1)
+ break;
+ UtilWaitThread.sleep(1000);
+ }
+ }
+
+ private int countFiles(Connector c, String tableName) throws Exception {
+ Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+ return Iterators.size(s.iterator());
+ }
+
+}
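
Strategy options ride along as table properties: keys under the strategy-options prefix are stripped of that prefix and delivered to CompactionStrategy.init() as the options map, which is how the "count" setting above reaches SimpleCompactionStrategy. A hedged sketch of the client-side half (the table name is hypothetical):

    // After this, init() on the tserver receives an options map containing {"count" -> "5"}.
    conn.tableOperations().setProperty("mytable",
        Property.TABLE_COMPACTION_STRATEGY.getKey(), SimpleCompactionStrategy.class.getName());
    conn.tableOperations().setProperty("mytable",
        Property.TABLE_COMPACTION_STRATEGY_PREFIX.getKey() + "count", "5");
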
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/ConfigurableMacBase.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ConfigurableMacBase.java b/test/src/main/java/org/apache/accumulo/test/functional/ConfigurableMacBase.java
new file mode 100644
index 0000000..b86fcfe
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ConfigurableMacBase.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Map;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.util.MonitorUtil;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.harness.AccumuloITBase;
+import org.apache.accumulo.minicluster.MiniAccumuloCluster;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.minicluster.impl.ZooKeeperBindException;
+import org.apache.accumulo.test.util.CertUtils;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * General integration-test base class that provides access to a {@link MiniAccumuloCluster} for testing. Tests using this class typically do very
+ * disruptive things to the instance and require specific configuration. Most tests don't need this level of control and should extend
+ * {@link AccumuloClusterHarness} instead.
+ */
+public class ConfigurableMacBase extends AccumuloITBase {
+ public static final Logger log = LoggerFactory.getLogger(ConfigurableMacBase.class);
+
+ protected MiniAccumuloClusterImpl cluster;
+
+ protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {}
+
+ protected void beforeClusterStart(MiniAccumuloConfigImpl cfg) throws Exception {}
+
+ protected static final String ROOT_PASSWORD = "testRootPassword1";
+
+ public static void configureForEnvironment(MiniAccumuloConfigImpl cfg, Class<?> testClass, File folder) {
+ if ("true".equals(System.getProperty("org.apache.accumulo.test.functional.useSslForIT"))) {
+ configureForSsl(cfg, folder);
+ }
+ if ("true".equals(System.getProperty("org.apache.accumulo.test.functional.useCredProviderForIT"))) {
+ cfg.setUseCredentialProvider(true);
+ }
+ }
+
+ protected static void configureForSsl(MiniAccumuloConfigImpl cfg, File sslDir) {
+ Map<String,String> siteConfig = cfg.getSiteConfig();
+ if ("true".equals(siteConfig.get(Property.INSTANCE_RPC_SSL_ENABLED.getKey()))) {
+ // already enabled; don't mess with it
+ return;
+ }
+
+ // create parent directories, and ensure sslDir is empty
+ assertTrue(sslDir.mkdirs() || sslDir.isDirectory());
+ FileUtils.deleteQuietly(sslDir);
+ assertTrue(sslDir.mkdir());
+
+ File rootKeystoreFile = new File(sslDir, "root-" + cfg.getInstanceName() + ".jks");
+ File localKeystoreFile = new File(sslDir, "local-" + cfg.getInstanceName() + ".jks");
+ File publicTruststoreFile = new File(sslDir, "public-" + cfg.getInstanceName() + ".jks");
+ final String rootKeystorePassword = "root_keystore_password", truststorePassword = "truststore_password";
+ try {
+ new CertUtils(Property.RPC_SSL_KEYSTORE_TYPE.getDefaultValue(), "o=Apache Accumulo,cn=MiniAccumuloCluster", "RSA", 2048, "sha1WithRSAEncryption")
+ .createAll(rootKeystoreFile, localKeystoreFile, publicTruststoreFile, cfg.getInstanceName(), rootKeystorePassword, cfg.getRootPassword(),
+ truststorePassword);
+ } catch (Exception e) {
+ throw new RuntimeException("error creating MAC keystore", e);
+ }
+
+ siteConfig.put(Property.INSTANCE_RPC_SSL_ENABLED.getKey(), "true");
+ siteConfig.put(Property.RPC_SSL_KEYSTORE_PATH.getKey(), localKeystoreFile.getAbsolutePath());
+ siteConfig.put(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey(), cfg.getRootPassword());
+ siteConfig.put(Property.RPC_SSL_TRUSTSTORE_PATH.getKey(), publicTruststoreFile.getAbsolutePath());
+ siteConfig.put(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey(), truststorePassword);
+ cfg.setSiteConfig(siteConfig);
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ createMiniAccumulo();
+ Exception lastException = null;
+ for (int i = 0; i < 3; i++) {
+ try {
+ cluster.start();
+ return;
+ } catch (ZooKeeperBindException e) {
+ lastException = e;
+ log.warn("Failed to start MiniAccumuloCluster, assumably due to ZooKeeper issues", lastException);
+ Thread.sleep(3000);
+ createMiniAccumulo();
+ }
+ }
+ throw new RuntimeException("Failed to start MiniAccumuloCluster after three attempts", lastException);
+ }
+
+ private void createMiniAccumulo() throws Exception {
+ // createTestDir will give us an empty directory; we don't need to clean it up ourselves
+ File baseDir = createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName());
+ MiniAccumuloConfigImpl cfg = new MiniAccumuloConfigImpl(baseDir, ROOT_PASSWORD);
+ String nativePathInDevTree = NativeMapIT.nativeMapLocation().getAbsolutePath();
+ String nativePathInMapReduce = new File(System.getProperty("user.dir")).toString();
+ cfg.setNativeLibPaths(nativePathInDevTree, nativePathInMapReduce);
+ cfg.setProperty(Property.GC_FILE_ARCHIVE, Boolean.TRUE.toString());
+ Configuration coreSite = new Configuration(false);
+ configure(cfg, coreSite);
+ cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, Boolean.TRUE.toString());
+ configureForEnvironment(cfg, getClass(), getSslDir(baseDir));
+ cluster = new MiniAccumuloClusterImpl(cfg);
+ if (coreSite.size() > 0) {
+ File csFile = new File(cluster.getConfig().getConfDir(), "core-site.xml");
+ if (csFile.exists())
+ throw new RuntimeException(csFile + " already exists");
+
+ OutputStream out = new BufferedOutputStream(new FileOutputStream(csFile));
+ coreSite.writeXml(out);
+ out.close();
+ }
+ beforeClusterStart(cfg);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ if (cluster != null)
+ try {
+ cluster.stop();
+ } catch (Exception e) {
+ // ignored
+ }
+ }
+
+ protected MiniAccumuloClusterImpl getCluster() {
+ return cluster;
+ }
+
+ protected Connector getConnector() throws AccumuloException, AccumuloSecurityException {
+ return getCluster().getConnector("root", new PasswordToken(ROOT_PASSWORD));
+ }
+
+ protected Process exec(Class<?> clazz, String... args) throws IOException {
+ return getCluster().exec(clazz, args);
+ }
+
+ protected String getMonitor() throws KeeperException, InterruptedException {
+ Instance instance = new ZooKeeperInstance(getCluster().getClientConfig());
+ return MonitorUtil.getLocation(instance);
+ }
+
+ protected ClientConfiguration getClientConfig() throws Exception {
+ return new ClientConfiguration(getCluster().getConfig().getClientConfFile());
+ }
+
+}
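
setUp() rebuilds and restarts the MiniAccumuloCluster up to three times because the randomly chosen ZooKeeper port can fail to bind. The same pattern as a generic, hedged sketch (Retry and its names are illustrative, not harness API):

    import java.util.concurrent.Callable;

    final class Retry {
      /** Runs action up to attempts times, keeping the last failure as the cause. */
      static <T> T run(int attempts, long sleepMs, Callable<T> action) throws InterruptedException {
        Exception last = null;
        for (int i = 0; i < attempts; i++) {
          try {
            return action.call();
          } catch (Exception e) {
            last = e;
            Thread.sleep(sleepMs); // back off before the next attempt
          }
        }
        throw new RuntimeException("Failed after " + attempts + " attempts", last);
      }
    }
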
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/ConstraintIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ConstraintIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ConstraintIT.java
new file mode 100644
index 0000000..4ef4a61
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ConstraintIT.java
@@ -0,0 +1,335 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.ConstraintViolationSummary;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint;
+import org.apache.accumulo.examples.simple.constraints.NumericValueConstraint;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ConstraintIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(ConstraintIT.class);
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 30;
+ }
+
+ @Test
+ public void run() throws Exception {
+ String[] tableNames = getUniqueNames(3);
+ Connector c = getConnector();
+ for (String table : tableNames) {
+ c.tableOperations().create(table);
+ c.tableOperations().addConstraint(table, NumericValueConstraint.class.getName());
+ c.tableOperations().addConstraint(table, AlphaNumKeyConstraint.class.getName());
+ }
+
+ // A static sleep to give ZooKeeper time to propagate the new constraints
+ Thread.sleep(10 * 1000);
+
+ // Then check that the client has at least gotten the updates
+ for (String table : tableNames) {
+ log.debug("Checking constraints on {}", table);
+ Map<String,Integer> constraints = c.tableOperations().listConstraints(table);
+ while (!constraints.containsKey(NumericValueConstraint.class.getName()) || !constraints.containsKey(AlphaNumKeyConstraint.class.getName())) {
+ log.debug("Failed to verify constraints. Sleeping and retrying");
+ Thread.sleep(2000);
+ constraints = c.tableOperations().listConstraints(table);
+ }
+ log.debug("Verified all constraints on {}", table);
+ }
+
+ log.debug("Verified constraints on all tables. Running tests");
+
+ test1(tableNames[0]);
+
+ test2(tableNames[1], false);
+ test2(tableNames[2], true);
+ }
+
+ private void test1(String tableName) throws Exception {
+ BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
+
+ Mutation mut1 = new Mutation(new Text("r1"));
+ mut1.put(new Text("cf1"), new Text("cq1"), new Value("123".getBytes(UTF_8)));
+
+ bw.addMutation(mut1);
+
+ // should not throw any exceptions
+ bw.close();
+
+ bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
+
+ // create a mutation with a non numeric value
+ Mutation mut2 = new Mutation(new Text("r1"));
+ mut2.put(new Text("cf1"), new Text("cq1"), new Value("123a".getBytes(UTF_8)));
+
+ bw.addMutation(mut2);
+
+ boolean sawMRE = false;
+
+ try {
+ bw.close();
+ // should not get here
+ throw new Exception("Test failed, constraint did not catch bad mutation");
+ } catch (MutationsRejectedException mre) {
+ sawMRE = true;
+
+ // verify constraint violation summary
+ List<ConstraintViolationSummary> cvsl = mre.getConstraintViolationSummaries();
+
+ if (cvsl.size() != 1) {
+ throw new Exception("Unexpected constraints");
+ }
+
+ for (ConstraintViolationSummary cvs : cvsl) {
+ if (!cvs.constrainClass.equals(NumericValueConstraint.class.getName())) {
+ throw new Exception("Unexpected constraint class " + cvs.constrainClass);
+ }
+
+ if (cvs.numberOfViolatingMutations != 1) {
+ throw new Exception("Unexpected # violating mutations " + cvs.numberOfViolatingMutations);
+ }
+ }
+ }
+
+ if (!sawMRE) {
+ throw new Exception("Did not see MutationsRejectedException");
+ }
+
+ // verify mutation did not go through
+ Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
+ scanner.setRange(new Range(new Text("r1")));
+
+ Iterator<Entry<Key,Value>> iter = scanner.iterator();
+ Entry<Key,Value> entry = iter.next();
+
+ if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
+ || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123".getBytes(UTF_8)))) {
+ throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
+ }
+
+ if (iter.hasNext()) {
+ entry = iter.next();
+ throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
+ }
+
+ // remove the numeric value constraint
+ getConnector().tableOperations().removeConstraint(tableName, 2);
+ UtilWaitThread.sleep(1000);
+
+ // now should be able to add a non numeric value
+ bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
+ bw.addMutation(mut2);
+ bw.close();
+
+ // verify mutation went through
+ iter = scanner.iterator();
+ entry = iter.next();
+
+ if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
+ || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123a".getBytes(UTF_8)))) {
+ throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
+ }
+
+ if (iter.hasNext()) {
+ entry = iter.next();
+ throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
+ }
+
+ // add a constraint that references a non-existent class
+ getConnector().tableOperations().setProperty(tableName, Property.TABLE_CONSTRAINT_PREFIX + "1", "com.foobar.nonExistantClass");
+ UtilWaitThread.sleep(1000);
+
+ // add a mutation
+ bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
+
+ Mutation mut3 = new Mutation(new Text("r1"));
+ mut3.put(new Text("cf1"), new Text("cq1"), new Value("foo".getBytes(UTF_8)));
+
+ bw.addMutation(mut3);
+
+ sawMRE = false;
+
+ try {
+ bw.close();
+ // should not get here
+ throw new Exception("Test failed, mutation went through when table had bad constraints");
+ } catch (MutationsRejectedException mre) {
+ sawMRE = true;
+ }
+
+ if (!sawMRE) {
+ throw new Exception("Did not see MutationsRejectedException");
+ }
+
+ // verify the mutation did not go through
+ iter = scanner.iterator();
+ entry = iter.next();
+
+ if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
+ || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123a".getBytes(UTF_8)))) {
+ throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
+ }
+
+ if (iter.hasNext()) {
+ entry = iter.next();
+ throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
+ }
+
+ // remove the bad constraint
+ getConnector().tableOperations().removeConstraint(tableName, 1);
+ UtilWaitThread.sleep(1000);
+
+ // try the mutation again
+ bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
+ bw.addMutation(mut3);
+ bw.close();
+
+ // verify it went through
+ iter = scanner.iterator();
+ entry = iter.next();
+
+ if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
+ || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("foo".getBytes(UTF_8)))) {
+ throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
+ }
+
+ if (iter.hasNext()) {
+ entry = iter.next();
+ throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
+ }
+ }
+
+ private Mutation newMut(String row, String cf, String cq, String val) {
+ Mutation mut1 = new Mutation(new Text(row));
+ mut1.put(new Text(cf), new Text(cq), new Value(val.getBytes(UTF_8)));
+ return mut1;
+ }
+
+ private void test2(String table, boolean doFlush) throws Exception {
+ // test sending multiple mutations with multiple constraint violations... all of the non-violating mutations
+ // should go through
+ int numericErrors = 2;
+
+ BatchWriter bw = getConnector().createBatchWriter(table, new BatchWriterConfig());
+ bw.addMutation(newMut("r1", "cf1", "cq1", "123"));
+ bw.addMutation(newMut("r1", "cf1", "cq2", "I'm a bad value"));
+ if (doFlush) {
+ try {
+ bw.flush();
+ throw new Exception("Didn't find a bad mutation");
+ } catch (MutationsRejectedException mre) {
+ // ignored
+ try {
+ bw.close();
+ } catch (MutationsRejectedException ex) {
+ // ignored
+ }
+ bw = getConnector().createBatchWriter(table, new BatchWriterConfig());
+ numericErrors = 1;
+ }
+ }
+ bw.addMutation(newMut("r1", "cf1", "cq3", "I'm a naughty value"));
+ bw.addMutation(newMut("@bad row@", "cf1", "cq2", "456"));
+ bw.addMutation(newMut("r1", "cf1", "cq4", "789"));
+
+ boolean sawMRE = false;
+
+ try {
+ bw.close();
+ // should not get here
+ throw new Exception("Test failed, constraint did not catch bad mutation");
+ } catch (MutationsRejectedException mre) {
+ System.out.println(mre);
+
+ sawMRE = true;
+
+ // verify constraint violation summary
+ List<ConstraintViolationSummary> cvsl = mre.getConstraintViolationSummaries();
+
+ if (cvsl.size() != 2) {
+ throw new Exception("Unexpected constraints");
+ }
+
+ HashMap<String,Integer> expected = new HashMap<String,Integer>();
+
+ expected.put("org.apache.accumulo.examples.simple.constraints.NumericValueConstraint", numericErrors);
+ expected.put("org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint", 1);
+
+ for (ConstraintViolationSummary cvs : cvsl) {
+ if (expected.get(cvs.constrainClass) != cvs.numberOfViolatingMutations) {
+ throw new Exception("Unexpected " + cvs.constrainClass + " " + cvs.numberOfViolatingMutations);
+ }
+ }
+ }
+
+ if (!sawMRE) {
+ throw new Exception("Did not see MutationsRejectedException");
+ }
+
+ Scanner scanner = getConnector().createScanner(table, Authorizations.EMPTY);
+
+ Iterator<Entry<Key,Value>> iter = scanner.iterator();
+
+ Entry<Key,Value> entry = iter.next();
+
+ if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
+ || !entry.getKey().getColumnQualifier().equals(new Text("cq1")) || !entry.getValue().equals(new Value("123".getBytes(UTF_8)))) {
+ throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
+ }
+
+ entry = iter.next();
+
+ if (!entry.getKey().getRow().equals(new Text("r1")) || !entry.getKey().getColumnFamily().equals(new Text("cf1"))
+ || !entry.getKey().getColumnQualifier().equals(new Text("cq4")) || !entry.getValue().equals(new Value("789".getBytes(UTF_8)))) {
+ throw new Exception("Unexpected key or value " + entry.getKey() + " " + entry.getValue());
+ }
+
+ if (iter.hasNext()) {
+ entry = iter.next();
+ throw new Exception("Unexpected extra key or value " + entry.getKey() + " " + entry.getValue());
+ }
+
+ }
+
+}
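
Constraint failures surface only when the batch writer flushes: flush() or close() throws MutationsRejectedException carrying one summary per violated constraint. A minimal hedged sketch of inspecting the summaries (writer setup omitted):

    try {
      bw.close();
    } catch (MutationsRejectedException mre) {
      for (ConstraintViolationSummary cvs : mre.getConstraintViolationSummaries()) {
        System.out.printf("%s rejected %d mutation(s)%n",
            cvs.constrainClass, cvs.numberOfViolatingMutations);
      }
    }
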
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/CreateAndUseIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CreateAndUseIT.java b/test/src/main/java/org/apache/accumulo/test/functional/CreateAndUseIT.java
new file mode 100644
index 0000000..b2373e6
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CreateAndUseIT.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class CreateAndUseIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 4 * 60;
+ }
+
+ private static SortedSet<Text> splits;
+
+ @BeforeClass
+ public static void createSplits() throws Exception {
+ splits = new TreeSet<Text>();
+
+ for (int i = 1; i < 256; i++) {
+ splits.add(new Text(String.format("%08x", i << 8)));
+ }
+ }
+
+ @Test
+ public void verifyDataIsPresent() throws Exception {
+ Text cf = new Text("cf1");
+ Text cq = new Text("cq1");
+
+ String tableName = getUniqueNames(1)[0];
+ getConnector().tableOperations().create(tableName);
+ getConnector().tableOperations().addSplits(tableName, splits);
+ BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
+
+ for (int i = 1; i < 257; i++) {
+ Mutation m = new Mutation(new Text(String.format("%08x", (i << 8) - 16)));
+ m.put(cf, cq, new Value(Integer.toString(i).getBytes(UTF_8)));
+
+ bw.addMutation(m);
+ }
+
+ bw.close();
+ Scanner scanner1 = getConnector().createScanner(tableName, Authorizations.EMPTY);
+
+ int ei = 1;
+
+ for (Entry<Key,Value> entry : scanner1) {
+ Assert.assertEquals(String.format("%08x", (ei << 8) - 16), entry.getKey().getRow().toString());
+ Assert.assertEquals(Integer.toString(ei), entry.getValue().toString());
+
+ ei++;
+ }
+
+ Assert.assertEquals("Did not see expected number of rows", 257, ei);
+ }
+
+ @Test
+ public void createTableAndScan() throws Exception {
+ String table2 = getUniqueNames(1)[0];
+ getConnector().tableOperations().create(table2);
+ getConnector().tableOperations().addSplits(table2, splits);
+ Scanner scanner2 = getConnector().createScanner(table2, Authorizations.EMPTY);
+ int count = 0;
+ for (Entry<Key,Value> entry : scanner2) {
+ if (entry != null)
+ count++;
+ }
+
+ if (count != 0) {
+ throw new Exception("Did not see expected number of entries, count = " + count);
+ }
+ }
+
+ @Test
+ public void createTableAndBatchScan() throws Exception {
+ ArrayList<Range> ranges = new ArrayList<Range>();
+ for (int i = 1; i < 257; i++) {
+ ranges.add(new Range(new Text(String.format("%08x", (i << 8) - 16))));
+ }
+
+ String table3 = getUniqueNames(1)[0];
+ getConnector().tableOperations().create(table3);
+ getConnector().tableOperations().addSplits(table3, splits);
+ BatchScanner bs = getConnector().createBatchScanner(table3, Authorizations.EMPTY, 3);
+ bs.setRanges(ranges);
+ Iterator<Entry<Key,Value>> iter = bs.iterator();
+ int count = Iterators.size(iter);
+ bs.close();
+
+ Assert.assertEquals("Did not expect to find any entries", 0, count);
+ }
+
+}
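
The row-key scheme in verifyDataIsPresent() puts each ingested row just below a split point: splits sit at i << 8 and rows at (i << 8) - 16, both rendered as zero-padded hex. A worked example for i = 1:

    int i = 1;
    String split = String.format("%08x", i << 8);      // "00000100"
    String row = String.format("%08x", (i << 8) - 16); // "000000f0" (0x100 - 0x10)
    // Row 000000f0 sorts immediately before split 00000100, so each of the 256
    // rows lands in its own tablet.
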
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java b/test/src/main/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java
new file mode 100644
index 0000000..79151ee
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.junit.Test;
+
+public class CreateManyScannersIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Test
+ public void run() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ for (int i = 0; i < 100000; i++) {
+ c.createScanner(tableName, Authorizations.EMPTY);
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/CredentialsIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CredentialsIT.java b/test/src/main/java/org/apache/accumulo/test/functional/CredentialsIT.java
new file mode 100644
index 0000000..b383d0a
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CredentialsIT.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.security.SecurityErrorCode;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class CredentialsIT extends AccumuloClusterHarness {
+
+ private boolean saslEnabled;
+ private String username;
+ private String password;
+ private Instance inst;
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Before
+ public void createLocalUser() throws AccumuloException, AccumuloSecurityException {
+ Connector conn = getConnector();
+ inst = conn.getInstance();
+
+ ClientConfiguration clientConf = cluster.getClientConfig();
+ ClusterUser user = getUser(0);
+ username = user.getPrincipal();
+ saslEnabled = clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false);
+ // Create the user if it doesn't exist
+ Set<String> users = conn.securityOperations().listLocalUsers();
+ if (!users.contains(username)) {
+ PasswordToken passwdToken = null;
+ if (!saslEnabled) {
+ password = user.getPassword();
+ passwdToken = new PasswordToken(password);
+ }
+ conn.securityOperations().createLocalUser(username, passwdToken);
+ }
+ }
+
+ @After
+ public void deleteLocalUser() throws Exception {
+ if (saslEnabled) {
+ ClusterUser root = getAdminUser();
+ UserGroupInformation.loginUserFromKeytab(root.getPrincipal(), root.getKeytab().getAbsolutePath());
+ }
+ getConnector().securityOperations().dropLocalUser(username);
+ }
+
+ @Test
+ public void testConnectorWithDestroyedToken() throws Exception {
+ AuthenticationToken token = getUser(0).getToken();
+ assertFalse(token.isDestroyed());
+ token.destroy();
+ assertTrue(token.isDestroyed());
+ try {
+ inst.getConnector("non_existent_user", token);
+ fail();
+ } catch (AccumuloSecurityException e) {
+ assertTrue(e.getSecurityErrorCode().equals(SecurityErrorCode.TOKEN_EXPIRED));
+ }
+ }
+
+ @Test
+ public void testDestroyTokenBeforeRPC() throws Exception {
+ AuthenticationToken token = getUser(0).getToken();
+ Connector userConnector = inst.getConnector(username, token);
+ Scanner scanner = userConnector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ assertFalse(token.isDestroyed());
+ token.destroy();
+ assertTrue(token.isDestroyed());
+ try {
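+ // Iterating should fail: the first RPC authenticates with the destroyed
+ // token and the server rejects it with TOKEN_EXPIRED.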
+ Iterator<Entry<Key,Value>> iter = scanner.iterator();
+ while (iter.hasNext())
+ fail();
+ fail();
+ } catch (Exception e) {
+ assertTrue(e instanceof RuntimeException);
+ assertTrue(e.getCause() instanceof AccumuloSecurityException);
+ assertTrue(AccumuloSecurityException.class.cast(e.getCause()).getSecurityErrorCode().equals(SecurityErrorCode.TOKEN_EXPIRED));
+ }
+ }
+
+}
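
Both tests hinge on AuthenticationToken implementing javax.security.auth.Destroyable: destroying a token wipes its credential material, and any later RPC that tries to authenticate with it fails with TOKEN_EXPIRED. A brief hedged sketch (the password is made up):

    PasswordToken token = new PasswordToken("not-a-real-password");
    token.destroy();                         // wipes the stored bytes
    System.out.println(token.isDestroyed()); // true; subsequent RPCs are rejected
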
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/DeleteEverythingIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/DeleteEverythingIT.java b/test/src/main/java/org/apache/accumulo/test/functional/DeleteEverythingIT.java
new file mode 100644
index 0000000..2650c89
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/DeleteEverythingIT.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.Assert.assertEquals;
+
+import java.util.Map;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Iterators;
+
+public class DeleteEverythingIT extends AccumuloClusterHarness {
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ Map<String,String> siteConfig = cfg.getSiteConfig();
+ siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1s");
+ cfg.setSiteConfig(siteConfig);
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ private String majcDelay;
+
+ @Before
+ public void updateMajcDelay() throws Exception {
+ Connector c = getConnector();
+ majcDelay = c.instanceOperations().getSystemConfiguration().get(Property.TSERV_MAJC_DELAY.getKey());
+ c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), "1s");
+ if (getClusterType() == ClusterType.STANDALONE) {
+ // Wait out the previous majc delay so the tservers pick up the new setting
+ Thread.sleep(AccumuloConfiguration.getTimeInMillis(majcDelay));
+ }
+ }
+
+ @After
+ public void resetMajcDelay() throws Exception {
+ Connector c = getConnector();
+ c.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
+ }
+
+ @Test
+ public void run() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
+ Mutation m = new Mutation(new Text("foo"));
+ m.put(new Text("bar"), new Text("1910"), new Value("5".getBytes(UTF_8)));
+ bw.addMutation(m);
+ bw.flush();
+
+ getConnector().tableOperations().flush(tableName, null, null, true);
+
+ FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 1, 1);
+
+ m = new Mutation(new Text("foo"));
+ m.putDelete(new Text("bar"), new Text("1910"));
+ bw.addMutation(m);
+ bw.flush();
+
+ Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
+ scanner.setRange(new Range());
+ int count = Iterators.size(scanner.iterator());
+ assertEquals("count == " + count, 0, count);
+ getConnector().tableOperations().flush(tableName, null, null, true);
+
+ getConnector().tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1.0");
+ UtilWaitThread.sleep(4000);
+
+ FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 0, 0);
+
+ bw.close();
+
+ count = Iterables.size(scanner);
+
+ if (count != 0)
+ throw new Exception("count == " + count);
+ }
+}
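
Worth spelling out what DeleteEverythingIT exercises: a delete in Accumulo is itself a mutation, writing a marker that sorts ahead of the cell it suppresses, and the marker is only discarded once a full major compaction rewrites the tablet's files (hence the TABLE_MAJC_RATIO nudge above). A minimal sketch of that behavior, assuming a live Connector and a pre-created table named "demo" (both hypothetical):

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.security.Authorizations;

    static void deleteMarkerSketch(Connector c) throws Exception {
      BatchWriter bw = c.createBatchWriter("demo", new BatchWriterConfig());
      Mutation insert = new Mutation("row1");
      insert.put("cf", "cq", "v1");     // write one cell
      bw.addMutation(insert);
      Mutation delete = new Mutation("row1");
      delete.putDelete("cf", "cq");     // marker that suppresses the cell
      bw.addMutation(delete);
      bw.close();
      // Scans now return nothing, but the marker itself persists in the
      // tablet's files until a full major compaction rewrites them.
      Scanner scanner = c.createScanner("demo", Authorizations.EMPTY);
      System.out.println(scanner.iterator().hasNext());   // prints false
    }
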
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/DeleteIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/DeleteIT.java b/test/src/main/java/org/apache/accumulo/test/functional/DeleteIT.java
new file mode 100644
index 0000000..79c4e60
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/DeleteIT.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.accumulo.cluster.AccumuloCluster;
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ClientOpts.Password;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.KerberosToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.TestRandomDeletes;
+import org.apache.accumulo.test.VerifyIngest;
+import org.junit.Test;
+
+import com.google.common.base.Charsets;
+
+public class DeleteIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ AuthenticationToken token = getAdminToken();
+ if (token instanceof KerberosToken) {
+ deleteTest(c, getCluster(), getAdminPrincipal(), null, tableName, getAdminUser().getKeytab().getAbsolutePath());
+ } else if (token instanceof PasswordToken) {
+ PasswordToken passwdToken = (PasswordToken) token;
+ deleteTest(c, getCluster(), getAdminPrincipal(), new String(passwdToken.getPassword(), Charsets.UTF_8), tableName, null);
+ }
+ }
+
+ public static void deleteTest(Connector c, AccumuloCluster cluster, String user, String password, String tableName, String keytab) throws Exception {
+ VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+ TestIngest.Opts opts = new TestIngest.Opts();
+ vopts.setTableName(tableName);
+ opts.setTableName(tableName);
+ vopts.rows = opts.rows = 1000;
+ vopts.cols = opts.cols = 1;
+ vopts.random = opts.random = 56;
+
+ assertTrue("Expected one of password or keytab", null != password || null != keytab);
+ if (null != password) {
+ assertNull("Given password, expected null keytab", keytab);
+ Password passwd = new Password(password);
+ opts.setPassword(passwd);
+ opts.setPrincipal(user);
+ vopts.setPassword(passwd);
+ vopts.setPrincipal(user);
+ }
+ if (null != keytab) {
+ assertNull("Given keytab, expect null password", password);
+ ClientConfiguration clientConfig = cluster.getClientConfig();
+ opts.updateKerberosCredentials(clientConfig);
+ vopts.updateKerberosCredentials(clientConfig);
+ }
+
+ BatchWriterOpts BWOPTS = new BatchWriterOpts();
+ TestIngest.ingest(c, opts, BWOPTS);
+
+ String[] args = null;
+
+ assertTrue("Expected one of password or keytab", null != password || null != keytab);
+ if (null != password) {
+ assertNull("Given password, expected null keytab", keytab);
+ args = new String[] {"-u", user, "-p", password, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "--table", tableName};
+ }
+ if (null != keytab) {
+ assertNull("Given keytab, expect null password", password);
+ args = new String[] {"-u", user, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "--table", tableName, "--keytab", keytab};
+ }
+
+ assertEquals(0, cluster.getClusterControl().exec(TestRandomDeletes.class, args));
+ TestIngest.ingest(c, opts, BWOPTS);
+ VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+ }
+
+}
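
The branch on the token type in DeleteIT is the usual way a test supports both password-secured and Kerberized clusters: inspect the concrete AuthenticationToken and build matching credentials. A condensed sketch of the dispatch (the JDK's StandardCharsets.UTF_8 is the standard-library equivalent of Guava's Charsets.UTF_8 used above):

    import java.nio.charset.StandardCharsets;
    import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
    import org.apache.accumulo.core.client.security.tokens.KerberosToken;
    import org.apache.accumulo.core.client.security.tokens.PasswordToken;

    static void dispatchOnToken(AuthenticationToken token) {
      if (token instanceof KerberosToken) {
        // SASL cluster: hand the principal's keytab path to spawned tools
      } else if (token instanceof PasswordToken) {
        byte[] raw = ((PasswordToken) token).getPassword();
        String password = new String(raw, StandardCharsets.UTF_8);
        // password cluster: pass -u/-p style arguments instead
      }
    }
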
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsIT.java b/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsIT.java
new file mode 100644
index 0000000..e4a8451
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsIT.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Iterators;
+
+public class DeleteRowsIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 10 * 60;
+ }
+
+ private static final Logger log = LoggerFactory.getLogger(DeleteRowsIT.class);
+
+ private static final int ROWS_PER_TABLET = 10;
+ private static final String[] LETTERS = new String[] {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t",
+ "u", "v", "w", "x", "y", "z"};
+ static final SortedSet<Text> SPLITS = new TreeSet<Text>();
+ static {
+ for (String alpha : LETTERS) {
+ SPLITS.add(new Text(alpha));
+ }
+ }
+ static final List<String> ROWS = new ArrayList<String>(Arrays.asList(LETTERS));
+ static {
+ // put data on first and last tablet
+ ROWS.add("A");
+ ROWS.add("{");
+ }
+
+ @Test(timeout = 5 * 60 * 1000)
+ public void testDeleteAllRows() throws Exception {
+ Connector c = getConnector();
+ String[] tableNames = this.getUniqueNames(20);
+ for (String tableName : tableNames) {
+ c.tableOperations().create(tableName);
+ c.tableOperations().deleteRows(tableName, null, null);
+ Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
+ assertEquals(0, Iterators.size(scanner.iterator()));
+ }
+ }
+
+ @Test
+ public void testManyRows() throws Exception {
+ // Delete ranges of rows, and verify the corresponding tablets are removed.
+ int i = 0;
+ // Eliminate whole tablets
+ String tableName = getUniqueNames(1)[0];
+ testSplit(tableName + i++, "f", "h", "abcdefijklmnopqrstuvwxyz", 260);
+ // Eliminate whole tablets, partial first tablet
+ testSplit(tableName + i++, "f1", "h", "abcdeff1ijklmnopqrstuvwxyz", 262);
+ // Eliminate whole tablets, partial last tablet
+ testSplit(tableName + i++, "f", "h1", "abcdefijklmnopqrstuvwxyz", 258);
+ // Eliminate whole tablets, partial first and last tablet
+ testSplit(tableName + i++, "f1", "h1", "abcdeff1ijklmnopqrstuvwxyz", 260);
+ // Eliminate one tablet
+ testSplit(tableName + i++, "f", "g", "abcdefhijklmnopqrstuvwxyz", 270);
+ // Eliminate partial tablet, matches start split
+ testSplit(tableName + i++, "f", "f1", "abcdefghijklmnopqrstuvwxyz", 278);
+ // Eliminate partial tablet, matches end split
+ testSplit(tableName + i++, "f1", "g", "abcdeff1hijklmnopqrstuvwxyz", 272);
+ // Eliminate tablets starting at -inf
+ testSplit(tableName + i++, null, "h", "ijklmnopqrstuvwxyz", 200);
+ // Eliminate tablets ending at +inf
+ testSplit(tableName + i++, "t", null, "abcdefghijklmnopqrst", 200);
+ // Eliminate some rows inside one tablet
+ testSplit(tableName + i++, "t0", "t2", "abcdefghijklmnopqrstt0uvwxyz", 278);
+ // Eliminate some rows in the first tablet
+ testSplit(tableName + i++, null, "A1", "abcdefghijklmnopqrstuvwxyz", 278);
+ // Eliminate some rows in the last tablet
+ testSplit(tableName + i++, "{1", null, "abcdefghijklmnopqrstuvwxyz{1", 272);
+ // Delete everything
+ testSplit(tableName + i++, null, null, "", 0);
+ }
+
+ private void testSplit(String table, String start, String end, String result, int entries) throws Exception {
+ // Put a bunch of rows on each tablet
+ Connector c = getConnector();
+ c.tableOperations().create(table);
+ BatchWriter bw = c.createBatchWriter(table, null);
+ for (String row : ROWS) {
+ for (int j = 0; j < ROWS_PER_TABLET; j++) {
+ Mutation m = new Mutation(row + j);
+ m.put("cf", "cq", "value");
+ bw.addMutation(m);
+ }
+ }
+ bw.flush();
+ bw.close();
+ // Split the table
+ c.tableOperations().addSplits(table, SPLITS);
+
+ Text startText = start == null ? null : new Text(start);
+ Text endText = end == null ? null : new Text(end);
+ c.tableOperations().deleteRows(table, startText, endText);
+ Collection<Text> remainingSplits = c.tableOperations().listSplits(table);
+ StringBuilder sb = new StringBuilder();
+ // See that whole tablets are removed
+ for (Text split : remainingSplits)
+ sb.append(split.toString());
+ assertEquals(result, sb.toString());
+ // See that the rows are really deleted
+ Scanner scanner = c.createScanner(table, Authorizations.EMPTY);
+ int count = 0;
+ for (Entry<Key,Value> entry : scanner) {
+ Text row = entry.getKey().getRow();
+ assertTrue((startText == null || row.compareTo(startText) <= 0) || (endText == null || row.compareTo(endText) > 0));
+ assertTrue(startText != null || endText != null);
+ count++;
+ }
+ log.info("Finished table " + table);
+ assertEquals(entries, count);
+ }
+
+}
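
The assertions above encode the contract of TableOperations.deleteRows(table, start, end): rows strictly greater than start and up to and including end are removed, a null bound extends the range to infinity on that side, and tablets falling entirely inside the range are dropped outright (which is why the split list shrinks). A minimal sketch against a hypothetical table "demo":

    import org.apache.accumulo.core.client.Connector;
    import org.apache.hadoop.io.Text;

    static void deleteRowsSketch(Connector c) throws Exception {
      // removes every row r with "f" < r <= "h"; row "f" itself survives
      c.tableOperations().deleteRows("demo", new Text("f"), new Text("h"));
      // null bounds mean -infinity / +infinity; both null empties the table
      c.tableOperations().deleteRows("demo", null, null);
    }
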
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java b/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
new file mode 100644
index 0000000..dcc3124
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/DeleteRowsSplitIT.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+// attempt to reproduce ACCUMULO-315
+public class DeleteRowsSplitIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 4 * 60;
+ }
+
+ private static final Logger log = LoggerFactory.getLogger(DeleteRowsSplitIT.class);
+
+ private static final String LETTERS = "abcdefghijklmnopqrstuvwxyz";
+ static final SortedSet<Text> SPLITS = new TreeSet<Text>();
+ static final List<String> ROWS = new ArrayList<String>();
+ static {
+ for (byte b : LETTERS.getBytes(UTF_8)) {
+ SPLITS.add(new Text(new byte[] {b}));
+ ROWS.add(new String(new byte[] {b}, UTF_8));
+ }
+ }
+
+ @Test
+ public void run() throws Exception {
+ // Delete ranges of rows, and verify they are removed
+ // Do this while adding many splits
+ final String tableName = getUniqueNames(1)[0];
+ final Connector conn = getConnector();
+
+ // Eliminate whole tablets
+ for (int test = 0; test < 10; test++) {
+ // create a table
+ log.info("Test " + test);
+ conn.tableOperations().create(tableName);
+
+ // put some data in it
+ fillTable(conn, tableName);
+
+ // generate a random delete range
+ final Text start = new Text();
+ final Text end = new Text();
+ generateRandomRange(start, end);
+
+ // initiate the delete range
+ final boolean fail[] = {false};
+ Thread t = new Thread() {
+ @Override
+ public void run() {
+ try {
+ // split the table
+ final SortedSet<Text> afterEnd = SPLITS.tailSet(new Text(end.toString() + "\0"));
+ conn.tableOperations().addSplits(tableName, afterEnd);
+ } catch (Exception ex) {
+ log.error("Exception", ex);
+ synchronized (fail) {
+ fail[0] = true;
+ }
+ }
+ }
+ };
+ t.start();
+
+ UtilWaitThread.sleep(test * 2);
+
+ conn.tableOperations().deleteRows(tableName, start, end);
+
+ t.join();
+ synchronized (fail) {
+ assertTrue(!fail[0]);
+ }
+
+ // scan the table
+ Scanner scanner = conn.createScanner(tableName, Authorizations.EMPTY);
+ for (Entry<Key,Value> entry : scanner) {
+ Text row = entry.getKey().getRow();
+ assertTrue(row.compareTo(start) <= 0 || row.compareTo(end) > 0);
+ }
+
+ // delete the table
+ conn.tableOperations().delete(tableName);
+ }
+ }
+
+ private void generateRandomRange(Text start, Text end) {
+ List<String> bunch = new ArrayList<String>(ROWS);
+ Collections.shuffle(bunch);
+ if (bunch.get(0).compareTo(bunch.get(1)) < 0) {
+ start.set(bunch.get(0));
+ end.set(bunch.get(1));
+ } else {
+ start.set(bunch.get(1));
+ end.set(bunch.get(0));
+ }
+
+ }
+
+ private void fillTable(Connector conn, String table) throws Exception {
+ BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+ for (String row : ROWS) {
+ Mutation m = new Mutation(row);
+ m.put("cf", "cq", "value");
+ bw.addMutation(m);
+ }
+ bw.close();
+ }
+}
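
One detail in the splitter thread deserves a note: tailSet is inclusive, so the code seeks to end + "\0", the immediate lexicographic successor of end, to pick up only splits strictly past the delete range's endpoint. The same trick in isolation, as a small helper:

    import java.util.SortedSet;
    import org.apache.hadoop.io.Text;

    static SortedSet<Text> splitsStrictlyAfter(SortedSet<Text> splits, Text end) {
      // tailSet is inclusive, so seek to end's immediate successor: end + "\0"
      return splits.tailSet(new Text(end.toString() + "\0"));
    }
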
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/DeleteTableDuringSplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/DeleteTableDuringSplitIT.java b/test/src/main/java/org/apache/accumulo/test/functional/DeleteTableDuringSplitIT.java
new file mode 100644
index 0000000..7c94163
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/DeleteTableDuringSplitIT.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertFalse;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.concurrent.Future;
+
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.util.SimpleThreadPool;
+import org.apache.accumulo.fate.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+import org.junit.Test;
+
+// ACCUMULO-2361
+public class DeleteTableDuringSplitIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 15 * 60;
+ }
+
+ @Test
+ public void test() throws Exception {
+ // 96 invocations, 8 at a time
+ int batches = 12, batchSize = 8;
+ String[] tableNames = getUniqueNames(batches * batchSize);
+ // make a bunch of tables
+ for (String tableName : tableNames) {
+ getConnector().tableOperations().create(tableName);
+ }
+ final SortedSet<Text> splits = new TreeSet<Text>();
+ for (byte i = 0; i < 100; i++) {
+ splits.add(new Text(new byte[] {0, 0, i}));
+ }
+
+ List<Future<?>> results = new ArrayList<Future<?>>();
+ List<Runnable> tasks = new ArrayList<Runnable>();
+ SimpleThreadPool es = new SimpleThreadPool(batchSize * 2, "concurrent-api-requests");
+ for (String tableName : tableNames) {
+ final String finalName = tableName;
+ tasks.add(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ getConnector().tableOperations().addSplits(finalName, splits);
+ } catch (TableNotFoundException ex) {
+ // expected, ignore
+ } catch (Exception ex) {
+ throw new RuntimeException(finalName, ex);
+ }
+ }
+ });
+ tasks.add(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ UtilWaitThread.sleep(500);
+ getConnector().tableOperations().delete(finalName);
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+ });
+ }
+ Iterator<Runnable> itr = tasks.iterator();
+ for (int batch = 0; batch < batches; batch++) {
+ for (int i = 0; i < batchSize; i++) {
+ Future<?> f = es.submit(itr.next());
+ results.add(f);
+ f = es.submit(itr.next());
+ results.add(f);
+ }
+ for (Future<?> f : results) {
+ f.get();
+ }
+ results.clear();
+ }
+ // Shut down the ES
+ List<Runnable> queued = es.shutdownNow();
+ Assert.assertTrue("Had more tasks to run", queued.isEmpty());
+ Assert.assertFalse("Had more tasks that needed to be submitted", itr.hasNext());
+ for (String tableName : tableNames) {
+ assertFalse(getConnector().tableOperations().exists(tableName));
+ }
+ }
+
+}
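
SimpleThreadPool here is essentially a named-thread wrapper over java.util.concurrent.ThreadPoolExecutor; the essence of the test is racing addSplits against delete on the same table and accepting TableNotFoundException in whichever operation loses. A reduced sketch using the plain JDK executor (the Connector, split set, and table name "demo" are hypothetical):

    import java.util.SortedSet;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.TableNotFoundException;
    import org.apache.hadoop.io.Text;

    static void raceSplitAndDelete(final Connector conn, final SortedSet<Text> splits) throws Exception {
      ExecutorService es = Executors.newFixedThreadPool(2);
      es.submit(new Runnable() {
        @Override
        public void run() {
          try {
            conn.tableOperations().addSplits("demo", splits);
          } catch (TableNotFoundException e) {
            // expected when the delete wins the race
          } catch (Exception e) {
            throw new RuntimeException(e);
          }
        }
      });
      es.submit(new Runnable() {
        @Override
        public void run() {
          try {
            conn.tableOperations().delete("demo");
          } catch (Exception e) {
            throw new RuntimeException(e);
          }
        }
      });
      es.shutdown();
      es.awaitTermination(1, TimeUnit.MINUTES);
    }
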
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/DeletedTablesDontFlushIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/DeletedTablesDontFlushIT.java b/test/src/main/java/org/apache/accumulo/test/functional/DeletedTablesDontFlushIT.java
new file mode 100644
index 0000000..ca8003a
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/DeletedTablesDontFlushIT.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.EnumSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
+import org.apache.accumulo.fate.util.UtilWaitThread;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
+import org.junit.Test;
+
+// ACCUMULO-2880
+public class DeletedTablesDontFlushIT extends SharedMiniClusterBase {
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ IteratorSetting setting = new IteratorSetting(100, SlowIterator.class);
+ SlowIterator.setSleepTime(setting, 1000);
+ c.tableOperations().attachIterator(tableName, setting, EnumSet.of(IteratorScope.minc));
+ // let the configuration change propagate through zookeeper
+ UtilWaitThread.sleep(1000);
+
+ Mutation m = new Mutation("xyzzy");
+ for (int i = 0; i < 100; i++) {
+ m.put("cf", "" + i, new Value(new byte[] {}));
+ }
+ BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+ bw.addMutation(m);
+ bw.close();
+ // should go fast
+ c.tableOperations().delete(tableName);
+ }
+
+}
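
The lever this test pulls is iterator scoping: attachIterator with an explicit EnumSet applies the iterator only at the named scopes, here minc (minor compaction, i.e. flush), so scans stay fast while flushes crawl; the two-argument attachIterator overload would enable it for all three scopes (scan, minc, majc). A sketch assuming a Connector c, a hypothetical table "demo", and the SlowIterator test helper from this package:

    import java.util.EnumSet;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;

    static void slowFlushesOnly(Connector c) throws Exception {
      IteratorSetting setting = new IteratorSetting(100, "slow", SlowIterator.class);
      SlowIterator.setSleepTime(setting, 1000);   // pause on each seek/next
      // restrict to minor compactions; scan and majc are unaffected
      c.tableOperations().attachIterator("demo", setting, EnumSet.of(IteratorScope.minc));
    }
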
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/DurabilityIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/DurabilityIT.java b/test/src/main/java/org/apache/accumulo/test/functional/DurabilityIT.java
new file mode 100644
index 0000000..49e004f
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/DurabilityIT.java
@@ -0,0 +1,222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.minicluster.impl.ProcessReference;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Iterators;
+
+public class DurabilityIT extends ConfigurableMacBase {
+ private static final Logger log = LoggerFactory.getLogger(DurabilityIT.class);
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
+ cfg.setNumTservers(1);
+ }
+
+ static final long N = 100000;
+
+ private String[] init() throws Exception {
+ String[] tableNames = getUniqueNames(4);
+ Connector c = getConnector();
+ TableOperations tableOps = c.tableOperations();
+ createTable(tableNames[0]);
+ createTable(tableNames[1]);
+ createTable(tableNames[2]);
+ createTable(tableNames[3]);
+ // default is sync
+ tableOps.setProperty(tableNames[1], Property.TABLE_DURABILITY.getKey(), "flush");
+ tableOps.setProperty(tableNames[2], Property.TABLE_DURABILITY.getKey(), "log");
+ tableOps.setProperty(tableNames[3], Property.TABLE_DURABILITY.getKey(), "none");
+ return tableNames;
+ }
+
+ private void cleanup(String[] tableNames) throws Exception {
+ Connector c = getConnector();
+ for (String tableName : tableNames) {
+ c.tableOperations().delete(tableName);
+ }
+ }
+
+ private void createTable(String tableName) throws Exception {
+ TableOperations tableOps = getConnector().tableOperations();
+ tableOps.create(tableName);
+ }
+
+ @Test(timeout = 2 * 60 * 1000)
+ public void testWriteSpeed() throws Exception {
+ TableOperations tableOps = getConnector().tableOperations();
+ String tableNames[] = init();
+ // write some data, then delete each table so it cannot skew the timing of the runs that follow
+ // sync
+ long t0 = writeSome(tableNames[0], N);
+ tableOps.delete(tableNames[0]);
+ // flush
+ long t1 = writeSome(tableNames[1], N);
+ tableOps.delete(tableNames[1]);
+ // log
+ long t2 = writeSome(tableNames[2], N);
+ tableOps.delete(tableNames[2]);
+ // none
+ long t3 = writeSome(tableNames[3], N);
+ tableOps.delete(tableNames[3]);
+ System.out.println(String.format("sync %d flush %d log %d none %d", t0, t1, t2, t3));
+ assertTrue("flush should be faster than sync", t0 > t1);
+ assertTrue("log should be faster than flush", t1 > t2);
+ assertTrue("no durability should be faster than log", t2 > t3);
+ }
+
+ @Test(timeout = 4 * 60 * 1000)
+ public void testSync() throws Exception {
+ String tableNames[] = init();
+ // sync table should lose nothing
+ writeSome(tableNames[0], N);
+ restartTServer();
+ assertEquals(N, readSome(tableNames[0]));
+ cleanup(tableNames);
+ }
+
+ @Test(timeout = 4 * 60 * 1000)
+ public void testFlush() throws Exception {
+ String tableNames[] = init();
+ // the flush table won't lose anything since we're not losing power or DFS
+ writeSome(tableNames[1], N);
+ restartTServer();
+ assertEquals(N, readSome(tableNames[1]));
+ cleanup(tableNames);
+ }
+
+ @Test(timeout = 4 * 60 * 1000)
+ public void testLog() throws Exception {
+ String tableNames[] = init();
+ // we're probably going to lose something with the log setting
+ writeSome(tableNames[2], N);
+ restartTServer();
+ long numResults = readSome(tableNames[2]);
+ assertTrue("Expected " + N + " >= " + numResults, N >= numResults);
+ cleanup(tableNames);
+ }
+
+ @Test(timeout = 4 * 60 * 1000)
+ public void testNone() throws Exception {
+ String tableNames[] = init();
+ // probably won't get any data back without logging
+ writeSome(tableNames[3], N);
+ restartTServer();
+ long numResults = readSome(tableNames[3]);
+ assertTrue("Expected " + N + " >= " + numResults, N >= numResults);
+ cleanup(tableNames);
+ }
+
+ @Test(timeout = 4 * 60 * 1000)
+ public void testIncreaseDurability() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ c.tableOperations().setProperty(tableName, Property.TABLE_DURABILITY.getKey(), "none");
+ writeSome(tableName, N);
+ restartTServer();
+ long numResults = readSome(tableName);
+ assertTrue("Expected " + N + " >= " + numResults, N >= numResults);
+ c.tableOperations().setProperty(tableName, Property.TABLE_DURABILITY.getKey(), "sync");
+ writeSome(tableName, N);
+ restartTServer();
+ assertTrue(N == readSome(tableName));
+ }
+
+ private static Map<String,String> map(Iterable<Entry<String,String>> entries) {
+ Map<String,String> result = new HashMap<String,String>();
+ for (Entry<String,String> entry : entries) {
+ result.put(entry.getKey(), entry.getValue());
+ }
+ return result;
+ }
+
+ @Test(timeout = 4 * 60 * 1000)
+ public void testMetaDurability() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.instanceOperations().setProperty(Property.TABLE_DURABILITY.getKey(), "none");
+ Map<String,String> props = map(c.tableOperations().getProperties(MetadataTable.NAME));
+ assertEquals("sync", props.get(Property.TABLE_DURABILITY.getKey()));
+ c.tableOperations().create(tableName);
+ props = map(c.tableOperations().getProperties(tableName));
+ assertEquals("none", props.get(Property.TABLE_DURABILITY.getKey()));
+ restartTServer();
+ assertTrue(c.tableOperations().exists(tableName));
+ }
+
+ private long readSome(String table) throws Exception {
+ return Iterators.size(getConnector().createScanner(table, Authorizations.EMPTY).iterator());
+ }
+
+ private void restartTServer() throws Exception {
+ for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
+ cluster.killProcess(ServerType.TABLET_SERVER, proc);
+ }
+ cluster.start();
+ }
+
+ private long writeSome(String table, long count) throws Exception {
+ int iterations = 5;
+ long[] attempts = new long[iterations];
+ for (int attempt = 0; attempt < iterations; attempt++) {
+ long now = System.currentTimeMillis();
+ Connector c = getConnector();
+ BatchWriter bw = c.createBatchWriter(table, null);
+ for (int i = 1; i < count + 1; i++) {
+ Mutation m = new Mutation("" + i);
+ m.put("", "", "");
+ bw.addMutation(m);
+ if (i % (Math.max(1, count / 100)) == 0) {
+ bw.flush();
+ }
+ }
+ bw.close();
+ attempts[attempt] = System.currentTimeMillis() - now;
+ }
+ Arrays.sort(attempts);
+ log.info("Attempt durations: {}", Arrays.toString(attempts));
+ // Return the median duration
+ return attempts[2];
+ }
+
+}
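
For reference, the four durability levels compared by testWriteSpeed trade safety for ingest speed: sync (the default; the write-ahead log is synced to disk before a write is acknowledged), flush (the WAL is flushed to the filesystem but not synced), log (the mutation reaches the WAL without a flush), and none (the WAL is skipped entirely). Changing a table's level is a single property write; a sketch with a hypothetical table name:

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.conf.Property;

    static void tuneDurability(Connector c) throws Exception {
      // weakest level: fastest writes, but data may vanish on tserver death
      c.tableOperations().setProperty("scratch", Property.TABLE_DURABILITY.getKey(), "none");
      // strongest level (the default): WAL synced before the write returns
      c.tableOperations().setProperty("scratch", Property.TABLE_DURABILITY.getKey(), "sync");
    }
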
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/MetadataIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MetadataIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MetadataIT.java
new file mode 100644
index 0000000..9455456
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MetadataIT.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class MetadataIT extends AccumuloClusterHarness {
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setNumTservers(1);
+ }
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Test
+ public void testFlushAndCompact() throws Exception {
+ Connector c = getConnector();
+ String tableNames[] = getUniqueNames(2);
+
+ // create a table to write some data to the metadata table
+ c.tableOperations().create(tableNames[0]);
+
+ Scanner rootScanner = c.createScanner(RootTable.NAME, Authorizations.EMPTY);
+ rootScanner.setRange(MetadataSchema.TabletsSection.getRange());
+ rootScanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+
+ Set<String> files1 = new HashSet<String>();
+ for (Entry<Key,Value> entry : rootScanner)
+ files1.add(entry.getKey().getColumnQualifier().toString());
+
+ c.tableOperations().create(tableNames[1]);
+ c.tableOperations().flush(MetadataTable.NAME, null, null, true);
+
+ Set<String> files2 = new HashSet<String>();
+ for (Entry<Key,Value> entry : rootScanner)
+ files2.add(entry.getKey().getColumnQualifier().toString());
+
+ // flush of metadata table should change file set in root table
+ Assert.assertTrue(files2.size() > 0);
+ Assert.assertNotEquals(files1, files2);
+
+ c.tableOperations().compact(MetadataTable.NAME, null, null, false, true);
+
+ Set<String> files3 = new HashSet<String>();
+ for (Entry<Key,Value> entry : rootScanner)
+ files3.add(entry.getKey().getColumnQualifier().toString());
+
+ // compaction of metadata table should change file set in root table
+ Assert.assertNotEquals(files2, files3);
+ }
+
+ @Test
+ public void mergeMeta() throws Exception {
+ Connector c = getConnector();
+ String[] names = getUniqueNames(5);
+ SortedSet<Text> splits = new TreeSet<Text>();
+ for (String id : "1 2 3 4 5".split(" ")) {
+ splits.add(new Text(id));
+ }
+ c.tableOperations().addSplits(MetadataTable.NAME, splits);
+ for (String tableName : names) {
+ c.tableOperations().create(tableName);
+ }
+ c.tableOperations().merge(MetadataTable.NAME, null, null);
+ Scanner s = c.createScanner(RootTable.NAME, Authorizations.EMPTY);
+ s.setRange(MetadataSchema.DeletesSection.getRange());
+ while (Iterators.size(s.iterator()) == 0) {
+ UtilWaitThread.sleep(100);
+ }
+ assertEquals(0, c.tableOperations().listSplits(MetadataTable.NAME).size());
+ }
+
+ @Test
+ public void batchScanTest() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+
+ // batch scan regular metadata table
+ BatchScanner s = c.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1);
+ s.setRanges(Collections.singleton(new Range()));
+ int count = 0;
+ for (Entry<Key,Value> e : s) {
+ if (e != null)
+ count++;
+ }
+ s.close();
+ assertTrue(count > 0);
+
+ // batch scan root metadata table
+ s = c.createBatchScanner(RootTable.NAME, Authorizations.EMPTY, 1);
+ s.setRanges(Collections.singleton(new Range()));
+ count = 0;
+ for (Entry<Key,Value> e : s) {
+ if (e != null)
+ count++;
+ }
+ s.close();
+ assertTrue(count > 0);
+ }
+
+}
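
The recipe behind testFlushAndCompact, distilled: the root table holds the metadata table's tablet entries, so scanning the root table's tablets section for the data-file column family lists the rfiles currently backing the metadata table, and flushing or compacting the metadata table must change that set. As a standalone sketch:

    import java.util.Map.Entry;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.RootTable;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema;
    import org.apache.accumulo.core.security.Authorizations;

    static void listMetadataFiles(Connector c) throws Exception {
      Scanner s = c.createScanner(RootTable.NAME, Authorizations.EMPTY);
      s.setRange(MetadataSchema.TabletsSection.getRange());
      s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
      for (Entry<Key,Value> e : s)
        System.out.println(e.getKey().getColumnQualifier()); // one rfile per entry
    }
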
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
new file mode 100644
index 0000000..086dd1a
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MetadataMaxFilesIT.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.impl.ClientContext;
+import org.apache.accumulo.core.client.impl.Credentials;
+import org.apache.accumulo.core.client.impl.MasterClient;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.master.thrift.MasterClientService.Client;
+import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
+import org.apache.accumulo.core.master.thrift.TableInfo;
+import org.apache.accumulo.core.master.thrift.TabletServerStatus;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.trace.Tracer;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.util.Admin;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class MetadataMaxFilesIT extends ConfigurableMacBase {
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ Map<String,String> siteConfig = new HashMap<String,String>();
+ siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1");
+ siteConfig.put(Property.TSERV_SCAN_MAX_OPENFILES.getKey(), "10");
+ cfg.setSiteConfig(siteConfig);
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 4 * 60;
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector c = getConnector();
+ SortedSet<Text> splits = new TreeSet<Text>();
+ for (int i = 0; i < 1000; i++) {
+ splits.add(new Text(String.format("%03d", i)));
+ }
+ c.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10000");
+ for (int i = 0; i < 5; i++) {
+ String tableName = "table" + i;
+ log.info("Creating " + tableName);
+ c.tableOperations().create(tableName);
+ log.info("adding splits");
+ c.tableOperations().addSplits(tableName, splits);
+ log.info("flushing");
+ c.tableOperations().flush(MetadataTable.NAME, null, null, true);
+ c.tableOperations().flush(RootTable.NAME, null, null, true);
+ }
+ UtilWaitThread.sleep(20 * 1000);
+ log.info("shutting down");
+ assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+ cluster.stop();
+ log.info("starting up");
+ cluster.start();
+
+ UtilWaitThread.sleep(30 * 1000);
+
+ while (true) {
+ MasterMonitorInfo stats = null;
+ Credentials creds = new Credentials("root", new PasswordToken(ROOT_PASSWORD));
+ Client client = null;
+ try {
+ ClientContext context = new ClientContext(c.getInstance(), creds, getClientConfig());
+ client = MasterClient.getConnectionWithRetry(context);
+ stats = client.getMasterStats(Tracer.traceInfo(), context.rpcCreds());
+ } finally {
+ if (client != null)
+ MasterClient.close(client);
+ }
+ int tablets = 0;
+ for (TabletServerStatus tserver : stats.tServerInfo) {
+ for (Entry<String,TableInfo> entry : tserver.tableMap.entrySet()) {
+ if (entry.getKey().startsWith("!") || entry.getKey().startsWith("+"))
+ continue;
+ tablets += entry.getValue().onlineTablets;
+ }
+ }
+ if (tablets == 5005)
+ break;
+ UtilWaitThread.sleep(1000);
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/MetadataSplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MetadataSplitIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MetadataSplitIT.java
new file mode 100644
index 0000000..ab2c791
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MetadataSplitIT.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Collections;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+public class MetadataSplitIT extends ConfigurableMacBase {
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_MAJC_DELAY.getKey(), "100ms"));
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector c = getConnector();
+ assertEquals(1, c.tableOperations().listSplits(MetadataTable.NAME).size());
+ c.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "500");
+ for (int i = 0; i < 10; i++) {
+ c.tableOperations().create("table" + i);
+ c.tableOperations().flush(MetadataTable.NAME, null, null, true);
+ }
+ UtilWaitThread.sleep(10 * 1000);
+ assertTrue(c.tableOperations().listSplits(MetadataTable.NAME).size() > 2);
+ }
+}
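
The property being exercised, table.split.threshold, sets how large a tablet may grow before the hosting tserver splits it; dropping it to a tiny value on the metadata table itself forces metadata splits quickly. A sketch (the 500-byte value is test-sized; the default is 1G):

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.conf.Property;
    import org.apache.accumulo.core.metadata.MetadataTable;

    static void shrinkMetadataSplitThreshold(Connector c) throws Exception {
      c.tableOperations().setProperty(MetadataTable.NAME,
          Property.TABLE_SPLIT_THRESHOLD.getKey(), "500"); // bytes
    }
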
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/MonitorLoggingIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MonitorLoggingIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MonitorLoggingIT.java
new file mode 100644
index 0000000..c59c52e
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MonitorLoggingIT.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.net.URL;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.monitor.Monitor;
+import org.apache.accumulo.server.util.Admin;
+import org.apache.commons.io.FileUtils;
+import org.apache.zookeeper.KeeperException;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class MonitorLoggingIT extends ConfigurableMacBase {
+ private static final Logger log = LoggerFactory.getLogger(MonitorLoggingIT.class);
+
+ @Override
+ public void beforeClusterStart(MiniAccumuloConfigImpl cfg) throws Exception {
+ cfg.setNumTservers(1);
+ File confDir = cfg.getConfDir();
+ try {
+ FileUtils.copyFileToDirectory(new File(MonitorLoggingIT.class.getResource("/conf/generic_logger.xml").toURI()), confDir);
+ FileUtils.copyFileToDirectory(new File(MonitorLoggingIT.class.getResource("/conf/monitor_logger.xml").toURI()), confDir);
+ } catch (Exception e) {
+ log.error("Failed to copy Log4J XML files to conf dir", e);
+ }
+ }
+
+ private static final int NUM_LOCATION_PASSES = 5;
+ private static final int LOCATION_DELAY_SECS = 5;
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 30 + ((NUM_LOCATION_PASSES + 2) * LOCATION_DELAY_SECS);
+ }
+
+ @Test
+ public void logToMonitor() throws Exception {
+ // Start the monitor.
+ log.debug("Starting Monitor");
+ Process monitor = cluster.exec(Monitor.class);
+
+ // Get monitor location to ensure it is running.
+ String monitorLocation = null;
+ for (int i = 0; i < NUM_LOCATION_PASSES; i++) {
+ Thread.sleep(LOCATION_DELAY_SECS * 1000);
+ try {
+ monitorLocation = getMonitor();
+ break;
+ } catch (KeeperException e) {
+ log.debug("Monitor not up yet, trying again in " + LOCATION_DELAY_SECS + " secs");
+ }
+ }
+ assertNotNull("Monitor failed to start within " + (LOCATION_DELAY_SECS * NUM_LOCATION_PASSES) + " secs", monitorLocation);
+ log.debug("Monitor running at " + monitorLocation);
+
+ // The tserver has to observe that the log-forwarding address
+ // changed in ZooKeeper. If we cause the error before the tserver
+ // updates, we'll never see the error on the monitor.
+ Thread.sleep(10000);
+
+ // Attempt a scan with an invalid iterator to force a log message in the monitor.
+ try {
+ Connector c = getConnector();
+ Scanner s = c.createScanner("accumulo.root", new Authorizations());
+ IteratorSetting cfg = new IteratorSetting(100, "incorrect", "java.lang.String");
+ s.addScanIterator(cfg);
+ s.iterator().next();
+ } catch (RuntimeException e) {
+ // expected, the iterator was bad
+ }
+
+ String result = "";
+ while (true) {
+ Thread.sleep(LOCATION_DELAY_SECS * 1000); // extra precaution to ensure monitor has opportunity to log
+
+ // Verify messages were received at the monitor.
+ URL url = new URL("http://" + monitorLocation + "/log");
+ log.debug("Fetching web page " + url);
+ result = FunctionalTestUtils.readAll(url.openStream());
+ if (result.contains("<pre class='logevent'>")) {
+ break;
+ }
+ log.debug("No messages found, waiting a little longer...");
+ }
+
+ assertTrue("No log messages found", result.contains("<pre class='logevent'>"));
+
+ // Shutdown cleanly.
+ log.debug("Stopping mini accumulo cluster");
+ Process shutdown = cluster.exec(Admin.class, "stopAll");
+ shutdown.waitFor();
+ assertTrue(shutdown.exitValue() == 0);
+ log.debug("success!");
+ monitor.destroy();
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/MonitorSslIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MonitorSslIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MonitorSslIT.java
new file mode 100644
index 0000000..7283c4d
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MonitorSslIT.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.net.URL;
+import java.security.KeyManagementException;
+import java.security.NoSuchAlgorithmException;
+import java.security.SecureRandom;
+import java.security.cert.CertificateException;
+import java.security.cert.X509Certificate;
+import java.util.Map;
+
+import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.KeyManager;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLSession;
+import javax.net.ssl.TrustManager;
+import javax.net.ssl.X509TrustManager;
+
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.util.MonitorUtil;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Check SSL for the Monitor
+ *
+ */
+public class MonitorSslIT extends ConfigurableMacBase {
+ @BeforeClass
+ public static void initHttps() throws NoSuchAlgorithmException, KeyManagementException {
+ SSLContext ctx = SSLContext.getInstance("SSL");
+ TrustManager[] tm = new TrustManager[] {new TestTrustManager()};
+ ctx.init(new KeyManager[0], tm, new SecureRandom());
+ SSLContext.setDefault(ctx);
+ HttpsURLConnection.setDefaultSSLSocketFactory(ctx.getSocketFactory());
+ HttpsURLConnection.setDefaultHostnameVerifier(new TestHostnameVerifier());
+ }
+
+ private static class TestTrustManager implements X509TrustManager {
+ @Override
+ public void checkClientTrusted(X509Certificate[] arg0, String arg1) throws CertificateException {}
+
+ @Override
+ public void checkServerTrusted(X509Certificate[] arg0, String arg1) throws CertificateException {}
+
+ @Override
+ public X509Certificate[] getAcceptedIssuers() {
+ return null;
+ }
+ }
+
+ private static class TestHostnameVerifier implements HostnameVerifier {
+ @Override
+ public boolean verify(String hostname, SSLSession session) {
+ return true;
+ }
+ }
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 6 * 60;
+ }
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ super.configure(cfg, hadoopCoreSite);
+ File baseDir = createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName());
+ configureForSsl(cfg, getSslDir(baseDir));
+ Map<String,String> siteConfig = cfg.getSiteConfig();
+ siteConfig.put(Property.MONITOR_SSL_KEYSTORE.getKey(), siteConfig.get(Property.RPC_SSL_KEYSTORE_PATH.getKey()));
+ siteConfig.put(Property.MONITOR_SSL_KEYSTOREPASS.getKey(), siteConfig.get(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey()));
+ if (siteConfig.containsKey(Property.RPC_SSL_KEYSTORE_TYPE.getKey())) {
+ siteConfig.put(Property.MONITOR_SSL_KEYSTORETYPE.getKey(), siteConfig.get(Property.RPC_SSL_KEYSTORE_TYPE.getKey()));
+ } else {
+ siteConfig.put(Property.MONITOR_SSL_KEYSTORETYPE.getKey(), Property.RPC_SSL_KEYSTORE_TYPE.getDefaultValue());
+ }
+ siteConfig.put(Property.MONITOR_SSL_TRUSTSTORE.getKey(), siteConfig.get(Property.RPC_SSL_TRUSTSTORE_PATH.getKey()));
+ siteConfig.put(Property.MONITOR_SSL_TRUSTSTOREPASS.getKey(), siteConfig.get(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey()));
+ if (siteConfig.containsKey(Property.RPC_SSL_TRUSTSTORE_TYPE.getKey())) {
+ siteConfig.put(Property.MONITOR_SSL_TRUSTSTORETYPE.getKey(), siteConfig.get(Property.RPC_SSL_TRUSTSTORE_TYPE.getKey()));
+ } else {
+ siteConfig.put(Property.MONITOR_SSL_TRUSTSTORETYPE.getKey(), Property.RPC_SSL_TRUSTSTORE_TYPE.getDefaultValue());
+ }
+ cfg.setSiteConfig(siteConfig);
+ }
+
+ @Test
+ public void test() throws Exception {
+ log.debug("Starting Monitor");
+ cluster.getClusterControl().startAllServers(ServerType.MONITOR);
+ String monitorLocation = null;
+ while (null == monitorLocation) {
+ try {
+ monitorLocation = MonitorUtil.getLocation(getConnector().getInstance());
+ } catch (Exception e) {
+ // ignored
+ }
+ if (null == monitorLocation) {
+ log.debug("Could not fetch monitor HTTP address from zookeeper");
+ Thread.sleep(2000);
+ }
+ }
+ URL url = new URL("https://" + monitorLocation);
+ log.debug("Fetching web page " + url);
+ String result = FunctionalTestUtils.readAll(url.openStream());
+ assertTrue(result.length() > 100);
+ assertTrue(result.indexOf("Accumulo Overview") >= 0);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/NativeMapIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/NativeMapIT.java b/test/src/main/java/org/apache/accumulo/test/functional/NativeMapIT.java
new file mode 100644
index 0000000..9175379
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/NativeMapIT.java
@@ -0,0 +1,613 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.NoSuchElementException;
+import java.util.Random;
+import java.util.TreeMap;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.util.Pair;
+import org.apache.accumulo.tserver.NativeMap;
+import org.apache.hadoop.io.Text;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class NativeMapIT {
+
+ private Key nk(int r) {
+ return new Key(new Text(String.format("r%09d", r)));
+ }
+
+ private Key nk(int r, int cf, int cq, int cv, int ts, boolean deleted) {
+ Key k = new Key(new Text(String.format("r%09d", r)), new Text(String.format("cf%09d", cf)), new Text(String.format("cq%09d", cq)), new Text(String.format(
+ "cv%09d", cv)), ts);
+
+ k.setDeleted(deleted);
+
+ return k;
+ }
+
+ private Value nv(int v) {
+ return new Value(String.format("r%09d", v).getBytes(UTF_8));
+ }
+
+ public static File nativeMapLocation() {
+ File projectDir = new File(System.getProperty("user.dir")).getParentFile();
+ File nativeMapDir = new File(projectDir, "server/native/target/accumulo-native-" + Constants.VERSION + "/accumulo-native-" + Constants.VERSION);
+ return nativeMapDir;
+ }
+
+ @BeforeClass
+ public static void setUp() {
+ NativeMap.loadNativeLib(Collections.singletonList(nativeMapLocation()));
+ }
+
+ private void verifyIterator(int start, int end, int valueOffset, Iterator<Entry<Key,Value>> iter) {
+ for (int i = start; i <= end; i++) {
+ assertTrue(iter.hasNext());
+ Entry<Key,Value> entry = iter.next();
+ assertEquals(nk(i), entry.getKey());
+ assertEquals(nv(i + valueOffset), entry.getValue());
+ }
+
+ assertFalse(iter.hasNext());
+ }
+
+ private void insertAndVerify(NativeMap nm, int start, int end, int valueOffset) {
+ for (int i = start; i <= end; i++) {
+ nm.put(nk(i), nv(i + valueOffset));
+ }
+
+ for (int i = start; i <= end; i++) {
+ Value v = nm.get(nk(i));
+ assertNotNull(v);
+ assertEquals(nv(i + valueOffset), v);
+
+ Iterator<Entry<Key,Value>> iter2 = nm.iterator(nk(i));
+ assertTrue(iter2.hasNext());
+ Entry<Key,Value> entry = iter2.next();
+ assertEquals(nk(i), entry.getKey());
+ assertEquals(nv(i + valueOffset), entry.getValue());
+ }
+
+ assertNull(nm.get(nk(start - 1)));
+
+ assertNull(nm.get(nk(end + 1)));
+
+ Iterator<Entry<Key,Value>> iter = nm.iterator();
+ verifyIterator(start, end, valueOffset, iter);
+
+ for (int i = start; i <= end; i++) {
+ iter = nm.iterator(nk(i));
+ verifyIterator(i, end, valueOffset, iter);
+
+ // lookup nonexistent key that falls after existing key
+ iter = nm.iterator(nk(i, 1, 1, 1, 1, false));
+ verifyIterator(i + 1, end, valueOffset, iter);
+ }
+
+ assertEquals(end - start + 1, nm.size());
+ }
+
+ private void insertAndVerifyExhaustive(NativeMap nm, int num, int run) {
+ for (int i = 0; i < num; i++) {
+ for (int j = 0; j < num; j++) {
+ for (int k = 0; k < num; k++) {
+ for (int l = 0; l < num; l++) {
+ for (int ts = 0; ts < num; ts++) {
+ Key key = nk(i, j, k, l, ts, true);
+ Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes(UTF_8));
+
+ nm.put(key, value);
+
+ key = nk(i, j, k, l, ts, false);
+ value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes(UTF_8));
+
+ nm.put(key, value);
+ }
+ }
+ }
+ }
+ }
+
+ Iterator<Entry<Key,Value>> iter = nm.iterator();
+
+ for (int i = 0; i < num; i++) {
+ for (int j = 0; j < num; j++) {
+ for (int k = 0; k < num; k++) {
+ for (int l = 0; l < num; l++) {
+ for (int ts = num - 1; ts >= 0; ts--) {
+ Key key = nk(i, j, k, l, ts, true);
+ Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes(UTF_8));
+
+ assertTrue(iter.hasNext());
+ Entry<Key,Value> entry = iter.next();
+ assertEquals(key, entry.getKey());
+ assertEquals(value, entry.getValue());
+
+ key = nk(i, j, k, l, ts, false);
+ value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes(UTF_8));
+
+ assertTrue(iter.hasNext());
+ entry = iter.next();
+ assertEquals(key, entry.getKey());
+ assertEquals(value, entry.getValue());
+ }
+ }
+ }
+ }
+ }
+
+ assertFalse(iter.hasNext());
+
+ for (int i = 0; i < num; i++) {
+ for (int j = 0; j < num; j++) {
+ for (int k = 0; k < num; k++) {
+ for (int l = 0; l < num; l++) {
+ for (int ts = 0; ts < num; ts++) {
+ Key key = nk(i, j, k, l, ts, true);
+ Value value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + true + "_" + run).getBytes(UTF_8));
+
+ assertEquals(value, nm.get(key));
+
+ Iterator<Entry<Key,Value>> iter2 = nm.iterator(key);
+ assertTrue(iter2.hasNext());
+ Entry<Key,Value> entry = iter2.next();
+ assertEquals(key, entry.getKey());
+ assertEquals(value, entry.getValue());
+
+ key = nk(i, j, k, l, ts, false);
+ value = new Value((i + "_" + j + "_" + k + "_" + l + "_" + ts + "_" + false + "_" + run).getBytes(UTF_8));
+
+ assertEquals(value, nm.get(key));
+
+ Iterator<Entry<Key,Value>> iter3 = nm.iterator(key);
+ assertTrue(iter3.hasNext());
+ Entry<Key,Value> entry2 = iter3.next();
+ assertEquals(key, entry2.getKey());
+ assertEquals(value, entry2.getValue());
+ }
+ }
+ }
+ }
+ }
+
+ assertEquals(num * num * num * num * num * 2, nm.size());
+ }
+
+ @Test
+ public void test1() {
+ NativeMap nm = new NativeMap();
+ Iterator<Entry<Key,Value>> iter = nm.iterator();
+ assertFalse(iter.hasNext());
+ nm.delete();
+ }
+
+ @Test
+ public void test2() {
+ NativeMap nm = new NativeMap();
+
+ insertAndVerify(nm, 1, 10, 0);
+ insertAndVerify(nm, 1, 10, 1);
+ insertAndVerify(nm, 1, 10, 2);
+
+ nm.delete();
+ }
+
+ @Test
+ public void test4() {
+ NativeMap nm = new NativeMap();
+
+ insertAndVerifyExhaustive(nm, 3, 0);
+ insertAndVerifyExhaustive(nm, 3, 1);
+
+ nm.delete();
+ }
+
+ @Test
+ public void test5() {
+ NativeMap nm = new NativeMap();
+
+ insertAndVerify(nm, 1, 10, 0);
+
+ Iterator<Entry<Key,Value>> iter = nm.iterator();
+ iter.next();
+
+ nm.delete();
+
+ try {
+ nm.put(nk(1), nv(1));
+ fail("put() should throw IllegalStateException after delete()");
+ } catch (IllegalStateException e) {
+ // expected
+ }
+
+ try {
+ nm.get(nk(1));
+ fail("get() should throw IllegalStateException after delete()");
+ } catch (IllegalStateException e) {
+ // expected
+ }
+
+ try {
+ nm.iterator();
+ fail("iterator() should throw IllegalStateException after delete()");
+ } catch (IllegalStateException e) {
+ // expected
+ }
+
+ try {
+ nm.iterator(nk(1));
+ fail("iterator(Key) should throw IllegalStateException after delete()");
+ } catch (IllegalStateException e) {
+ // expected
+ }
+
+ try {
+ nm.size();
+ fail("size() should throw IllegalStateException after delete()");
+ } catch (IllegalStateException e) {
+ // expected
+ }
+
+ try {
+ iter.next();
+ fail("a previously created iterator should throw IllegalStateException after delete()");
+ } catch (IllegalStateException e) {
+ // expected
+ }
+
+ }
+
+ @Test
+ public void test7() {
+ NativeMap nm = new NativeMap();
+
+ insertAndVerify(nm, 1, 10, 0);
+
+ nm.delete();
+
+ try {
+ nm.delete();
+ fail("a second delete() should throw IllegalStateException");
+ } catch (IllegalStateException e) {
+ // expected
+ }
+ }
+
+ @Test
+ public void test8() {
+ // test verifies that native map sorts keys sharing some common prefix properly
+
+ NativeMap nm = new NativeMap();
+
+ TreeMap<Key,Value> tm = new TreeMap<Key,Value>();
+
+ tm.put(new Key(new Text("fo")), new Value(new byte[] {'0'}));
+ tm.put(new Key(new Text("foo")), new Value(new byte[] {'1'}));
+ tm.put(new Key(new Text("foo1")), new Value(new byte[] {'2'}));
+ tm.put(new Key(new Text("foo2")), new Value(new byte[] {'3'}));
+
+ for (Entry<Key,Value> entry : tm.entrySet()) {
+ nm.put(entry.getKey(), entry.getValue());
+ }
+
+ Iterator<Entry<Key,Value>> iter = nm.iterator();
+
+ for (Entry<Key,Value> entry : tm.entrySet()) {
+ assertTrue(iter.hasNext());
+ Entry<Key,Value> entry2 = iter.next();
+
+ assertEquals(entry.getKey(), entry2.getKey());
+ assertEquals(entry.getValue(), entry2.getValue());
+ }
+
+ assertFalse(iter.hasNext());
+
+ nm.delete();
+ }
+
+ @Test
+ public void test9() {
+ NativeMap nm = new NativeMap();
+
+ Iterator<Entry<Key,Value>> iter = nm.iterator();
+
+ try {
+ iter.next();
+ fail("next() on an empty iterator should throw NoSuchElementException");
+ } catch (NoSuchElementException e) {
+ // expected
+ }
+
+ insertAndVerify(nm, 1, 1, 0);
+
+ iter = nm.iterator();
+ iter.next();
+
+ try {
+ iter.next();
+ fail("next() past the last entry should throw NoSuchElementException");
+ } catch (NoSuchElementException e) {
+ // expected
+ }
+
+ nm.delete();
+ }
+
+ @Test
+ public void test10() {
+ int start = 1;
+ int end = 10000;
+
+ NativeMap nm = new NativeMap();
+ for (int i = start; i <= end; i++) {
+ nm.put(nk(i), nv(i));
+ }
+
+ long mem1 = nm.getMemoryUsed();
+
+ for (int i = start; i <= end; i++) {
+ nm.put(nk(i), nv(i));
+ }
+
+ long mem2 = nm.getMemoryUsed();
+
+ if (mem1 != mem2) {
+ throw new RuntimeException("Memory changed after inserting duplicate data " + mem1 + " " + mem2);
+ }
+
+ for (int i = start; i <= end; i++) {
+ nm.put(nk(i), nv(i));
+ }
+
+ long mem3 = nm.getMemoryUsed();
+
+ if (mem1 != mem3) {
+ throw new RuntimeException("Memory changed after inserting duplicate data " + mem1 + " " + mem3);
+ }
+
+ byte bigrow[] = new byte[1000000];
+ byte bigvalue[] = new byte[bigrow.length];
+
+ for (int i = 0; i < bigrow.length; i++) {
+ bigrow[i] = (byte) (0xff & (i % 256));
+ bigvalue[i] = bigrow[i];
+ }
+
+ nm.put(new Key(new Text(bigrow)), new Value(bigvalue));
+
+ long mem4 = nm.getMemoryUsed();
+
+ Value val = nm.get(new Key(new Text(bigrow)));
+ if (val == null || !val.equals(new Value(bigvalue))) {
+ throw new RuntimeException("Did not get expected big value");
+ }
+
+ nm.put(new Key(new Text(bigrow)), new Value(bigvalue));
+
+ long mem5 = nm.getMemoryUsed();
+
+ if (mem4 != mem5) {
+ throw new RuntimeException("Memory changed after inserting duplicate data " + mem4 + " " + mem5);
+ }
+
+ val = nm.get(new Key(new Text(bigrow)));
+ if (val == null || !val.equals(new Value(bigvalue))) {
+ throw new RuntimeException("Did not get expected big value");
+ }
+
+ nm.delete();
+ }
+
+ // random length random field
+ private static byte[] rlrf(Random r, int maxLen) {
+ int len = r.nextInt(maxLen);
+
+ byte f[] = new byte[len];
+ r.nextBytes(f);
+
+ return f;
+ }
+
+ @Test
+ public void test11() {
+ NativeMap nm = new NativeMap();
+
+ // insert things with varying field sizes and value sizes
+
+ // generate random data
+ Random r = new Random(75);
+
+ ArrayList<Pair<Key,Value>> testData = new ArrayList<Pair<Key,Value>>();
+
+ for (int i = 0; i < 100000; i++) {
+
+ Key k = new Key(rlrf(r, 97), rlrf(r, 13), rlrf(r, 31), rlrf(r, 11), (r.nextLong() & 0x7fffffffffffffffL), false, false);
+ Value v = new Value(rlrf(r, 511));
+
+ testData.add(new Pair<Key,Value>(k, v));
+ }
+
+ // insert unsorted data
+ for (Pair<Key,Value> pair : testData) {
+ nm.put(pair.getFirst(), pair.getSecond());
+ }
+
+ for (int i = 0; i < 2; i++) {
+
+ // sort data
+ Collections.sort(testData, new Comparator<Pair<Key,Value>>() {
+ @Override
+ public int compare(Pair<Key,Value> o1, Pair<Key,Value> o2) {
+ return o1.getFirst().compareTo(o2.getFirst());
+ }
+ });
+
+ // verify
+ Iterator<Entry<Key,Value>> iter1 = nm.iterator();
+ Iterator<Pair<Key,Value>> iter2 = testData.iterator();
+
+ while (iter1.hasNext() && iter2.hasNext()) {
+ Entry<Key,Value> e = iter1.next();
+ Pair<Key,Value> p = iter2.next();
+
+ if (!e.getKey().equals(p.getFirst()))
+ throw new RuntimeException("Keys not equal");
+
+ if (!e.getValue().equals(p.getSecond()))
+ throw new RuntimeException("Values not equal");
+ }
+
+ if (iter1.hasNext())
+ throw new RuntimeException("Not all of native map consumed");
+
+ if (iter2.hasNext())
+ throw new RuntimeException("Not all of test data consumed");
+
+ System.out.println("test 11 nm mem " + nm.getMemoryUsed());
+
+ // insert data again w/ different value
+ Collections.shuffle(testData, r);
+ // insert unsorted data
+ for (Pair<Key,Value> pair : testData) {
+ pair.getSecond().set(rlrf(r, 511));
+ nm.put(pair.getFirst(), pair.getSecond());
+ }
+ }
+
+ nm.delete();
+ }
+
+ @Test
+ public void testBinary() {
+ NativeMap nm = new NativeMap();
+
+ byte emptyBytes[] = new byte[0];
+
+ for (int i = 0; i < 256; i++) {
+ for (int j = 0; j < 256; j++) {
+ byte row[] = new byte[] {'r', (byte) (0xff & i), (byte) (0xff & j)};
+ byte data[] = new byte[] {'v', (byte) (0xff & i), (byte) (0xff & j)};
+
+ Key k = new Key(row, emptyBytes, emptyBytes, emptyBytes, 1);
+ Value v = new Value(data);
+
+ nm.put(k, v);
+ }
+ }
+
+ Iterator<Entry<Key,Value>> iter = nm.iterator();
+ for (int i = 0; i < 256; i++) {
+ for (int j = 0; j < 256; j++) {
+ byte row[] = new byte[] {'r', (byte) (0xff & i), (byte) (0xff & j)};
+ byte data[] = new byte[] {'v', (byte) (0xff & i), (byte) (0xff & j)};
+
+ Key k = new Key(row, emptyBytes, emptyBytes, emptyBytes, 1);
+ Value v = new Value(data);
+
+ assertTrue(iter.hasNext());
+ Entry<Key,Value> entry = iter.next();
+
+ assertEquals(k, entry.getKey());
+ assertEquals(v, entry.getValue());
+
+ }
+ }
+
+ assertFalse(iter.hasNext());
+
+ for (int i = 0; i < 256; i++) {
+ for (int j = 0; j < 256; j++) {
+ byte row[] = new byte[] {'r', (byte) (0xff & i), (byte) (0xff & j)};
+ byte data[] = new byte[] {'v', (byte) (0xff & i), (byte) (0xff & j)};
+
+ Key k = new Key(row, emptyBytes, emptyBytes, emptyBytes, 1);
+ Value v = new Value(data);
+
+ Value v2 = nm.get(k);
+
+ assertEquals(v, v2);
+ }
+ }
+
+ nm.delete();
+ }
+
+ @Test
+ public void testEmpty() {
+ NativeMap nm = new NativeMap();
+
+ assertEquals(0, nm.size());
+ assertEquals(0, nm.getMemoryUsed());
+
+ nm.delete();
+ }
+
+ @Test
+ public void testConcurrentIter() throws IOException {
+ NativeMap nm = new NativeMap();
+
+ nm.put(nk(0), nv(0));
+ nm.put(nk(1), nv(1));
+ nm.put(nk(3), nv(3));
+
+ SortedKeyValueIterator<Key,Value> iter = nm.skvIterator();
+
+ // modify map after iter created
+ nm.put(nk(2), nv(2));
+
+ assertTrue(iter.hasTop());
+ assertEquals(iter.getTopKey(), nk(0));
+ iter.next();
+
+ assertTrue(iter.hasTop());
+ assertEquals(iter.getTopKey(), nk(1));
+ iter.next();
+
+ assertTrue(iter.hasTop());
+ assertEquals(iter.getTopKey(), nk(2));
+ iter.next();
+
+ assertTrue(iter.hasTop());
+ assertEquals(iter.getTopKey(), nk(3));
+ iter.next();
+
+ assertFalse(iter.hasTop());
+
+ nm.delete();
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/PermissionsIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/PermissionsIT.java b/test/src/main/java/org/apache/accumulo/test/functional/PermissionsIT.java
new file mode 100644
index 0000000..8700891
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/PermissionsIT.java
@@ -0,0 +1,707 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.security.SecurityErrorCode;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.SystemPermission;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+// This test verifies the default permissions, so a clean instance must be used. A shared instance might
+// not be representative of a fresh installation.
+public class PermissionsIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(PermissionsIT.class);
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ @Before
+ public void limitToMini() throws Exception {
+ Assume.assumeTrue(ClusterType.MINI == getClusterType());
+ Connector c = getConnector();
+ Set<String> users = c.securityOperations().listLocalUsers();
+ ClusterUser user = getUser(0);
+ if (users.contains(user.getPrincipal())) {
+ c.securityOperations().dropLocalUser(user.getPrincipal());
+ }
+ }
+
+ private void loginAs(ClusterUser user) throws IOException {
+ // Force a re-login as the provided user
+ user.getToken();
+ }
+
+ @Test
+ public void systemPermissionsTest() throws Exception {
+ ClusterUser testUser = getUser(0), rootUser = getAdminUser();
+
+ // verify that the test is being run by root
+ Connector c = getConnector();
+ verifyHasOnlyTheseSystemPermissions(c, c.whoami(), SystemPermission.values());
+
+ // create the test user
+ String principal = testUser.getPrincipal();
+ AuthenticationToken token = testUser.getToken();
+ PasswordToken passwordToken = null;
+ if (token instanceof PasswordToken) {
+ passwordToken = (PasswordToken) token;
+ }
+ loginAs(rootUser);
+ c.securityOperations().createLocalUser(principal, passwordToken);
+ loginAs(testUser);
+ Connector test_user_conn = c.getInstance().getConnector(principal, token);
+ loginAs(rootUser);
+ verifyHasNoSystemPermissions(c, principal, SystemPermission.values());
+
+ // test each permission
+ for (SystemPermission perm : SystemPermission.values()) {
+ log.debug("Verifying the " + perm + " permission");
+
+ // test permission before and after granting it
+ String tableNamePrefix = getUniqueNames(1)[0];
+ testMissingSystemPermission(tableNamePrefix, c, rootUser, test_user_conn, testUser, perm);
+ loginAs(rootUser);
+ c.securityOperations().grantSystemPermission(principal, perm);
+ verifyHasOnlyTheseSystemPermissions(c, principal, perm);
+ testGrantedSystemPermission(tableNamePrefix, c, rootUser, test_user_conn, testUser, perm);
+ loginAs(rootUser);
+ c.securityOperations().revokeSystemPermission(principal, perm);
+ verifyHasNoSystemPermissions(c, principal, perm);
+ }
+ }
+
+ static Map<String,String> map(Iterable<Entry<String,String>> i) {
+ Map<String,String> result = new HashMap<String,String>();
+ for (Entry<String,String> e : i) {
+ result.put(e.getKey(), e.getValue());
+ }
+ return result;
+ }
+
+ private void testMissingSystemPermission(String tableNamePrefix, Connector root_conn, ClusterUser rootUser, Connector test_user_conn, ClusterUser testUser,
+ SystemPermission perm) throws Exception {
+ String tableName, user, password = "password", namespace;
+ boolean passwordBased = testUser.getPassword() != null;
+ log.debug("Confirming that the lack of the " + perm + " permission properly restricts the user");
+
+ // test permission prior to granting it
+ switch (perm) {
+ case CREATE_TABLE:
+ tableName = tableNamePrefix + "__CREATE_TABLE_WITHOUT_PERM_TEST__";
+ try {
+ loginAs(testUser);
+ test_user_conn.tableOperations().create(tableName);
+ throw new IllegalStateException("Should NOT be able to create a table");
+ } catch (AccumuloSecurityException e) {
+ loginAs(rootUser);
+ if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || root_conn.tableOperations().list().contains(tableName))
+ throw e;
+ }
+ break;
+ case DROP_TABLE:
+ tableName = tableNamePrefix + "__DROP_TABLE_WITHOUT_PERM_TEST__";
+ loginAs(rootUser);
+ root_conn.tableOperations().create(tableName);
+ try {
+ loginAs(testUser);
+ test_user_conn.tableOperations().delete(tableName);
+ throw new IllegalStateException("Should NOT be able to delete a table");
+ } catch (AccumuloSecurityException e) {
+ loginAs(rootUser);
+ if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.tableOperations().list().contains(tableName))
+ throw e;
+ }
+ break;
+ case ALTER_TABLE:
+ tableName = tableNamePrefix + "__ALTER_TABLE_WITHOUT_PERM_TEST__";
+ loginAs(rootUser);
+ root_conn.tableOperations().create(tableName);
+ try {
+ loginAs(testUser);
+ test_user_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
+ throw new IllegalStateException("Should NOT be able to set a table property");
+ } catch (AccumuloSecurityException e) {
+ loginAs(rootUser);
+ if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
+ || map(root_conn.tableOperations().getProperties(tableName)).get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
+ throw e;
+ }
+ loginAs(rootUser);
+ root_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
+ try {
+ loginAs(testUser);
+ test_user_conn.tableOperations().removeProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey());
+ throw new IllegalStateException("Should NOT be able to remove a table property");
+ } catch (AccumuloSecurityException e) {
+ loginAs(rootUser);
+ if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
+ || !map(root_conn.tableOperations().getProperties(tableName)).get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
+ throw e;
+ }
+ String table2 = tableName + "2";
+ try {
+ loginAs(testUser);
+ test_user_conn.tableOperations().rename(tableName, table2);
+ throw new IllegalStateException("Should NOT be able to rename a table");
+ } catch (AccumuloSecurityException e) {
+ loginAs(rootUser);
+ if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.tableOperations().list().contains(tableName)
+ || root_conn.tableOperations().list().contains(table2))
+ throw e;
+ }
+ break;
+ case CREATE_USER:
+ user = "__CREATE_USER_WITHOUT_PERM_TEST__";
+ try {
+ loginAs(testUser);
+ test_user_conn.securityOperations().createLocalUser(user, (passwordBased ? new PasswordToken(password) : null));
+ throw new IllegalStateException("Should NOT be able to create a user");
+ } catch (AccumuloSecurityException e) {
+ AuthenticationToken userToken = testUser.getToken();
+ loginAs(rootUser);
+ if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
+ || (userToken instanceof PasswordToken && root_conn.securityOperations().authenticateUser(user, userToken)))
+ throw e;
+ }
+ break;
+ case DROP_USER:
+ user = "__DROP_USER_WITHOUT_PERM_TEST__";
+ loginAs(rootUser);
+ root_conn.securityOperations().createLocalUser(user, (passwordBased ? new PasswordToken(password) : null));
+ try {
+ loginAs(testUser);
+ test_user_conn.securityOperations().dropLocalUser(user);
+ throw new IllegalStateException("Should NOT be able to delete a user");
+ } catch (AccumuloSecurityException e) {
+ loginAs(rootUser);
+ if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.securityOperations().listLocalUsers().contains(user)) {
+ log.info("Failed to authenticate as " + user);
+ throw e;
+ }
+ }
+ break;
+ case ALTER_USER:
+ user = "__ALTER_USER_WITHOUT_PERM_TEST__";
+ loginAs(rootUser);
+ root_conn.securityOperations().createLocalUser(user, (passwordBased ? new PasswordToken(password) : null));
+ try {
+ loginAs(testUser);
+ test_user_conn.securityOperations().changeUserAuthorizations(user, new Authorizations("A", "B"));
+ throw new IllegalStateException("Should NOT be able to alter a user");
+ } catch (AccumuloSecurityException e) {
+ loginAs(rootUser);
+ if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.securityOperations().getUserAuthorizations(user).isEmpty())
+ throw e;
+ }
+ break;
+ case SYSTEM:
+ // test for system permission would go here
+ break;
+ case CREATE_NAMESPACE:
+ namespace = "__CREATE_NAMESPACE_WITHOUT_PERM_TEST__";
+ try {
+ loginAs(testUser);
+ test_user_conn.namespaceOperations().create(namespace);
+ throw new IllegalStateException("Should NOT be able to create a namespace");
+ } catch (AccumuloSecurityException e) {
+ loginAs(rootUser);
+ if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || root_conn.namespaceOperations().list().contains(namespace))
+ throw e;
+ }
+ break;
+ case DROP_NAMESPACE:
+ namespace = "__DROP_NAMESPACE_WITHOUT_PERM_TEST__";
+ loginAs(rootUser);
+ root_conn.namespaceOperations().create(namespace);
+ try {
+ loginAs(testUser);
+ test_user_conn.namespaceOperations().delete(namespace);
+ throw new IllegalStateException("Should NOT be able to delete a namespace");
+ } catch (AccumuloSecurityException e) {
+ loginAs(rootUser);
+ if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.namespaceOperations().list().contains(namespace))
+ throw e;
+ }
+ break;
+ case ALTER_NAMESPACE:
+ namespace = "__ALTER_NAMESPACE_WITHOUT_PERM_TEST__";
+ loginAs(rootUser);
+ root_conn.namespaceOperations().create(namespace);
+ try {
+ loginAs(testUser);
+ test_user_conn.namespaceOperations().setProperty(namespace, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
+ throw new IllegalStateException("Should NOT be able to set a namespace property");
+ } catch (AccumuloSecurityException e) {
+ loginAs(rootUser);
+ if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
+ || map(root_conn.namespaceOperations().getProperties(namespace)).get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
+ throw e;
+ }
+ loginAs(rootUser);
+ root_conn.namespaceOperations().setProperty(namespace, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
+ try {
+ loginAs(testUser);
+ test_user_conn.namespaceOperations().removeProperty(namespace, Property.TABLE_BLOOM_ERRORRATE.getKey());
+ throw new IllegalStateException("Should NOT be able to remove a namespace property");
+ } catch (AccumuloSecurityException e) {
+ loginAs(rootUser);
+ if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
+ || !map(root_conn.namespaceOperations().getProperties(namespace)).get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
+ throw e;
+ }
+ String namespace2 = namespace + "2";
+ try {
+ loginAs(testUser);
+ test_user_conn.namespaceOperations().rename(namespace, namespace2);
+ throw new IllegalStateException("Should NOT be able to rename a namespace");
+ } catch (AccumuloSecurityException e) {
+ loginAs(rootUser);
+ if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.namespaceOperations().list().contains(namespace)
+ || root_conn.namespaceOperations().list().contains(namespace2))
+ throw e;
+ }
+ break;
+ case OBTAIN_DELEGATION_TOKEN:
+ ClientConfiguration clientConf = cluster.getClientConfig();
+ if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ // TODO Try to obtain a delegation token without the permission
+ }
+ break;
+ case GRANT:
+ loginAs(testUser);
+ try {
+ test_user_conn.securityOperations().grantSystemPermission(testUser.getPrincipal(), SystemPermission.GRANT);
+ throw new IllegalStateException("Should NOT be able to grant System.GRANT to yourself");
+ } catch (AccumuloSecurityException e) {
+ // Expected
+ loginAs(rootUser);
+ assertFalse(root_conn.securityOperations().hasSystemPermission(testUser.getPrincipal(), SystemPermission.GRANT));
+ }
+ break;
+ default:
+ throw new IllegalArgumentException("Unrecognized System Permission: " + perm);
+ }
+ }
+
+ private void testGrantedSystemPermission(String tableNamePrefix, Connector root_conn, ClusterUser rootUser, Connector test_user_conn, ClusterUser testUser,
+ SystemPermission perm) throws Exception {
+ String tableName, user, password = "password", namespace;
+ boolean passwordBased = testUser.getPassword() != null;
+ log.debug("Confirming that the presence of the " + perm + " permission properly permits the user");
+
+ // test permission after granting it
+ switch (perm) {
+ case CREATE_TABLE:
+ tableName = tableNamePrefix + "__CREATE_TABLE_WITH_PERM_TEST__";
+ loginAs(testUser);
+ test_user_conn.tableOperations().create(tableName);
+ loginAs(rootUser);
+ if (!root_conn.tableOperations().list().contains(tableName))
+ throw new IllegalStateException("Should be able to create a table");
+ break;
+ case DROP_TABLE:
+ tableName = tableNamePrefix + "__DROP_TABLE_WITH_PERM_TEST__";
+ loginAs(rootUser);
+ root_conn.tableOperations().create(tableName);
+ loginAs(testUser);
+ test_user_conn.tableOperations().delete(tableName);
+ loginAs(rootUser);
+ if (root_conn.tableOperations().list().contains(tableName))
+ throw new IllegalStateException("Should be able to delete a table");
+ break;
+ case ALTER_TABLE:
+ tableName = tableNamePrefix + "__ALTER_TABLE_WITH_PERM_TEST__";
+ String table2 = tableName + "2";
+ loginAs(rootUser);
+ root_conn.tableOperations().create(tableName);
+ loginAs(testUser);
+ test_user_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
+ loginAs(rootUser);
+ Map<String,String> properties = map(root_conn.tableOperations().getProperties(tableName));
+ if (!properties.get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
+ throw new IllegalStateException("Should be able to set a table property");
+ loginAs(testUser);
+ test_user_conn.tableOperations().removeProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey());
+ loginAs(rootUser);
+ properties = map(root_conn.tableOperations().getProperties(tableName));
+ if (properties.get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
+ throw new IllegalStateException("Should be able to remove a table property");
+ loginAs(testUser);
+ test_user_conn.tableOperations().rename(tableName, table2);
+ loginAs(rootUser);
+ if (root_conn.tableOperations().list().contains(tableName) || !root_conn.tableOperations().list().contains(table2))
+ throw new IllegalStateException("Should be able to rename a table");
+ break;
+ case CREATE_USER:
+ user = "__CREATE_USER_WITH_PERM_TEST__";
+ loginAs(testUser);
+ test_user_conn.securityOperations().createLocalUser(user, (passwordBased ? new PasswordToken(password) : null));
+ loginAs(rootUser);
+ if (passwordBased && !root_conn.securityOperations().authenticateUser(user, new PasswordToken(password)))
+ throw new IllegalStateException("Should be able to create a user");
+ break;
+ case DROP_USER:
+ user = "__DROP_USER_WITH_PERM_TEST__";
+ loginAs(rootUser);
+ root_conn.securityOperations().createLocalUser(user, (passwordBased ? new PasswordToken(password) : null));
+ loginAs(testUser);
+ test_user_conn.securityOperations().dropLocalUser(user);
+ loginAs(rootUser);
+ if (passwordBased && root_conn.securityOperations().authenticateUser(user, new PasswordToken(password)))
+ throw new IllegalStateException("Should be able to delete a user");
+ break;
+ case ALTER_USER:
+ user = "__ALTER_USER_WITH_PERM_TEST__";
+ loginAs(rootUser);
+ root_conn.securityOperations().createLocalUser(user, (passwordBased ? new PasswordToken(password) : null));
+ loginAs(testUser);
+ test_user_conn.securityOperations().changeUserAuthorizations(user, new Authorizations("A", "B"));
+ loginAs(rootUser);
+ if (root_conn.securityOperations().getUserAuthorizations(user).isEmpty())
+ throw new IllegalStateException("Should be able to alter a user");
+ break;
+ case SYSTEM:
+ // test for system permission would go here
+ break;
+ case CREATE_NAMESPACE:
+ namespace = "__CREATE_NAMESPACE_WITH_PERM_TEST__";
+ loginAs(testUser);
+ test_user_conn.namespaceOperations().create(namespace);
+ loginAs(rootUser);
+ if (!root_conn.namespaceOperations().list().contains(namespace))
+ throw new IllegalStateException("Should be able to create a namespace");
+ break;
+ case DROP_NAMESPACE:
+ namespace = "__DROP_NAMESPACE_WITH_PERM_TEST__";
+ loginAs(rootUser);
+ root_conn.namespaceOperations().create(namespace);
+ loginAs(testUser);
+ test_user_conn.namespaceOperations().delete(namespace);
+ loginAs(rootUser);
+ if (root_conn.namespaceOperations().list().contains(namespace))
+ throw new IllegalStateException("Should be able to delete a namespace");
+ break;
+ case ALTER_NAMESPACE:
+ namespace = "__ALTER_NAMESPACE_WITH_PERM_TEST__";
+ String namespace2 = namespace + "2";
+ loginAs(rootUser);
+ root_conn.namespaceOperations().create(namespace);
+ loginAs(testUser);
+ test_user_conn.namespaceOperations().setProperty(namespace, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
+ loginAs(rootUser);
+ Map<String,String> nsProperties = map(root_conn.namespaceOperations().getProperties(namespace));
+ if (!nsProperties.get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
+ throw new IllegalStateException("Should be able to set a namespace property");
+ loginAs(testUser);
+ test_user_conn.namespaceOperations().removeProperty(namespace, Property.TABLE_BLOOM_ERRORRATE.getKey());
+ loginAs(rootUser);
+ nsProperties = map(root_conn.namespaceOperations().getProperties(namespace));
+ if (nsProperties.get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
+ throw new IllegalStateException("Should be able to remove a namespace property");
+ loginAs(testUser);
+ test_user_conn.namespaceOperations().rename(namespace, namespace2);
+ loginAs(rootUser);
+ if (root_conn.namespaceOperations().list().contains(namespace) || !root_conn.namespaceOperations().list().contains(namespace2))
+ throw new IllegalStateException("Should be able to rename a namespace");
+ break;
+ case OBTAIN_DELEGATION_TOKEN:
+ ClientConfiguration clientConf = cluster.getClientConfig();
+ if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ // TODO Try to obtain a delegation token with the permission
+ }
+ break;
+ case GRANT:
+ loginAs(rootUser);
+ root_conn.securityOperations().grantSystemPermission(testUser.getPrincipal(), SystemPermission.GRANT);
+ loginAs(testUser);
+ test_user_conn.securityOperations().grantSystemPermission(testUser.getPrincipal(), SystemPermission.CREATE_TABLE);
+ loginAs(rootUser);
+ assertTrue("Test user should have CREATE_TABLE",
+ root_conn.securityOperations().hasSystemPermission(testUser.getPrincipal(), SystemPermission.CREATE_TABLE));
+ assertTrue("Test user should have GRANT", root_conn.securityOperations().hasSystemPermission(testUser.getPrincipal(), SystemPermission.GRANT));
+ root_conn.securityOperations().revokeSystemPermission(testUser.getPrincipal(), SystemPermission.CREATE_TABLE);
+ break;
+ default:
+ throw new IllegalArgumentException("Unrecognized System Permission: " + perm);
+ }
+ }
+
+ private void verifyHasOnlyTheseSystemPermissions(Connector root_conn, String user, SystemPermission... perms) throws AccumuloException,
+ AccumuloSecurityException {
+ List<SystemPermission> permList = Arrays.asList(perms);
+ for (SystemPermission p : SystemPermission.values()) {
+ if (permList.contains(p)) {
+ // should have these
+ if (!root_conn.securityOperations().hasSystemPermission(user, p))
+ throw new IllegalStateException(user + " SHOULD have system permission " + p);
+ } else {
+ // should not have these
+ if (root_conn.securityOperations().hasSystemPermission(user, p))
+ throw new IllegalStateException(user + " SHOULD NOT have system permission " + p);
+ }
+ }
+ }
+
+ private void verifyHasNoSystemPermissions(Connector root_conn, String user, SystemPermission... perms) throws AccumuloException, AccumuloSecurityException {
+ for (SystemPermission p : perms)
+ if (root_conn.securityOperations().hasSystemPermission(user, p))
+ throw new IllegalStateException(user + " SHOULD NOT have system permission " + p);
+ }
+
+ @Test
+ public void tablePermissionTest() throws Exception {
+ // create the test user
+ ClusterUser testUser = getUser(0), rootUser = getAdminUser();
+
+ String principal = testUser.getPrincipal();
+ AuthenticationToken token = testUser.getToken();
+ PasswordToken passwordToken = null;
+ if (token instanceof PasswordToken) {
+ passwordToken = (PasswordToken) token;
+ }
+ loginAs(rootUser);
+ Connector c = getConnector();
+ c.securityOperations().createLocalUser(principal, passwordToken);
+ loginAs(testUser);
+ Connector test_user_conn = c.getInstance().getConnector(principal, token);
+
+ // check for read-only access to metadata table
+ loginAs(rootUser);
+ verifyHasOnlyTheseTablePermissions(c, c.whoami(), MetadataTable.NAME, TablePermission.READ, TablePermission.ALTER_TABLE);
+ verifyHasOnlyTheseTablePermissions(c, principal, MetadataTable.NAME, TablePermission.READ);
+ String tableName = getUniqueNames(1)[0] + "__TABLE_PERMISSION_TEST__";
+
+ // test each permission
+ for (TablePermission perm : TablePermission.values()) {
+ log.debug("Verifying the " + perm + " permission");
+
+ // test permission before and after granting it
+ createTestTable(c, principal, tableName);
+ loginAs(testUser);
+ testMissingTablePermission(test_user_conn, testUser, perm, tableName);
+ loginAs(rootUser);
+ c.securityOperations().grantTablePermission(principal, tableName, perm);
+ verifyHasOnlyTheseTablePermissions(c, principal, tableName, perm);
+ loginAs(testUser);
+ testGrantedTablePermission(test_user_conn, testUser, perm, tableName);
+
+ loginAs(rootUser);
+ createTestTable(c, principal, tableName);
+ c.securityOperations().revokeTablePermission(principal, tableName, perm);
+ verifyHasNoTablePermissions(c, principal, tableName, perm);
+ }
+ }
+
+ private void createTestTable(Connector c, String testUser, String tableName) throws Exception {
+ if (!c.tableOperations().exists(tableName)) {
+ // create the test table
+ c.tableOperations().create(tableName);
+ // put in some initial data
+ BatchWriter writer = c.createBatchWriter(tableName, new BatchWriterConfig());
+ Mutation m = new Mutation(new Text("row"));
+ m.put(new Text("cf"), new Text("cq"), new Value("val".getBytes()));
+ writer.addMutation(m);
+ writer.close();
+
+ // verify proper permissions for creator and test user
+ verifyHasOnlyTheseTablePermissions(c, c.whoami(), tableName, TablePermission.values());
+ verifyHasNoTablePermissions(c, testUser, tableName, TablePermission.values());
+
+ }
+ }
+
+ private void testMissingTablePermission(Connector test_user_conn, ClusterUser testUser, TablePermission perm, String tableName) throws Exception {
+ Scanner scanner;
+ BatchWriter writer;
+ Mutation m;
+ log.debug("Confirming that the lack of the " + perm + " permission properly restricts the user");
+
+ // test permission prior to granting it
+ switch (perm) {
+ case READ:
+ try {
+ scanner = test_user_conn.createScanner(tableName, Authorizations.EMPTY);
+ int i = 0;
+ for (Entry<Key,Value> entry : scanner)
+ i += 1 + entry.getKey().getRowData().length();
+ if (i != 0)
+ throw new IllegalStateException("Should NOT be able to read from the table");
+ } catch (RuntimeException e) {
+ AccumuloSecurityException se = (AccumuloSecurityException) e.getCause();
+ if (se.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
+ throw se;
+ }
+ break;
+ case WRITE:
+ try {
+ writer = test_user_conn.createBatchWriter(tableName, new BatchWriterConfig());
+ m = new Mutation(new Text("row"));
+ m.put(new Text("a"), new Text("b"), new Value("c".getBytes()));
+ writer.addMutation(m);
+ try {
+ writer.close();
+ } catch (MutationsRejectedException e1) {
+ if (e1.getSecurityErrorCodes().size() > 0)
+ throw new AccumuloSecurityException(test_user_conn.whoami(), org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode.PERMISSION_DENIED, e1);
+ }
+ throw new IllegalStateException("Should NOT be able to write to a table");
+ } catch (AccumuloSecurityException e) {
+ if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
+ throw e;
+ }
+ break;
+ case BULK_IMPORT:
+ // test for bulk import permission would go here
+ break;
+ case ALTER_TABLE:
+ Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
+ groups.put("tgroup", new HashSet<Text>(Arrays.asList(new Text("t1"), new Text("t2"))));
+ try {
+ test_user_conn.tableOperations().setLocalityGroups(tableName, groups);
+ throw new IllegalStateException("User should not be able to set locality groups");
+ } catch (AccumuloSecurityException e) {
+ if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
+ throw e;
+ }
+ break;
+ case DROP_TABLE:
+ try {
+ test_user_conn.tableOperations().delete(tableName);
+ throw new IllegalStateException("User should not be able delete the table");
+ } catch (AccumuloSecurityException e) {
+ if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
+ throw e;
+ }
+ break;
+ case GRANT:
+ try {
+ test_user_conn.securityOperations().grantTablePermission(getAdminPrincipal(), tableName, TablePermission.GRANT);
+ throw new IllegalStateException("User should not be able grant permissions");
+ } catch (AccumuloSecurityException e) {
+ if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
+ throw e;
+ }
+ break;
+ default:
+ throw new IllegalArgumentException("Unrecognized table Permission: " + perm);
+ }
+ }
+
+ private void testGrantedTablePermission(Connector test_user_conn, ClusterUser normalUser, TablePermission perm, String tableName) throws AccumuloException,
+ TableExistsException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
+ Scanner scanner;
+ BatchWriter writer;
+ Mutation m;
+ log.debug("Confirming that the presence of the " + perm + " permission properly permits the user");
+
+ // test permission after granting it
+ switch (perm) {
+ case READ:
+ scanner = test_user_conn.createScanner(tableName, Authorizations.EMPTY);
+ Iterator<Entry<Key,Value>> iter = scanner.iterator();
+ while (iter.hasNext())
+ iter.next();
+ break;
+ case WRITE:
+ writer = test_user_conn.createBatchWriter(tableName, new BatchWriterConfig());
+ m = new Mutation(new Text("row"));
+ m.put(new Text("a"), new Text("b"), new Value("c".getBytes()));
+ writer.addMutation(m);
+ writer.close();
+ break;
+ case BULK_IMPORT:
+ // test for bulk import permission would go here
+ break;
+ case ALTER_TABLE:
+ Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
+ groups.put("tgroup", new HashSet<Text>(Arrays.asList(new Text("t1"), new Text("t2"))));
+ // with ALTER_TABLE granted, setting locality groups should now succeed
+ test_user_conn.tableOperations().setLocalityGroups(tableName, groups);
+ break;
+ case DROP_TABLE:
+ test_user_conn.tableOperations().delete(tableName);
+ break;
+ case GRANT:
+ test_user_conn.securityOperations().grantTablePermission(getAdminPrincipal(), tableName, TablePermission.GRANT);
+ break;
+ default:
+ throw new IllegalArgumentException("Unrecognized table Permission: " + perm);
+ }
+ }
+
+ private void verifyHasOnlyTheseTablePermissions(Connector root_conn, String user, String table, TablePermission... perms) throws AccumuloException,
+ AccumuloSecurityException {
+ List<TablePermission> permList = Arrays.asList(perms);
+ for (TablePermission p : TablePermission.values()) {
+ if (permList.contains(p)) {
+ // should have these
+ if (!root_conn.securityOperations().hasTablePermission(user, table, p))
+ throw new IllegalStateException(user + " SHOULD have table permission " + p + " for table " + table);
+ } else {
+ // should not have these
+ if (root_conn.securityOperations().hasTablePermission(user, table, p))
+ throw new IllegalStateException(user + " SHOULD NOT have table permission " + p + " for table " + table);
+ }
+ }
+ }
+
+ private void verifyHasNoTablePermissions(Connector root_conn, String user, String table, TablePermission... perms) throws AccumuloException,
+ AccumuloSecurityException {
+ for (TablePermission p : perms)
+ if (root_conn.securityOperations().hasTablePermission(user, table, p))
+ throw new IllegalStateException(user + " SHOULD NOT have table permission " + p + " for table " + table);
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/replication/UnorderedWorkAssignerReplicationIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/UnorderedWorkAssignerReplicationIT.java b/test/src/main/java/org/apache/accumulo/test/replication/UnorderedWorkAssignerReplicationIT.java
new file mode 100644
index 0000000..88224b5
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/replication/UnorderedWorkAssignerReplicationIT.java
@@ -0,0 +1,731 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.replication;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.PartialKey;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.ReplicationSection;
+import org.apache.accumulo.core.protobuf.ProtobufUtil;
+import org.apache.accumulo.core.replication.ReplicationSchema.WorkSection;
+import org.apache.accumulo.core.replication.ReplicationTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.master.replication.UnorderedWorkAssigner;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.minicluster.impl.ProcessReference;
+import org.apache.accumulo.server.replication.ReplicaSystemFactory;
+import org.apache.accumulo.server.replication.StatusUtil;
+import org.apache.accumulo.server.replication.proto.Replication.Status;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.accumulo.tserver.TabletServer;
+import org.apache.accumulo.tserver.replication.AccumuloReplicaSystem;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Iterators;
+
+public class UnorderedWorkAssignerReplicationIT extends ConfigurableMacBase {
+ private static final Logger log = LoggerFactory.getLogger(UnorderedWorkAssignerReplicationIT.class);
+
+ private ExecutorService executor;
+ private int timeoutFactor = 1;
+
+ @Before
+ public void createExecutor() {
+ executor = Executors.newSingleThreadExecutor();
+
+ try {
+ timeoutFactor = Integer.parseInt(System.getProperty("timeout.factor"));
+ } catch (NumberFormatException exception) {
+ log.warn("Could not parse timeout.factor, not increasing timeout.");
+ }
+
+ Assert.assertTrue("The timeout factor must be a positive, non-zero value", timeoutFactor > 0);
+ }
+
+ @After
+ public void stopExecutor() {
+ if (null != executor) {
+ executor.shutdownNow();
+ }
+ }
+
+ @Override
+ public int defaultTimeoutSeconds() {
+ return 60 * 5;
+ }
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setNumTservers(1);
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "10s");
+ cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "2M");
+ cfg.setProperty(Property.GC_CYCLE_START, "1s");
+ cfg.setProperty(Property.GC_CYCLE_DELAY, "5s");
+ cfg.setProperty(Property.REPLICATION_WORK_ASSIGNMENT_SLEEP, "1s");
+ cfg.setProperty(Property.MASTER_REPLICATION_SCAN_INTERVAL, "1s");
+ cfg.setProperty(Property.REPLICATION_MAX_UNIT_SIZE, "8M");
+ cfg.setProperty(Property.REPLICATION_NAME, "master");
+ cfg.setProperty(Property.REPLICATION_WORK_ASSIGNER, UnorderedWorkAssigner.class.getName());
+ cfg.setProperty(Property.TSERV_TOTAL_MUTATION_QUEUE_MAX, "1M");
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ /**
+ * Use the same SSL and credential provider configuration that is set up by AbstractMacIT for the other MAC used for replication
+ */
+ private void updatePeerConfigFromPrimary(MiniAccumuloConfigImpl primaryCfg, MiniAccumuloConfigImpl peerCfg) {
+ // Set the same SSL information from the primary when present
+ Map<String,String> primarySiteConfig = primaryCfg.getSiteConfig();
+ if ("true".equals(primarySiteConfig.get(Property.INSTANCE_RPC_SSL_ENABLED.getKey()))) {
+ Map<String,String> peerSiteConfig = new HashMap<String,String>();
+ peerSiteConfig.put(Property.INSTANCE_RPC_SSL_ENABLED.getKey(), "true");
+ String keystorePath = primarySiteConfig.get(Property.RPC_SSL_KEYSTORE_PATH.getKey());
+ Assert.assertNotNull("Keystore Path was null", keystorePath);
+ peerSiteConfig.put(Property.RPC_SSL_KEYSTORE_PATH.getKey(), keystorePath);
+ String truststorePath = primarySiteConfig.get(Property.RPC_SSL_TRUSTSTORE_PATH.getKey());
+ Assert.assertNotNull("Truststore Path was null", truststorePath);
+ peerSiteConfig.put(Property.RPC_SSL_TRUSTSTORE_PATH.getKey(), truststorePath);
+
+ // Passwords might be stored in CredentialProvider
+ String keystorePassword = primarySiteConfig.get(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey());
+ if (null != keystorePassword) {
+ peerSiteConfig.put(Property.RPC_SSL_KEYSTORE_PASSWORD.getKey(), keystorePassword);
+ }
+ String truststorePassword = primarySiteConfig.get(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey());
+ if (null != truststorePassword) {
+ peerSiteConfig.put(Property.RPC_SSL_TRUSTSTORE_PASSWORD.getKey(), truststorePassword);
+ }
+
+ System.out.println("Setting site configuration for peer " + peerSiteConfig);
+ peerCfg.setSiteConfig(peerSiteConfig);
+ }
+
+ // Use the CredentialProvider if the primary also uses one
+ String credProvider = primarySiteConfig.get(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey());
+ if (null != credProvider) {
+ Map<String,String> peerSiteConfig = peerCfg.getSiteConfig();
+ peerSiteConfig.put(Property.GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS.getKey(), credProvider);
+ peerCfg.setSiteConfig(peerSiteConfig);
+ }
+ }
+
+ @Test
+ public void dataWasReplicatedToThePeer() throws Exception {
+ MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
+ ROOT_PASSWORD);
+ peerCfg.setNumTservers(1);
+ peerCfg.setInstanceName("peer");
+ updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
+ peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
+ MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg);
+
+ peerCluster.start();
+
+ try {
+ final Connector connMaster = getConnector();
+ final Connector connPeer = peerCluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
+
+ ReplicationTable.setOnline(connMaster);
+
+ String peerUserName = "peer", peerPassword = "foo";
+
+ String peerClusterName = "peer";
+
+ connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
+
+ connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
+ connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
+
+ // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
+ connMaster.instanceOperations().setProperty(
+ Property.REPLICATION_PEERS.getKey() + peerClusterName,
+ ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
+ AccumuloReplicaSystem.buildConfiguration(peerCluster.getInstanceName(), peerCluster.getZooKeepers())));
+
+ final String masterTable = "master", peerTable = "peer";
+
+ connMaster.tableOperations().create(masterTable);
+ String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable);
+ Assert.assertNotNull(masterTableId);
+
+ connPeer.tableOperations().create(peerTable);
+ String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable);
+ Assert.assertNotNull(peerTableId);
+
+ connPeer.securityOperations().grantTablePermission(peerUserName, peerTable, TablePermission.WRITE);
+
+ // Replicate this table to the peerClusterName in a table with the peerTableId table id
+ connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION.getKey(), "true");
+ connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId);
+
+ // Wait for zookeeper updates (configuration) to propagate
+ UtilWaitThread.sleep(3 * 1000);
+
+ // Write some data to table1
+ BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig());
+ for (int rows = 0; rows < 5000; rows++) {
+ Mutation m = new Mutation(Integer.toString(rows));
+ for (int cols = 0; cols < 100; cols++) {
+ String value = Integer.toString(cols);
+ m.put(value, "", value);
+ }
+ bw.addMutation(m);
+ }
+
+ bw.close();
+
+ log.info("Wrote all data to master cluster");
+
+ final Set<String> filesNeedingReplication = connMaster.replicationOperations().referencedFiles(masterTable);
+
+ for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
+ cluster.killProcess(ServerType.TABLET_SERVER, proc);
+ }
+ cluster.exec(TabletServer.class);
+
+ log.info("TabletServer restarted");
+ Iterators.size(ReplicationTable.getScanner(connMaster).iterator());
+ log.info("TabletServer is online");
+
+ log.info("");
+ log.info("Fetching metadata records:");
+ for (Entry<Key,Value> kv : connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
+ if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
+ log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
+ } else {
+ log.info(kv.getKey().toStringNoTruncate() + " " + kv.getValue());
+ }
+ }
+
+ log.info("");
+ log.info("Fetching replication records:");
+ for (Entry<Key,Value> kv : ReplicationTable.getScanner(connMaster)) {
+ log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
+ }
+
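+ // drain() blocks until the given files have replicated, so run it on the executor and
+ // bound it with a timeout below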
+ Future<Boolean> future = executor.submit(new Callable<Boolean>() {
+
+ @Override
+ public Boolean call() throws Exception {
+ connMaster.replicationOperations().drain(masterTable, filesNeedingReplication);
+ log.info("Drain completed");
+ return true;
+ }
+
+ });
+
+ long timeoutSeconds = timeoutFactor * 30;
+ try {
+ future.get(timeoutSeconds, TimeUnit.SECONDS);
+ } catch (TimeoutException e) {
+ future.cancel(true);
+ Assert.fail("Drain did not finish within " + timeoutSeconds + " seconds");
+ }
+
+ log.info("drain completed");
+
+ log.info("");
+ log.info("Fetching metadata records:");
+ for (Entry<Key,Value> kv : connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
+ if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
+ log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
+ } else {
+ log.info(kv.getKey().toStringNoTruncate() + " " + kv.getValue());
+ }
+ }
+
+ log.info("");
+ log.info("Fetching replication records:");
+ for (Entry<Key,Value> kv : ReplicationTable.getScanner(connMaster)) {
+ log.info(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
+ }
+
+ Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY), peer = connPeer.createScanner(peerTable, Authorizations.EMPTY);
+ Iterator<Entry<Key,Value>> masterIter = master.iterator(), peerIter = peer.iterator();
+ Entry<Key,Value> masterEntry = null, peerEntry = null;
+ while (masterIter.hasNext() && peerIter.hasNext()) {
+ masterEntry = masterIter.next();
+ peerEntry = peerIter.next();
+ Assert.assertEquals(masterEntry.getKey() + " was not equal to " + peerEntry.getKey(), 0,
+ masterEntry.getKey().compareTo(peerEntry.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS));
+ Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue());
+ }
+
+ log.info("Last master entry: " + masterEntry);
+ log.info("Last peer entry: " + peerEntry);
+
+ Assert.assertFalse("Had more data to read from the master", masterIter.hasNext());
+ Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext());
+ } finally {
+ peerCluster.stop();
+ }
+ }
+
+ @Test
+ public void dataReplicatedToCorrectTable() throws Exception {
+ MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
+ ROOT_PASSWORD);
+ peerCfg.setNumTservers(1);
+ peerCfg.setInstanceName("peer");
+ updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
+ peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
+ MiniAccumuloClusterImpl peer1Cluster = new MiniAccumuloClusterImpl(peerCfg);
+
+ peer1Cluster.start();
+
+ try {
+ Connector connMaster = getConnector();
+ Connector connPeer = peer1Cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
+
+ String peerClusterName = "peer";
+ String peerUserName = "peer", peerPassword = "foo";
+
+ // Create local user
+ connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
+
+ connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
+ connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
+
+ // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
+ connMaster.instanceOperations().setProperty(
+ Property.REPLICATION_PEERS.getKey() + peerClusterName,
+ ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
+ AccumuloReplicaSystem.buildConfiguration(peer1Cluster.getInstanceName(), peer1Cluster.getZooKeepers())));
+
+ String masterTable1 = "master1", peerTable1 = "peer1", masterTable2 = "master2", peerTable2 = "peer2";
+
+ // Create tables
+ connMaster.tableOperations().create(masterTable1);
+ String masterTableId1 = connMaster.tableOperations().tableIdMap().get(masterTable1);
+ Assert.assertNotNull(masterTableId1);
+
+ connMaster.tableOperations().create(masterTable2);
+ String masterTableId2 = connMaster.tableOperations().tableIdMap().get(masterTable2);
+ Assert.assertNotNull(masterTableId2);
+
+ connPeer.tableOperations().create(peerTable1);
+ String peerTableId1 = connPeer.tableOperations().tableIdMap().get(peerTable1);
+ Assert.assertNotNull(peerTableId1);
+
+ connPeer.tableOperations().create(peerTable2);
+ String peerTableId2 = connPeer.tableOperations().tableIdMap().get(peerTable2);
+ Assert.assertNotNull(peerTableId2);
+
+ // Grant write permission
+ connPeer.securityOperations().grantTablePermission(peerUserName, peerTable1, TablePermission.WRITE);
+ connPeer.securityOperations().grantTablePermission(peerUserName, peerTable2, TablePermission.WRITE);
+
+ // Replicate these tables to the peer cluster, each into the table with the corresponding peer table id
+ connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION.getKey(), "true");
+ connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId1);
+
+ connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION.getKey(), "true");
+ connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId2);
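+ // Each table.replication.target.<peerClusterName> property maps a source table to the id
+ // of the destination table on that peer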
+
+ // Wait for zookeeper updates (configuration) to propagate
+ UtilWaitThread.sleep(3 * 1000);
+
+ // Write some data to table1
+ BatchWriter bw = connMaster.createBatchWriter(masterTable1, new BatchWriterConfig());
+ long masterTable1Records = 0L;
+ for (int rows = 0; rows < 2500; rows++) {
+ Mutation m = new Mutation(masterTable1 + rows);
+ for (int cols = 0; cols < 100; cols++) {
+ String value = Integer.toString(cols);
+ m.put(value, "", value);
+ masterTable1Records++;
+ }
+ bw.addMutation(m);
+ }
+
+ bw.close();
+
+ // Write some data to table2
+ bw = connMaster.createBatchWriter(masterTable2, new BatchWriterConfig());
+ long masterTable2Records = 0L;
+ for (int rows = 0; rows < 2500; rows++) {
+ Mutation m = new Mutation(masterTable2 + rows);
+ for (int cols = 0; cols < 100; cols++) {
+ String value = Integer.toString(cols);
+ m.put(value, "", value);
+ masterTable2Records++;
+ }
+ bw.addMutation(m);
+ }
+
+ bw.close();
+
+ log.info("Wrote all data to master cluster");
+
+ Set<String> filesFor1 = connMaster.replicationOperations().referencedFiles(masterTable1), filesFor2 = connMaster.replicationOperations().referencedFiles(
+ masterTable2);
+
+ while (!ReplicationTable.isOnline(connMaster)) {
+ Thread.sleep(500);
+ }
+
+ // Restart the tserver to force a close on the WAL
+ for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
+ cluster.killProcess(ServerType.TABLET_SERVER, proc);
+ }
+ cluster.exec(TabletServer.class);
+
+ log.info("Restarted the tserver");
+
+ // Read the data -- the tserver is back up and running
+ Iterators.size(connMaster.createScanner(masterTable1, Authorizations.EMPTY).iterator());
+
+ // Wait for both tables to be replicated
+ log.info("Waiting for {} for {}", filesFor1, masterTable1);
+ connMaster.replicationOperations().drain(masterTable1, filesFor1);
+
+ log.info("Waiting for {} for {}", filesFor2, masterTable2);
+ connMaster.replicationOperations().drain(masterTable2, filesFor2);
+
+ long countTable = 0L;
+ for (int i = 0; i < 5; i++) {
+ countTable = 0L;
+ for (Entry<Key,Value> entry : connPeer.createScanner(peerTable1, Authorizations.EMPTY)) {
+ countTable++;
+ Assert.assertTrue("Found unexpected key-value" + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString()
+ .startsWith(masterTable1));
+ }
+
+ log.info("Found {} records in {}", countTable, peerTable1);
+
+ if (masterTable1Records != countTable) {
+ log.warn("Did not find {} expected records in {}, only found {}", masterTable1Records, peerTable1, countTable);
+ }
+ }
+
+ Assert.assertEquals(masterTable1Records, countTable);
+
+ for (int i = 0; i < 5; i++) {
+ countTable = 0L;
+ for (Entry<Key,Value> entry : connPeer.createScanner(peerTable2, Authorizations.EMPTY)) {
+ countTable++;
+ Assert.assertTrue("Found unexpected key-value" + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString()
+ .startsWith(masterTable2));
+ }
+
+ log.info("Found {} records in {}", countTable, peerTable2);
+
+ if (masterTable2Records != countTable) {
+ log.warn("Did not find {} expected records in {}, only found {}", masterTable2Records, peerTable2, countTable);
+ }
+ }
+
+ Assert.assertEquals(masterTable2Records, countTable);
+
+ } finally {
+ peer1Cluster.stop();
+ }
+ }
+
+ @Test
+ public void dataWasReplicatedToThePeerWithoutDrain() throws Exception {
+ MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
+ ROOT_PASSWORD);
+ peerCfg.setNumTservers(1);
+ peerCfg.setInstanceName("peer");
+ updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
+ peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
+ MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg);
+
+ peerCluster.start();
+
+ Connector connMaster = getConnector();
+ Connector connPeer = peerCluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
+
+ String peerUserName = "repl";
+ String peerPassword = "passwd";
+
+ // Create a user on the peer for replication to use
+ connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
+
+ String peerClusterName = "peer";
+
+ // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
+ connMaster.instanceOperations().setProperty(
+ Property.REPLICATION_PEERS.getKey() + peerClusterName,
+ ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
+ AccumuloReplicaSystem.buildConfiguration(peerCluster.getInstanceName(), peerCluster.getZooKeepers())));
+
+ // Configure the credentials we should use to authenticate ourselves to the peer for replication
+ connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
+ connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
+
+ String masterTable = "master", peerTable = "peer";
+
+ connMaster.tableOperations().create(masterTable);
+ String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable);
+ Assert.assertNotNull(masterTableId);
+
+ connPeer.tableOperations().create(peerTable);
+ String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable);
+ Assert.assertNotNull(peerTableId);
+
+ // Give our replication user the ability to write to the table
+ connPeer.securityOperations().grantTablePermission(peerUserName, peerTable, TablePermission.WRITE);
+
+ // Replicate this table to the peer cluster, into the table identified by peerTableId
+ connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION.getKey(), "true");
+ connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId);
+
+ // Write some data to table1
+ BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig());
+ for (int rows = 0; rows < 5000; rows++) {
+ Mutation m = new Mutation(Integer.toString(rows));
+ for (int cols = 0; cols < 100; cols++) {
+ String value = Integer.toString(cols);
+ m.put(value, "", value);
+ }
+ bw.addMutation(m);
+ }
+
+ bw.close();
+
+ log.info("Wrote all data to master cluster");
+
+ Set<String> files = connMaster.replicationOperations().referencedFiles(masterTable);
+ for (String s : files) {
+ log.info("Found referenced file for " + masterTable + ": " + s);
+ }
+
+ for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
+ cluster.killProcess(ServerType.TABLET_SERVER, proc);
+ }
+
+ cluster.exec(TabletServer.class);
+
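+ // Consume a full scan of the table to wait for the restarted tserver to come back online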
+ Iterators.size(connMaster.createScanner(masterTable, Authorizations.EMPTY).iterator());
+
+ for (Entry<Key,Value> kv : connMaster.createScanner(ReplicationTable.NAME, Authorizations.EMPTY)) {
+ log.debug(kv.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
+ }
+
+ connMaster.replicationOperations().drain(masterTable, files);
+
+ Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY), peer = connPeer.createScanner(peerTable, Authorizations.EMPTY);
+ Iterator<Entry<Key,Value>> masterIter = master.iterator(), peerIter = peer.iterator();
+ Assert.assertTrue("No data in master table", masterIter.hasNext());
+ Assert.assertTrue("No data in peer table", peerIter.hasNext());
+ while (masterIter.hasNext() && peerIter.hasNext()) {
+ Entry<Key,Value> masterEntry = masterIter.next(), peerEntry = peerIter.next();
+ Assert.assertEquals(masterEntry.getKey() + " was not equal to " + peerEntry.getKey(), 0,
+ masterEntry.getKey().compareTo(peerEntry.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS));
+ Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue());
+ }
+
+ Assert.assertFalse("Had more data to read from the master", masterIter.hasNext());
+ Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext());
+
+ peerCluster.stop();
+ }
+
+ @Test
+ public void dataReplicatedToCorrectTableWithoutDrain() throws Exception {
+ MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"),
+ ROOT_PASSWORD);
+ peerCfg.setNumTservers(1);
+ peerCfg.setInstanceName("peer");
+ updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
+ peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
+ MiniAccumuloClusterImpl peer1Cluster = new MiniAccumuloClusterImpl(peerCfg);
+
+ peer1Cluster.start();
+
+ try {
+ Connector connMaster = getConnector();
+ Connector connPeer = peer1Cluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
+
+ String peerClusterName = "peer";
+
+ String peerUserName = "repl";
+ String peerPassword = "passwd";
+
+ // Create a user on the peer for replication to use
+ connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
+
+ // Configure the credentials we should use to authenticate ourselves to the peer for replication
+ connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
+ connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
+
+ // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
+ connMaster.instanceOperations().setProperty(
+ Property.REPLICATION_PEERS.getKey() + peerClusterName,
+ ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
+ AccumuloReplicaSystem.buildConfiguration(peer1Cluster.getInstanceName(), peer1Cluster.getZooKeepers())));
+
+ String masterTable1 = "master1", peerTable1 = "peer1", masterTable2 = "master2", peerTable2 = "peer2";
+
+ connMaster.tableOperations().create(masterTable1);
+ String masterTableId1 = connMaster.tableOperations().tableIdMap().get(masterTable1);
+ Assert.assertNotNull(masterTableId1);
+
+ connMaster.tableOperations().create(masterTable2);
+ String masterTableId2 = connMaster.tableOperations().tableIdMap().get(masterTable2);
+ Assert.assertNotNull(masterTableId2);
+
+ connPeer.tableOperations().create(peerTable1);
+ String peerTableId1 = connPeer.tableOperations().tableIdMap().get(peerTable1);
+ Assert.assertNotNull(peerTableId1);
+
+ connPeer.tableOperations().create(peerTable2);
+ String peerTableId2 = connPeer.tableOperations().tableIdMap().get(peerTable2);
+ Assert.assertNotNull(peerTableId2);
+
+ // Give our replication user the ability to write to the tables
+ connPeer.securityOperations().grantTablePermission(peerUserName, peerTable1, TablePermission.WRITE);
+ connPeer.securityOperations().grantTablePermission(peerUserName, peerTable2, TablePermission.WRITE);
+
+ // Replicate these tables to the peer cluster, each into the table with the corresponding peer table id
+ connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION.getKey(), "true");
+ connMaster.tableOperations().setProperty(masterTable1, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId1);
+
+ connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION.getKey(), "true");
+ connMaster.tableOperations().setProperty(masterTable2, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId2);
+
+ // Wait for zookeeper updates (configuration) to propagate
+ UtilWaitThread.sleep(3 * 1000);
+
+ // Write some data to table1
+ BatchWriter bw = connMaster.createBatchWriter(masterTable1, new BatchWriterConfig());
+ for (int rows = 0; rows < 2500; rows++) {
+ Mutation m = new Mutation(masterTable1 + rows);
+ for (int cols = 0; cols < 100; cols++) {
+ String value = Integer.toString(cols);
+ m.put(value, "", value);
+ }
+ bw.addMutation(m);
+ }
+
+ bw.close();
+
+ // Write some data to table2
+ bw = connMaster.createBatchWriter(masterTable2, new BatchWriterConfig());
+ for (int rows = 0; rows < 2500; rows++) {
+ Mutation m = new Mutation(masterTable2 + rows);
+ for (int cols = 0; cols < 100; cols++) {
+ String value = Integer.toString(cols);
+ m.put(value, "", value);
+ }
+ bw.addMutation(m);
+ }
+
+ bw.close();
+
+ log.info("Wrote all data to master cluster");
+
+ while (!ReplicationTable.isOnline(connMaster)) {
+ Thread.sleep(500);
+ }
+
+ for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
+ cluster.killProcess(ServerType.TABLET_SERVER, proc);
+ }
+
+ cluster.exec(TabletServer.class);
+
+ // Wait until we fully replicated something
+ boolean fullyReplicated = false;
+ for (int i = 0; i < 10 && !fullyReplicated; i++) {
+ UtilWaitThread.sleep(timeoutFactor * 2000);
+
+ Scanner s = ReplicationTable.getScanner(connMaster);
+ WorkSection.limit(s);
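+ // The work section tracks replication progress per file and target; a single
+ // fully-replicated status entry is enough to proceed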
+ for (Entry<Key,Value> entry : s) {
+ Status status = Status.parseFrom(entry.getValue().get());
+ if (StatusUtil.isFullyReplicated(status)) {
+ fullyReplicated = true;
+ }
+ }
+ }
+
+ Assert.assertTrue("Found no files that were fully replicated", fullyReplicated);
+
+ long countTable = 0L;
+
+ // Check a few times
+ for (int i = 0; i < 10; i++) {
+ countTable = 0L;
+ for (Entry<Key,Value> entry : connPeer.createScanner(peerTable1, Authorizations.EMPTY)) {
+ countTable++;
+ Assert.assertTrue("Found unexpected key-value" + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString()
+ .startsWith(masterTable1));
+ }
+ log.info("Found {} records in {}", countTable, peerTable1);
+ if (0 < countTable) {
+ break;
+ }
+ Thread.sleep(2000);
+ }
+
+ Assert.assertTrue("Did not find any records in " + peerTable1 + " on peer", countTable > 0);
+
+ for (int i = 0; i < 10; i++) {
+ countTable = 0L;
+ for (Entry<Key,Value> entry : connPeer.createScanner(peerTable2, Authorizations.EMPTY)) {
+ countTable++;
+ Assert.assertTrue("Found unexpected key-value" + entry.getKey().toStringNoTruncate() + " " + entry.getValue(), entry.getKey().getRow().toString()
+ .startsWith(masterTable2));
+ }
+
+ log.info("Found {} records in {}", countTable, peerTable2);
+ if (0 < countTable) {
+ break;
+ }
+ Thread.sleep(2000);
+ }
+ Assert.assertTrue("Did not find any records in " + peerTable2 + " on peer", countTable > 0);
+
+ } finally {
+ peer1Cluster.stop();
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/replication/UnusedWalDoesntCloseReplicationStatusIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/replication/UnusedWalDoesntCloseReplicationStatusIT.java b/test/src/main/java/org/apache/accumulo/test/replication/UnusedWalDoesntCloseReplicationStatusIT.java
new file mode 100644
index 0000000..59197de
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/replication/UnusedWalDoesntCloseReplicationStatusIT.java
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.replication;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.accumulo.tserver.logger.LogEvents.OPEN;
+
+import java.io.DataOutputStream;
+import java.io.File;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Map.Entry;
+import java.util.UUID;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.impl.KeyExtent;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.protobuf.ProtobufUtil;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.data.ServerMutation;
+import org.apache.accumulo.server.replication.ReplicaSystemFactory;
+import org.apache.accumulo.server.replication.StatusUtil;
+import org.apache.accumulo.server.replication.proto.Replication.Status;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.apache.accumulo.tserver.log.DfsLogger;
+import org.apache.accumulo.tserver.logger.LogEvents;
+import org.apache.accumulo.tserver.logger.LogFileKey;
+import org.apache.accumulo.tserver.logger.LogFileValue;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.common.collect.Iterables;
+
+public class UnusedWalDoesntCloseReplicationStatusIT extends ConfigurableMacBase {
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
+ cfg.setNumTservers(1);
+ }
+
+ @Test
+ public void test() throws Exception {
+ File accumuloDir = this.getCluster().getConfig().getAccumuloDir();
+ final Connector conn = getConnector();
+ final String tableName = getUniqueNames(1)[0];
+
+ conn.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
+ conn.tableOperations().create(tableName);
+
+ final String tableId = conn.tableOperations().tableIdMap().get(tableName);
+ Assert.assertNotNull("Did not find table ID", tableId);
+
+ final int numericTableId = Integer.parseInt(tableId);
+ final int fakeTableId = numericTableId + 1;
+
+ conn.tableOperations().setProperty(tableName, Property.TABLE_REPLICATION.getKey(), "true");
+ conn.tableOperations().setProperty(tableName, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
+ // The peer is a mock replica system that just sleeps (for 50 seconds) instead of replicating
+ conn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1",
+ ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, "50000"));
+
+ FileSystem fs = FileSystem.getLocal(new Configuration());
+ File tserverWalDir = new File(accumuloDir, ServerConstants.WAL_DIR + Path.SEPARATOR + "faketserver+port");
+ File tserverWal = new File(tserverWalDir, UUID.randomUUID().toString());
+ fs.mkdirs(new Path(tserverWalDir.getAbsolutePath()));
+
+ // Make a fake WAL with no data in it for our real table
+ FSDataOutputStream out = fs.create(new Path(tserverWal.getAbsolutePath()));
+
+ out.write(DfsLogger.LOG_FILE_HEADER_V3.getBytes(UTF_8));
+
+ DataOutputStream dos = new DataOutputStream(out);
+ dos.writeUTF("NullCryptoModule");
+
+ // Fake a single update WAL that has a mutation for another table
+ LogFileKey key = new LogFileKey();
+ LogFileValue value = new LogFileValue();
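+ // The fake WAL follows the normal event sequence (OPEN, DEFINE_TABLET, MUTATION,
+ // COMPACTION_START, COMPACTION_FINISH), but every data event targets the fake table id,
+ // so recovery of the real table finds nothing to replay from this log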
+
+ key.event = OPEN;
+ key.tserverSession = tserverWal.getAbsolutePath();
+ key.filename = tserverWal.getAbsolutePath();
+ key.write(out);
+ value.write(out);
+
+ key.event = LogEvents.DEFINE_TABLET;
+ key.tablet = new KeyExtent(new Text(Integer.toString(fakeTableId)), null, null);
+ key.seq = 1L;
+ key.tid = 1;
+
+ key.write(dos);
+ value.write(dos);
+
+ key.tablet = null;
+ key.event = LogEvents.MUTATION;
+ key.filename = tserverWal.getAbsolutePath();
+ value.mutations = Arrays.<Mutation> asList(new ServerMutation(new Text("row")));
+
+ key.write(dos);
+ value.write(dos);
+
+ key.event = LogEvents.COMPACTION_START;
+ key.filename = accumuloDir.getAbsolutePath() + "/tables/" + fakeTableId + "/t-000001/A000001.rf";
+ value.mutations = Collections.emptyList();
+
+ key.write(dos);
+ value.write(dos);
+
+ key.event = LogEvents.COMPACTION_FINISH;
+ value.mutations = Collections.emptyList();
+
+ key.write(dos);
+ value.write(dos);
+
+ dos.close();
+
+ BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
+ Mutation m = new Mutation("m");
+ m.put("m", "m", "M");
+ bw.addMutation(m);
+ bw.close();
+
+ log.info("State of metadata table after inserting a record");
+
+ Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
+ for (Entry<Key,Value> entry : s) {
+ log.info(entry.getKey().toStringNoTruncate() + " " + entry.getValue());
+ }
+
+ s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(MetadataSchema.ReplicationSection.getRange());
+ for (Entry<Key,Value> entry : s) {
+ log.info(entry.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
+ }
+
+ log.info("Offline'ing table");
+
+ conn.tableOperations().offline(tableName, true);
+
+ // Add our fake WAL to the log column for this table
+ String walUri = tserverWal.toURI().toString();
+ KeyExtent extent = new KeyExtent(new Text(tableId), null, null);
+ bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+ m = new Mutation(extent.getMetadataEntry());
+ m.put(MetadataSchema.TabletsSection.LogColumnFamily.NAME, new Text("localhost:12345/" + walUri), new Value((walUri + "|1").getBytes(UTF_8)));
+ bw.addMutation(m);
+
+ // Add a replication entry for our fake WAL
+ m = new Mutation(MetadataSchema.ReplicationSection.getRowPrefix() + new Path(walUri).toString());
+ m.put(MetadataSchema.ReplicationSection.COLF, new Text(tableId), new Value(StatusUtil.fileCreated(System.currentTimeMillis()).toByteArray()));
+ bw.addMutation(m);
+ bw.close();
+
+ log.info("State of metadata after injecting WAL manually");
+
+ s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
+ for (Entry<Key,Value> entry : s) {
+ log.info(entry.getKey().toStringNoTruncate() + " " + entry.getValue());
+ }
+
+ s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(MetadataSchema.ReplicationSection.getRange());
+ for (Entry<Key,Value> entry : s) {
+ log.info(entry.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
+ }
+
+ log.info("Bringing table online");
+ conn.tableOperations().online(tableName, true);
+
+ Assert.assertEquals(1, Iterables.size(conn.createScanner(tableName, Authorizations.EMPTY)));
+
+ log.info("Table has performed recovery, state of metadata:");
+
+ s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
+ for (Entry<Key,Value> entry : s) {
+ log.info(entry.getKey().toStringNoTruncate() + " " + entry.getValue());
+ }
+
+ s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(MetadataSchema.ReplicationSection.getRange());
+ for (Entry<Key,Value> entry : s) {
+ Status status = Status.parseFrom(entry.getValue().get());
+ log.info(entry.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(status));
+ Assert.assertFalse("Status record was closed and it should not be", status.getClosed());
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/server/security/SystemCredentialsIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/server/security/SystemCredentialsIT.java b/test/src/main/java/org/apache/accumulo/test/server/security/SystemCredentialsIT.java
new file mode 100644
index 0000000..9752916
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/server/security/SystemCredentialsIT.java
@@ -0,0 +1,233 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.server.security;
+
+import static org.junit.Assert.assertEquals;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.impl.Credentials;
+import org.apache.accumulo.core.client.security.SecurityErrorCode;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.security.SystemCredentials;
+import org.apache.accumulo.test.functional.ConfigurableMacBase;
+import org.junit.Test;
+
+public class SystemCredentialsIT extends ConfigurableMacBase {
+
+ private static final int FAIL_CODE = 7, BAD_PASSWD_FAIL_CODE = 8;
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 1 * 60;
+ }
+
+ @Test
+ public void testSystemCredentials() throws Exception {
+ assertEquals(0, exec(SystemCredentialsIT.class, "good", getCluster().getZooKeepers()).waitFor());
+ assertEquals(FAIL_CODE, exec(SystemCredentialsIT.class, "bad", getCluster().getZooKeepers()).waitFor());
+ assertEquals(BAD_PASSWD_FAIL_CODE, exec(SystemCredentialsIT.class, "bad_password", getCluster().getZooKeepers()).waitFor());
+ }
+
+ public static void main(final String[] args) throws AccumuloException, TableNotFoundException, AccumuloSecurityException {
+ Credentials creds = null;
+ if (args.length < 2)
+ throw new RuntimeException("Incorrect usage; expected to be run by test only");
+ if (args[0].equals("bad")) {
+ Instance inst = new Instance() {
+
+ @Deprecated
+ @Override
+ public void setConfiguration(AccumuloConfiguration conf) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int getZooKeepersSessionTimeOut() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public String getZooKeepers() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public String getRootTabletLocation() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public List<String> getMasterLocations() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public String getInstanceName() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public String getInstanceID() {
+ return SystemCredentials.class.getName();
+ }
+
+ @Override
+ public Connector getConnector(String principal, AuthenticationToken token) throws AccumuloException, AccumuloSecurityException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Deprecated
+ @Override
+ public Connector getConnector(String user, CharSequence pass) throws AccumuloException, AccumuloSecurityException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Deprecated
+ @Override
+ public Connector getConnector(String user, ByteBuffer pass) throws AccumuloException, AccumuloSecurityException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Deprecated
+ @Override
+ public Connector getConnector(String user, byte[] pass) throws AccumuloException, AccumuloSecurityException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Deprecated
+ @Override
+ public AccumuloConfiguration getConfiguration() {
+ throw new UnsupportedOperationException();
+ }
+
+ };
+ creds = SystemCredentials.get(inst);
+ } else if (args[0].equals("good")) {
+ creds = SystemCredentials.get(HdfsZooInstance.getInstance());
+ } else if (args[0].equals("bad_password")) {
+ Instance inst = new Instance() {
+
+ @Override
+ public int getZooKeepersSessionTimeOut() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public String getZooKeepers() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public String getRootTabletLocation() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public List<String> getMasterLocations() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public String getInstanceName() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public String getInstanceID() {
+ return SystemCredentials.class.getName();
+ }
+
+ @Override
+ public Connector getConnector(String principal, AuthenticationToken token) throws AccumuloException, AccumuloSecurityException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Deprecated
+ @Override
+ public Connector getConnector(String user, CharSequence pass) throws AccumuloException, AccumuloSecurityException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Deprecated
+ @Override
+ public Connector getConnector(String user, ByteBuffer pass) throws AccumuloException, AccumuloSecurityException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Deprecated
+ @Override
+ public Connector getConnector(String user, byte[] pass) throws AccumuloException, AccumuloSecurityException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Deprecated
+ @Override
+ public AccumuloConfiguration getConfiguration() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Deprecated
+ @Override
+ public void setConfiguration(AccumuloConfiguration conf) {
+ throw new UnsupportedOperationException();
+ }
+
+ };
+ creds = new SystemCredentials(inst, "!SYSTEM", new PasswordToken("fake"));
+ } else {
+ throw new RuntimeException("Incorrect usage; expected to be run by test only");
+ }
+ Instance instance = HdfsZooInstance.getInstance();
+ Connector conn;
+ try {
+ conn = instance.getConnector(creds.getPrincipal(), creds.getToken());
+ } catch (AccumuloSecurityException e) {
+ e.printStackTrace(System.err);
+ System.exit(BAD_PASSWD_FAIL_CODE);
+ return;
+ }
+ try {
+ Scanner scan = conn.createScanner(RootTable.NAME, Authorizations.EMPTY);
+ for (Entry<Key,Value> e : scan) {
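+ // consume each entry; iterating the scan is what forces authentication server-side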
+ e.hashCode();
+ }
+ } catch (RuntimeException e) {
+ // catch the runtime exception from the scanner iterator
+ if (e.getCause() instanceof AccumuloSecurityException
+ && ((AccumuloSecurityException) e.getCause()).getSecurityErrorCode() == SecurityErrorCode.BAD_CREDENTIALS) {
+ e.printStackTrace(System.err);
+ System.exit(FAIL_CODE);
+ }
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/start/KeywordStartIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/start/KeywordStartIT.java b/test/src/main/java/org/apache/accumulo/test/start/KeywordStartIT.java
new file mode 100644
index 0000000..f7f250a
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/start/KeywordStartIT.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.start;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.TreeMap;
+
+import org.apache.accumulo.core.file.rfile.PrintInfo;
+import org.apache.accumulo.core.util.Classpath;
+import org.apache.accumulo.core.util.CreateToken;
+import org.apache.accumulo.core.util.Help;
+import org.apache.accumulo.core.util.Jar;
+import org.apache.accumulo.core.util.Version;
+import org.apache.accumulo.gc.GCExecutable;
+import org.apache.accumulo.gc.SimpleGarbageCollector;
+import org.apache.accumulo.master.Master;
+import org.apache.accumulo.master.MasterExecutable;
+import org.apache.accumulo.minicluster.MiniAccumuloRunner;
+import org.apache.accumulo.minicluster.impl.MiniClusterExecutable;
+import org.apache.accumulo.monitor.Monitor;
+import org.apache.accumulo.monitor.MonitorExecutable;
+import org.apache.accumulo.proxy.Proxy;
+import org.apache.accumulo.server.init.Initialize;
+import org.apache.accumulo.server.util.Admin;
+import org.apache.accumulo.server.util.Info;
+import org.apache.accumulo.server.util.LoginProperties;
+import org.apache.accumulo.server.util.ZooKeeperMain;
+import org.apache.accumulo.shell.Shell;
+import org.apache.accumulo.start.Main;
+import org.apache.accumulo.start.spi.KeywordExecutable;
+import org.apache.accumulo.tracer.TraceServer;
+import org.apache.accumulo.tracer.TracerExecutable;
+import org.apache.accumulo.tserver.TServerExecutable;
+import org.apache.accumulo.tserver.TabletServer;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class KeywordStartIT {
+
+ private final Logger log = LoggerFactory.getLogger(getClass());
+
+ @Test
+ public void testKeywordsMatch() throws IOException {
+ for (Entry<String,KeywordExecutable> entry : Main.getExecutables(getClass().getClassLoader()).entrySet()) {
+ assertEquals(entry.getKey(), entry.getValue().keyword());
+ }
+ }
+
+ @Test
+ public void testCheckDuplicates() {
+ NoOp one = new NoOp("one");
+ NoOp anotherOne = new NoOp("another");
+ NoOp two = new NoOp("two");
+ NoOp three = new NoOp("three");
+ List<NoOp> services = Arrays.asList(one, three, two, two, three, three, anotherOne);
+ assertEquals(7, services.size());
+ Map<String,KeywordExecutable> results = Main.checkDuplicates(services);
+ assertTrue(results.containsKey(one.keyword()));
+ assertTrue(results.containsKey(anotherOne.keyword()));
+ assertFalse(results.containsKey(two.keyword()));
+ assertFalse(results.containsKey(three.keyword()));
+ assertEquals(2, results.size());
+ }
+
+ // Note: this test may fail in Eclipse if the services files haven't been generated by the AutoService annotation processor
+ @Test
+ public void testExpectedClasses() throws IOException {
+ TreeMap<String,Class<? extends KeywordExecutable>> expectSet = new TreeMap<>();
+ expectSet.put("admin", Admin.class);
+ expectSet.put("classpath", Classpath.class);
+ expectSet.put("create-token", CreateToken.class);
+ expectSet.put("gc", GCExecutable.class);
+ expectSet.put("help", Help.class);
+ expectSet.put("info", Info.class);
+ expectSet.put("init", Initialize.class);
+ expectSet.put("jar", Jar.class);
+ expectSet.put("login-info", LoginProperties.class);
+ expectSet.put("master", MasterExecutable.class);
+ expectSet.put("minicluster", MiniClusterExecutable.class);
+ expectSet.put("monitor", MonitorExecutable.class);
+ expectSet.put("proxy", Proxy.class);
+ expectSet.put("rfile-info", PrintInfo.class);
+ expectSet.put("shell", Shell.class);
+ expectSet.put("tracer", TracerExecutable.class);
+ expectSet.put("tserver", TServerExecutable.class);
+ expectSet.put("version", Version.class);
+ expectSet.put("zookeeper", ZooKeeperMain.class);
+
+ Iterator<Entry<String,Class<? extends KeywordExecutable>>> expectIter = expectSet.entrySet().iterator();
+ TreeMap<String,KeywordExecutable> actualSet = new TreeMap<>(Main.getExecutables(getClass().getClassLoader()));
+ Iterator<Entry<String,KeywordExecutable>> actualIter = actualSet.entrySet().iterator();
+ Entry<String,Class<? extends KeywordExecutable>> expected;
+ Entry<String,KeywordExecutable> actual;
+ while (expectIter.hasNext() && actualIter.hasNext()) {
+ expected = expectIter.next();
+ actual = actualIter.next();
+ assertEquals(expected.getKey(), actual.getKey());
+ assertEquals(expected.getValue(), actual.getValue().getClass());
+ }
+ boolean moreExpected = expectIter.hasNext();
+ if (moreExpected) {
+ while (expectIter.hasNext()) {
+ log.warn("Missing class for keyword '" + expectIter.next() + "'");
+ }
+ }
+ assertFalse("Missing expected classes", moreExpected);
+ boolean moreActual = actualIter.hasNext();
+ if (moreActual) {
+ while (actualIter.hasNext()) {
+ log.warn("Extra class found with keyword '" + actualIter.next() + "'");
+ }
+ }
+ assertFalse("Found additional unexpected classes", moreActual);
+ }
+
+ @Test
+ public void checkHasMain() {
+ assertFalse("Sanity check for test failed. Somehow the test class has a main method", hasMain(this.getClass()));
+
+ HashSet<Class<?>> expectSet = new HashSet<>();
+ expectSet.add(Admin.class);
+ expectSet.add(CreateToken.class);
+ expectSet.add(Info.class);
+ expectSet.add(Initialize.class);
+ expectSet.add(LoginProperties.class);
+ expectSet.add(Master.class);
+ expectSet.add(MiniAccumuloRunner.class);
+ expectSet.add(Monitor.class);
+ expectSet.add(PrintInfo.class);
+ expectSet.add(Proxy.class);
+ expectSet.add(Shell.class);
+ expectSet.add(SimpleGarbageCollector.class);
+ expectSet.add(TabletServer.class);
+ expectSet.add(TraceServer.class);
+ expectSet.add(ZooKeeperMain.class);
+
+ for (Class<?> c : expectSet) {
+ assertTrue("Class " + c.getName() + " is missing a main method!", hasMain(c));
+ }
+
+ }
+
+ private static boolean hasMain(Class<?> classToCheck) {
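+ // Class.getMethod only returns public members, so a non-public main is reported as missing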
+ Method main;
+ try {
+ main = classToCheck.getMethod("main", String[].class);
+ } catch (NoSuchMethodException e) {
+ return false;
+ }
+ return main != null && Modifier.isPublic(main.getModifiers()) && Modifier.isStatic(main.getModifiers());
+ }
+
+ private static class NoOp implements KeywordExecutable {
+
+ private final String kw;
+
+ public NoOp(String kw) {
+ this.kw = kw;
+ }
+
+ @Override
+ public String keyword() {
+ return kw;
+ }
+
+ @Override
+ public void execute(String[] args) throws Exception {}
+
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/util/CertUtils.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/util/CertUtils.java b/test/src/main/java/org/apache/accumulo/test/util/CertUtils.java
new file mode 100644
index 0000000..2345ea7
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/util/CertUtils.java
@@ -0,0 +1,348 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.util;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.math.BigInteger;
+import java.security.KeyPair;
+import java.security.KeyPairGenerator;
+import java.security.KeyStore;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.security.NoSuchProviderException;
+import java.security.PrivateKey;
+import java.security.PublicKey;
+import java.security.Security;
+import java.security.UnrecoverableKeyException;
+import java.security.cert.Certificate;
+import java.security.cert.CertificateException;
+import java.util.Calendar;
+import java.util.Enumeration;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.TreeMap;
+
+import org.apache.accumulo.core.cli.Help;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.DefaultConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.conf.SiteConfiguration;
+import org.apache.commons.io.FileExistsException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.bouncycastle.asn1.x500.X500Name;
+import org.bouncycastle.asn1.x500.style.IETFUtils;
+import org.bouncycastle.asn1.x500.style.RFC4519Style;
+import org.bouncycastle.asn1.x509.BasicConstraints;
+import org.bouncycastle.asn1.x509.Extension;
+import org.bouncycastle.asn1.x509.KeyUsage;
+import org.bouncycastle.cert.CertIOException;
+import org.bouncycastle.cert.X509CertificateHolder;
+import org.bouncycastle.cert.jcajce.JcaX509ExtensionUtils;
+import org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder;
+import org.bouncycastle.jce.provider.BouncyCastleProvider;
+import org.bouncycastle.jce.provider.X509CertificateObject;
+import org.bouncycastle.operator.OperatorCreationException;
+import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.beust.jcommander.JCommander;
+import com.beust.jcommander.Parameter;
+import com.google.common.base.Predicate;
+
+public class CertUtils {
+ private static final Logger log = LoggerFactory.getLogger(CertUtils.class);
+ static {
+ Security.addProvider(new BouncyCastleProvider());
+ }
+
+ static class Opts extends Help {
+ @Parameter(description = "generate-all | generate-local | generate-self-trusted", required = true, arity = 1)
+ List<String> operation = null;
+
+ @Parameter(names = {"--local-keystore"}, description = "Target path for generated keystore")
+ String localKeystore = null;
+
+ @Parameter(names = {"--root-keystore"}, description = "Path to root truststore, generated with generate-all, or used for signing with generate-local")
+ String rootKeystore = null;
+
+ @Parameter(names = {"--root-truststore"}, description = "Target path for generated public root truststore")
+ String truststore = null;
+
+ @Parameter(names = {"--keystore-type"}, description = "Type of keystore file to use")
+ String keystoreType = "JKS";
+
+ @Parameter(names = {"--root-keystore-password"}, description = "Password for root keystore, falls back to --keystore-password if not provided")
+ String rootKeystorePassword = null;
+
+ @Parameter(
+ names = {"--keystore-password"},
+ description = "Password used to encrypt keystores. If omitted, the instance-wide secret will be used. If specified, the password must also be explicitly configured in Accumulo.")
+ String keystorePassword = null;
+
+ @Parameter(names = {"--truststore-password"}, description = "Password used to encrypt the truststore. If omitted, empty password is used")
+ String truststorePassword = "";
+
+ @Parameter(names = {"--key-name-prefix"}, description = "Prefix for names of generated keys")
+ String keyNamePrefix = CertUtils.class.getSimpleName();
+
+ @Parameter(names = {"--issuer-rdn"}, description = "RDN string for issuer, for example: 'c=US,o=My Organization,cn=My Name'")
+ String issuerDirString = "o=Apache Accumulo";
+
+ @Parameter(names = "--site-file", description = "Load configuration from the given site file")
+ public String siteFile = null;
+
+ @Parameter(names = "--signing-algorithm", description = "Algorithm used to sign certificates")
+ public String signingAlg = "SHA256WITHRSA";
+
+ @Parameter(names = "--encryption-algorithm", description = "Algorithm used to encrypt private keys")
+ public String encryptionAlg = "RSA";
+
+ @Parameter(names = "--keysize", description = "Key size used by encryption algorithm")
+ public int keysize = 2048;
+
+ public AccumuloConfiguration getConfiguration() {
+ if (siteFile == null) {
+ return SiteConfiguration.getInstance(DefaultConfiguration.getInstance());
+ } else {
+ return new AccumuloConfiguration() {
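+ // Overlay configuration: values from the given site file shadow the shipped defaults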
+ Configuration xml = new Configuration();
+ {
+ xml.addResource(new Path(siteFile));
+ }
+
+ @Override
+ public Iterator<Entry<String,String>> iterator() {
+ TreeMap<String,String> map = new TreeMap<String,String>();
+ for (Entry<String,String> props : DefaultConfiguration.getInstance())
+ map.put(props.getKey(), props.getValue());
+ for (Entry<String,String> props : xml)
+ map.put(props.getKey(), props.getValue());
+ return map.entrySet().iterator();
+ }
+
+ @Override
+ public String get(Property property) {
+ String value = xml.get(property.getKey());
+ if (value != null)
+ return value;
+ return DefaultConfiguration.getInstance().get(property);
+ }
+
+ @Override
+ public void getProperties(Map<String,String> props, Predicate<String> filter) {
+ for (Entry<String,String> entry : this)
+ if (filter.apply(entry.getKey()))
+ props.put(entry.getKey(), entry.getValue());
+ }
+ };
+ }
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ Opts opts = new Opts();
+ opts.parseArgs(CertUtils.class.getName(), args);
+ String operation = opts.operation.get(0);
+
+ String keyPassword = opts.keystorePassword;
+ if (keyPassword == null)
+ keyPassword = getDefaultKeyPassword();
+
+ String rootKeyPassword = opts.rootKeystorePassword;
+ if (rootKeyPassword == null) {
+ rootKeyPassword = keyPassword;
+ }
+
+ CertUtils certUtils = new CertUtils(opts.keystoreType, opts.issuerDirString, opts.encryptionAlg, opts.keysize, opts.signingAlg);
+
+ if ("generate-all".equals(operation)) {
+ certUtils.createAll(new File(opts.rootKeystore), new File(opts.localKeystore), new File(opts.truststore), opts.keyNamePrefix, rootKeyPassword,
+ keyPassword, opts.truststorePassword);
+ } else if ("generate-local".equals(operation)) {
+ certUtils.createSignedCert(new File(opts.localKeystore), opts.keyNamePrefix + "-local", keyPassword, opts.rootKeystore, rootKeyPassword);
+ } else if ("generate-self-trusted".equals(operation)) {
+ certUtils.createSelfSignedCert(new File(opts.truststore), opts.keyNamePrefix + "-selfTrusted", keyPassword);
+ } else {
+ JCommander jcommander = new JCommander(opts);
+ jcommander.setProgramName(CertUtils.class.getName());
+ jcommander.usage();
+ System.err.println("Unrecognized operation: " + opts.operation);
+ System.exit(1);
+ }
+ }
+
+ private static String getDefaultKeyPassword() {
+ return SiteConfiguration.getInstance(DefaultConfiguration.getInstance()).get(Property.INSTANCE_SECRET);
+ }
+
+ private String issuerDirString;
+ private String keystoreType;
+ private String encryptionAlgorithm;
+ private int keysize;
+ private String signingAlgorithm;
+
+ public CertUtils(String keystoreType, String issuerDirString, String encryptionAlgorithm, int keysize, String signingAlgorithm) {
+ super();
+ this.keystoreType = keystoreType;
+ this.issuerDirString = issuerDirString;
+ this.encryptionAlgorithm = encryptionAlgorithm;
+ this.keysize = keysize;
+ this.signingAlgorithm = signingAlgorithm;
+ }
+
+ public void createAll(File rootKeystoreFile, File localKeystoreFile, File trustStoreFile, String keyNamePrefix, String rootKeystorePassword,
+ String keystorePassword, String truststorePassword) throws KeyStoreException, CertificateException, NoSuchAlgorithmException, IOException,
+ OperatorCreationException, AccumuloSecurityException, NoSuchProviderException, UnrecoverableKeyException, FileNotFoundException {
+ createSelfSignedCert(rootKeystoreFile, keyNamePrefix + "-root", rootKeystorePassword);
+ createSignedCert(localKeystoreFile, keyNamePrefix + "-local", keystorePassword, rootKeystoreFile.getAbsolutePath(), rootKeystorePassword);
+ createPublicCert(trustStoreFile, keyNamePrefix + "-public", rootKeystoreFile.getAbsolutePath(), rootKeystorePassword, truststorePassword);
+ }
+
+ public void createPublicCert(File targetKeystoreFile, String keyName, String rootKeystorePath, String rootKeystorePassword, String truststorePassword)
+ throws NoSuchAlgorithmException, CertificateException, FileNotFoundException, IOException, KeyStoreException, UnrecoverableKeyException {
+ KeyStore signerKeystore = KeyStore.getInstance(keystoreType);
+ char[] signerPasswordArray = rootKeystorePassword.toCharArray();
+ try (FileInputStream fis = new FileInputStream(rootKeystorePath)) {
+ signerKeystore.load(fis, signerPasswordArray);
+ }
+ Certificate rootCert = findCert(signerKeystore);
+
+ KeyStore keystore = KeyStore.getInstance(keystoreType);
+ keystore.load(null, null);
+ keystore.setCertificateEntry(keyName + "Cert", rootCert);
+ try (FileOutputStream fos = new FileOutputStream(targetKeystoreFile)) {
+ keystore.store(fos, truststorePassword.toCharArray());
+ }
+ }
+
+ public void createSignedCert(File targetKeystoreFile, String keyName, String keystorePassword, String signerKeystorePath, String signerKeystorePassword)
+ throws KeyStoreException, CertificateException, NoSuchAlgorithmException, IOException, OperatorCreationException, AccumuloSecurityException,
+ UnrecoverableKeyException, NoSuchProviderException {
+ KeyStore signerKeystore = KeyStore.getInstance(keystoreType);
+ char[] signerPasswordArray = signerKeystorePassword.toCharArray();
+ try (FileInputStream fis = new FileInputStream(signerKeystorePath)) {
+ signerKeystore.load(fis, signerPasswordArray);
+ }
+ Certificate signerCert = findCert(signerKeystore);
+ PrivateKey signerKey = findPrivateKey(signerKeystore, signerPasswordArray);
+
+ KeyPair kp = generateKeyPair();
+ X509CertificateObject cert = generateCert(keyName, kp, false, signerCert.getPublicKey(), signerKey);
+
+ char[] password = keystorePassword.toCharArray();
+ KeyStore keystore = KeyStore.getInstance(keystoreType);
+ keystore.load(null, null);
+ keystore.setCertificateEntry(keyName + "Cert", cert);
+ keystore.setKeyEntry(keyName + "Key", kp.getPrivate(), password, new Certificate[] {cert, signerCert});
+ try (FileOutputStream fos = new FileOutputStream(targetKeystoreFile)) {
+ keystore.store(fos, password);
+ }
+ }
+
+ public void createSelfSignedCert(File targetKeystoreFile, String keyName, String keystorePassword) throws KeyStoreException, CertificateException,
+ NoSuchAlgorithmException, IOException, OperatorCreationException, AccumuloSecurityException, NoSuchProviderException {
+ if (targetKeystoreFile.exists()) {
+ throw new FileExistsException(targetKeystoreFile);
+ }
+
+ KeyPair kp = generateKeyPair();
+
+ X509CertificateObject cert = generateCert(keyName, kp, true, kp.getPublic(), kp.getPrivate());
+
+ char[] password = keystorePassword.toCharArray();
+ KeyStore keystore = KeyStore.getInstance(keystoreType);
+ keystore.load(null, null);
+ keystore.setCertificateEntry(keyName + "Cert", cert);
+ keystore.setKeyEntry(keyName + "Key", kp.getPrivate(), password, new Certificate[] {cert});
+ try (FileOutputStream fos = new FileOutputStream(targetKeystoreFile)) {
+ keystore.store(fos, password);
+ }
+ }
+
+ private KeyPair generateKeyPair() throws NoSuchAlgorithmException, NoSuchProviderException {
+ KeyPairGenerator gen = KeyPairGenerator.getInstance(encryptionAlgorithm);
+ gen.initialize(keysize);
+ return gen.generateKeyPair();
+ }
+
+ private X509CertificateObject generateCert(String keyName, KeyPair kp, boolean isCertAuthority, PublicKey signerPublicKey, PrivateKey signerPrivateKey)
+ throws IOException, CertIOException, OperatorCreationException, CertificateException, NoSuchAlgorithmException {
+ Calendar startDate = Calendar.getInstance();
+ Calendar endDate = Calendar.getInstance();
+ endDate.add(Calendar.YEAR, 100);
+
+ BigInteger serialNumber = BigInteger.valueOf(startDate.getTimeInMillis());
+ X500Name issuer = new X500Name(IETFUtils.rDNsFromString(issuerDirString, RFC4519Style.INSTANCE));
+ JcaX509v3CertificateBuilder certGen = new JcaX509v3CertificateBuilder(issuer, serialNumber, startDate.getTime(), endDate.getTime(), issuer, kp.getPublic());
+ JcaX509ExtensionUtils extensionUtils = new JcaX509ExtensionUtils();
+ certGen.addExtension(Extension.subjectKeyIdentifier, false, extensionUtils.createSubjectKeyIdentifier(kp.getPublic()));
+ certGen.addExtension(Extension.basicConstraints, false, new BasicConstraints(isCertAuthority));
+ certGen.addExtension(Extension.authorityKeyIdentifier, false, extensionUtils.createAuthorityKeyIdentifier(signerPublicKey));
+ if (isCertAuthority) {
+ certGen.addExtension(Extension.keyUsage, true, new KeyUsage(KeyUsage.keyCertSign));
+ }
+ X509CertificateHolder cert = certGen.build(new JcaContentSignerBuilder(signingAlgorithm).build(signerPrivateKey));
+ return new X509CertificateObject(cert.toASN1Structure());
+ }
+
+ static Certificate findCert(KeyStore keyStore) throws KeyStoreException {
+ Enumeration<String> aliases = keyStore.aliases();
+ Certificate cert = null;
+ while (aliases.hasMoreElements()) {
+ String alias = aliases.nextElement();
+ if (keyStore.isCertificateEntry(alias)) {
+ if (cert == null) {
+ cert = keyStore.getCertificate(alias);
+ } else {
+ log.warn("Found multiple certificates in keystore. Ignoring " + alias);
+ }
+ }
+ }
+ if (cert == null) {
+ throw new KeyStoreException("Could not find cert in keystore");
+ }
+ return cert;
+ }
+
+ static PrivateKey findPrivateKey(KeyStore keyStore, char[] keystorePassword) throws UnrecoverableKeyException, KeyStoreException, NoSuchAlgorithmException {
+ Enumeration<String> aliases = keyStore.aliases();
+ PrivateKey key = null;
+ while (aliases.hasMoreElements()) {
+ String alias = aliases.nextElement();
+ if (keyStore.isKeyEntry(alias)) {
+ if (key == null) {
+ key = (PrivateKey) keyStore.getKey(alias, keystorePassword);
+ } else {
+ log.warn("Found multiple keys in keystore. Ignoring " + alias);
+ }
+ }
+ }
+ if (key == null) {
+ throw new KeyStoreException("Could not find private key in keystore");
+ }
+ return key;
+ }
+}
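For reference, this is roughly how the generate-all path above can be driven
programmatically. This is a sketch only: the keystore type, issuer DN,
algorithms, key size, file names, and passwords below are illustrative
assumptions, not defaults mandated by the tool.

  import java.io.File;

  public class CertUtilsExample {
    public static void main(String[] args) throws Exception {
      // All literal values here are assumptions chosen for illustration.
      CertUtils certUtils = new CertUtils("JKS", "o=Apache Accumulo", "RSA", 2048, "SHA256WITHRSA");
      // generate-all: a self-signed root, a root-signed local cert, and a
      // truststore holding only the root's public certificate.
      certUtils.createAll(new File("root.jks"), new File("local.jks"), new File("trust.jks"),
          "accumulo", "rootSecret", "localSecret", "trustSecret");
    }
  }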
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/resources/FooConstraint.jar
----------------------------------------------------------------------
diff --git a/test/src/main/resources/FooConstraint.jar b/test/src/main/resources/FooConstraint.jar
new file mode 100644
index 0000000..14673da
Binary files /dev/null and b/test/src/main/resources/FooConstraint.jar differ
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/resources/FooFilter.jar
----------------------------------------------------------------------
diff --git a/test/src/main/resources/FooFilter.jar b/test/src/main/resources/FooFilter.jar
new file mode 100644
index 0000000..ef30cbc
Binary files /dev/null and b/test/src/main/resources/FooFilter.jar differ
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/resources/TestCombinerX.jar
----------------------------------------------------------------------
diff --git a/test/src/main/resources/TestCombinerX.jar b/test/src/main/resources/TestCombinerX.jar
new file mode 100644
index 0000000..849e447
Binary files /dev/null and b/test/src/main/resources/TestCombinerX.jar differ
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/resources/TestCombinerY.jar
----------------------------------------------------------------------
diff --git a/test/src/main/resources/TestCombinerY.jar b/test/src/main/resources/TestCombinerY.jar
new file mode 100644
index 0000000..30da0cbf
Binary files /dev/null and b/test/src/main/resources/TestCombinerY.jar differ
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/resources/TestCompactionStrat.jar
----------------------------------------------------------------------
diff --git a/test/src/main/resources/TestCompactionStrat.jar b/test/src/main/resources/TestCompactionStrat.jar
new file mode 100644
index 0000000..3daa16e
Binary files /dev/null and b/test/src/main/resources/TestCompactionStrat.jar differ
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/resources/conf/accumulo-site.xml
----------------------------------------------------------------------
diff --git a/test/src/main/resources/conf/accumulo-site.xml b/test/src/main/resources/conf/accumulo-site.xml
new file mode 100644
index 0000000..e1f0e70
--- /dev/null
+++ b/test/src/main/resources/conf/accumulo-site.xml
@@ -0,0 +1,123 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+ <!--
+ Put your site-specific accumulo configurations here.
+
+ The available configuration values along with their defaults
+ are documented in docs/config.html
+
+ Unless you are simply testing at your workstation, you will most
+ definitely need to change the three entries below.
+ -->
+
+ <property>
+ <name>instance.zookeeper.host</name>
+ <value>localhost:2181</value>
+ <description>comma separated list of zookeeper servers</description>
+ </property>
+
+ <property>
+ <name>logger.dir.walog</name>
+ <value>walogs</value>
+ <description>The directory used to store write-ahead logs on the local filesystem. It is possible to specify a comma-separated list of directories.
+ </description>
+ </property>
+
+ <property>
+ <name>instance.secret</name>
+ <value>DEFAULT</value>
+ <description>A secret unique to a given instance that all servers must know in order to communicate with one another.
+ Change it before initialization. To
+ change it later use ./bin/accumulo org.apache.accumulo.server.util.ChangeSecret [oldpasswd] [newpasswd],
+ and then update this file.
+ </description>
+ </property>
+
+ <property>
+ <name>tserver.memory.maps.max</name>
+ <value>80M</value>
+ </property>
+
+ <property>
+ <name>tserver.cache.data.size</name>
+ <value>7M</value>
+ </property>
+
+ <property>
+ <name>tserver.cache.index.size</name>
+ <value>20M</value>
+ </property>
+
+ <property>
+ <name>trace.password</name>
+ <!--
+ change this to the root user's password, and/or change the user below
+ -->
+ <value>secret</value>
+ </property>
+
+ <property>
+ <name>trace.user</name>
+ <value>root</value>
+ </property>
+
+ <property>
+ <name>tserver.sort.buffer.size</name>
+ <value>50M</value>
+ </property>
+
+ <property>
+ <name>tserver.walog.max.size</name>
+ <value>100M</value>
+ </property>
+
+ <property>
+ <name>general.classpaths</name>
+ <!--
+ Add the following for hadoop-2.0
+ $HADOOP_PREFIX/share/hadoop/common/.*.jar,
+ $HADOOP_PREFIX/share/hadoop/common/lib/.*.jar,
+ $HADOOP_PREFIX/share/hadoop/hdfs/.*.jar,
+ $HADOOP_PREFIX/share/hadoop/mapreduce/.*.jar,
+ $HADOOP_PREFIX/share/hadoop/yarn/.*.jar,
+ -->
+ <value>
+ $ACCUMULO_HOME/server/target/classes/,
+ $ACCUMULO_HOME/lib/accumulo-server.jar,
+ $ACCUMULO_HOME/core/target/classes/,
+ $ACCUMULO_HOME/lib/accumulo-core.jar,
+ $ACCUMULO_HOME/start/target/classes/,
+ $ACCUMULO_HOME/lib/accumulo-start.jar,
+ $ACCUMULO_HOME/fate/target/classes/,
+ $ACCUMULO_HOME/lib/accumulo-fate.jar,
+ $ACCUMULO_HOME/proxy/target/classes/,
+ $ACCUMULO_HOME/lib/accumulo-proxy.jar,
+ $ACCUMULO_HOME/lib/[^.].*.jar,
+ $ZOOKEEPER_HOME/zookeeper[^.].*.jar,
+ $HADOOP_CONF_DIR,
+ $HADOOP_PREFIX/[^.].*.jar,
+ $HADOOP_PREFIX/lib/[^.].*.jar,
+ </value>
+ <description>Classpaths that accumulo checks for updates and class files.
+ When using the Security Manager, please remove the ".../target/classes/" values.
+ </description>
+ </property>
+</configuration>
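For context, server-side code resolves these values through the site
configuration. A minimal sketch, mirroring the getDefaultKeyPassword()
call in CertUtils earlier in this commit:

  import org.apache.accumulo.core.conf.DefaultConfiguration;
  import org.apache.accumulo.core.conf.Property;
  import org.apache.accumulo.core.conf.SiteConfiguration;

  // Reads instance.secret from accumulo-site.xml; properties the site
  // file does not set fall back to the shipped defaults.
  String secret = SiteConfiguration.getInstance(DefaultConfiguration.getInstance())
      .get(Property.INSTANCE_SECRET);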
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/resources/conf/generic_logger.xml
----------------------------------------------------------------------
diff --git a/test/src/main/resources/conf/generic_logger.xml b/test/src/main/resources/conf/generic_logger.xml
new file mode 100644
index 0000000..db79efe
--- /dev/null
+++ b/test/src/main/resources/conf/generic_logger.xml
@@ -0,0 +1,83 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
+<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
+
+ <!-- Write out everything at the DEBUG level to the debug log -->
+ <appender name="A2" class="org.apache.log4j.RollingFileAppender">
+ <param name="File" value="${org.apache.accumulo.core.dir.log}/${org.apache.accumulo.core.application}_${org.apache.accumulo.core.ip.localhost.hostname}.debug.log"/>
+ <param name="MaxFileSize" value="1000MB"/>
+ <param name="MaxBackupIndex" value="10"/>
+ <param name="Threshold" value="DEBUG"/>
+ <layout class="org.apache.log4j.PatternLayout">
+ <param name="ConversionPattern" value="%d{ISO8601} [%-8c{2}] %-5p: %m%n"/>
+ </layout>
+ </appender>
+
+ <!-- Write out INFO and higher to the regular log -->
+ <appender name="A3" class="org.apache.log4j.RollingFileAppender">
+ <param name="File" value="${org.apache.accumulo.core.dir.log}/${org.apache.accumulo.core.application}_${org.apache.accumulo.core.ip.localhost.hostname}.log"/>
+ <param name="MaxFileSize" value="1000MB"/>
+ <param name="MaxBackupIndex" value="10"/>
+ <param name="Threshold" value="INFO"/>
+ <layout class="org.apache.log4j.PatternLayout">
+ <param name="ConversionPattern" value="%d{ISO8601} [%-8c{2}] %-5p: %m%n"/>
+ </layout>
+ </appender>
+
+ <!-- Send all logging data to a centralized logger -->
+ <appender name="N1" class="org.apache.log4j.net.SocketAppender">
+ <param name="remoteHost" value="${org.apache.accumulo.core.host.log}"/>
+ <param name="port" value="${org.apache.accumulo.core.host.log.port}"/>
+ <param name="application" value="${org.apache.accumulo.core.application}:${org.apache.accumulo.core.ip.localhost.hostname}"/>
+ <param name="Threshold" value="WARN"/>
+ </appender>
+
+ <!-- If the centralized logger is down, buffer the log events, but drop them if it stays down -->
+ <appender name="ASYNC" class="org.apache.log4j.AsyncAppender">
+ <appender-ref ref="N1" />
+ </appender>
+
+ <!-- Log accumulo events to the debug, normal and remote logs. -->
+ <logger name="org.apache.accumulo" additivity="false">
+ <level value="DEBUG"/>
+ <appender-ref ref="A2" />
+ <appender-ref ref="A3" />
+ <appender-ref ref="ASYNC" />
+ </logger>
+
+ <logger name="org.apache.accumulo.core.file.rfile.bcfile">
+ <level value="INFO"/>
+ </logger>
+
+ <logger name="org.mortbay.log">
+ <level value="WARN"/>
+ </logger>
+
+ <logger name="org.apache.zookeeper">
+ <level value="ERROR"/>
+ </logger>
+
+ <!-- Log non-accumulo events to the debug and normal logs. -->
+ <root>
+ <level value="INFO"/>
+ <appender-ref ref="A2" />
+ <appender-ref ref="A3" />
+ </root>
+
+</log4j:configuration>
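A rough programmatic equivalent of the centralized-logging appenders above,
assuming the log4j 1.2 API this configuration targets; the host name and
port are placeholders:

  import org.apache.log4j.AsyncAppender;
  import org.apache.log4j.Level;
  import org.apache.log4j.Logger;
  import org.apache.log4j.net.SocketAppender;

  // WARN and above are forwarded to a remote log host; the AsyncAppender
  // buffers events so a slow or down host does not block the server.
  SocketAppender remote = new SocketAppender("log-host.example.com", 4560); // placeholder host/port
  remote.setThreshold(Level.WARN);
  AsyncAppender async = new AsyncAppender();
  async.addAppender(remote);
  Logger.getLogger("org.apache.accumulo").addAppender(async);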
[14/43] accumulo git commit: ACCUMULO-3871 move ITs into distro jar, stop building test jar
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
deleted file mode 100644
index fbbe542..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Random;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.PartialKey;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.file.keyfunctor.ColumnFamilyFunctor;
-import org.apache.accumulo.core.file.keyfunctor.ColumnQualifierFunctor;
-import org.apache.accumulo.core.file.keyfunctor.RowFunctor;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.fate.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.MemoryUnit;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public class BloomFilterIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(BloomFilterIT.class);
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setDefaultMemory(1, MemoryUnit.GIGABYTE);
- cfg.setNumTservers(1);
- Map<String,String> siteConfig = cfg.getSiteConfig();
- siteConfig.put(Property.TSERV_TOTAL_MUTATION_QUEUE_MAX.getKey(), "10M");
- cfg.setSiteConfig(siteConfig);
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 6 * 60;
- }
-
- @Test
- public void test() throws Exception {
- Connector c = getConnector();
- final String readAhead = c.instanceOperations().getSystemConfiguration().get(Property.TSERV_READ_AHEAD_MAXCONCURRENT.getKey());
- c.instanceOperations().setProperty(Property.TSERV_READ_AHEAD_MAXCONCURRENT.getKey(), "1");
- try {
- Thread.sleep(1000);
- final String[] tables = getUniqueNames(4);
- for (String table : tables) {
- TableOperations tops = c.tableOperations();
- tops.create(table);
- tops.setProperty(table, Property.TABLE_INDEXCACHE_ENABLED.getKey(), "false");
- tops.setProperty(table, Property.TABLE_BLOCKCACHE_ENABLED.getKey(), "false");
- tops.setProperty(table, Property.TABLE_BLOOM_SIZE.getKey(), "2000000");
- tops.setProperty(table, Property.TABLE_BLOOM_ERRORRATE.getKey(), "1%");
- tops.setProperty(table, Property.TABLE_BLOOM_LOAD_THRESHOLD.getKey(), "0");
- tops.setProperty(table, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "64K");
- }
- log.info("Writing");
- write(c, tables[0], 1, 0, 2000000000, 500);
- write(c, tables[1], 2, 0, 2000000000, 500);
- write(c, tables[2], 3, 0, 2000000000, 500);
- log.info("Writing complete");
-
- // test inserting an empty key
- BatchWriter bw = c.createBatchWriter(tables[3], new BatchWriterConfig());
- Mutation m = new Mutation(new Text(""));
- m.put(new Text(""), new Text(""), new Value("foo1".getBytes()));
- bw.addMutation(m);
- bw.close();
- c.tableOperations().flush(tables[3], null, null, true);
-
- for (String table : Arrays.asList(tables[0], tables[1], tables[2])) {
- c.tableOperations().compact(table, null, null, true, true);
- }
-
- // ensure compactions are finished
- for (String table : tables) {
- FunctionalTestUtils.checkRFiles(c, table, 1, 1, 1, 1);
- }
-
- // these queries should only run quickly if bloom filters are working, so let's get a baseline
- log.info("Base query");
- long t1 = query(c, tables[0], 1, 0, 2000000000, 5000, 500);
- long t2 = query(c, tables[1], 2, 0, 2000000000, 5000, 500);
- long t3 = query(c, tables[2], 3, 0, 2000000000, 5000, 500);
- log.info("Base query complete");
-
- log.info("Rewriting with bloom filters");
- c.tableOperations().setProperty(tables[0], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
- c.tableOperations().setProperty(tables[0], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), RowFunctor.class.getName());
-
- c.tableOperations().setProperty(tables[1], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
- c.tableOperations().setProperty(tables[1], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), ColumnFamilyFunctor.class.getName());
-
- c.tableOperations().setProperty(tables[2], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
- c.tableOperations().setProperty(tables[2], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), ColumnQualifierFunctor.class.getName());
-
- c.tableOperations().setProperty(tables[3], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
- c.tableOperations().setProperty(tables[3], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), RowFunctor.class.getName());
-
- // ensure the updates to zookeeper propagate
- UtilWaitThread.sleep(500);
-
- c.tableOperations().compact(tables[3], null, null, false, true);
- c.tableOperations().compact(tables[0], null, null, false, true);
- c.tableOperations().compact(tables[1], null, null, false, true);
- c.tableOperations().compact(tables[2], null, null, false, true);
- log.info("Rewriting with bloom filters complete");
-
- // these queries should only run quickly if bloom
- // filters are working
- log.info("Bloom query");
- long tb1 = query(c, tables[0], 1, 0, 2000000000, 5000, 500);
- long tb2 = query(c, tables[1], 2, 0, 2000000000, 5000, 500);
- long tb3 = query(c, tables[2], 3, 0, 2000000000, 5000, 500);
- log.info("Bloom query complete");
- timeCheck(t1 + t2 + t3, tb1 + tb2 + tb3);
-
- // test querying for empty key
- Scanner scanner = c.createScanner(tables[3], Authorizations.EMPTY);
- scanner.setRange(new Range(new Text("")));
-
- if (!scanner.iterator().next().getValue().toString().equals("foo1")) {
- throw new Exception("Did not see foo1");
- }
- } finally {
- c.instanceOperations().setProperty(Property.TSERV_READ_AHEAD_MAXCONCURRENT.getKey(), readAhead);
- }
- }
-
- private void timeCheck(long t1, long t2) throws Exception {
- double improvement = (t1 - t2) * 1.0 / t1;
- if (improvement < .1) {
- throw new Exception("Queries had less than 10% improvement (old: " + t1 + " new: " + t2 + " improvement: " + (improvement * 100) + "%)");
- }
- log.info(String.format("Improvement: %.2f%% (%d vs %d)", (improvement * 100), t1, t2));
- }
-
- private long query(Connector c, String table, int depth, long start, long end, int num, int step) throws Exception {
- Random r = new Random(42);
-
- HashSet<Long> expected = new HashSet<Long>();
- List<Range> ranges = new ArrayList<Range>(num);
- Text key = new Text();
- Text row = new Text("row"), cq = new Text("cq"), cf = new Text("cf");
-
- for (int i = 0; i < num; ++i) {
- Long k = ((r.nextLong() & 0x7fffffffffffffffL) % (end - start)) + start;
- key.set(String.format("k_%010d", k));
- Range range = null;
- Key acuKey;
-
- if (k % (start + step) == 0) {
- expected.add(k);
- }
-
- switch (depth) {
- case 1:
- range = new Range(new Text(key));
- break;
- case 2:
- acuKey = new Key(row, key, cq);
- range = new Range(acuKey, true, acuKey.followingKey(PartialKey.ROW_COLFAM), false);
- break;
- case 3:
- acuKey = new Key(row, cf, key);
- range = new Range(acuKey, true, acuKey.followingKey(PartialKey.ROW_COLFAM_COLQUAL), false);
- break;
- }
-
- ranges.add(range);
- }
-
- BatchScanner bs = c.createBatchScanner(table, Authorizations.EMPTY, 1);
- bs.setRanges(ranges);
-
- long t1 = System.currentTimeMillis();
- for (Entry<Key,Value> entry : bs) {
- long v = Long.parseLong(entry.getValue().toString());
- if (!expected.remove(v)) {
- throw new Exception("Got unexpected return " + entry.getKey() + " " + entry.getValue());
- }
- }
- long t2 = System.currentTimeMillis();
-
- if (expected.size() > 0) {
- throw new Exception("Did not get all expected values " + expected.size());
- }
-
- bs.close();
-
- return t2 - t1;
- }
-
- private void write(Connector c, String table, int depth, long start, long end, int step) throws Exception {
-
- BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
-
- for (long i = start; i < end; i += step) {
- String key = String.format("k_%010d", i);
-
- Mutation m = null;
-
- switch (depth) {
- case 1:
- m = new Mutation(new Text(key));
- m.put(new Text("cf"), new Text("cq"), new Value(("" + i).getBytes()));
- break;
- case 2:
- m = new Mutation(new Text("row"));
- m.put(new Text(key), new Text("cq"), new Value(("" + i).getBytes()));
- break;
- case 3:
- m = new Mutation(new Text("row"));
- m.put(new Text("cf"), new Text(key), new Value(("" + i).getBytes()));
- break;
- }
-
- bw.addMutation(m);
- }
-
- bw.close();
-
- c.tableOperations().flush(table, null, null, true);
- }
-}
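Stripped of the timing harness, the bloom-filter setup this test exercises
comes down to three table operations. A sketch against the same client API;
the method name is ours, and the row functor is one of the three choices the
test covers:

  import org.apache.accumulo.core.client.Connector;
  import org.apache.accumulo.core.conf.Property;
  import org.apache.accumulo.core.file.keyfunctor.RowFunctor;

  // Enable bloom filters keyed on row. Filters are only built when files
  // are written, so compact to rewrite existing files with blooms.
  void enableRowBlooms(Connector c, String table) throws Exception {
    c.tableOperations().setProperty(table, Property.TABLE_BLOOM_ENABLED.getKey(), "true");
    c.tableOperations().setProperty(table, Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), RowFunctor.class.getName());
    c.tableOperations().compact(table, null, null, false, true);
  }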
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/BulkFileIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BulkFileIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BulkFileIT.java
deleted file mode 100644
index 1abafeb..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/BulkFileIT.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.file.FileOperations;
-import org.apache.accumulo.core.file.FileSKVWriter;
-import org.apache.accumulo.core.file.rfile.RFile;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.MemoryUnit;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.conf.ServerConfigurationFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class BulkFileIT extends AccumuloClusterHarness {
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration conf) {
- cfg.setMemory(ServerType.TABLET_SERVER, 128 * 4, MemoryUnit.MEGABYTE);
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 4 * 60;
- }
-
- @Test
- public void testBulkFile() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- SortedSet<Text> splits = new TreeSet<Text>();
- for (String split : "0333 0666 0999 1333 1666".split(" "))
- splits.add(new Text(split));
- c.tableOperations().addSplits(tableName, splits);
- Configuration conf = new Configuration();
- AccumuloConfiguration aconf = new ServerConfigurationFactory(c.getInstance()).getConfiguration();
- FileSystem fs = getCluster().getFileSystem();
-
- String rootPath = cluster.getTemporaryPath().toString();
-
- String dir = rootPath + "/bulk_test_diff_files_89723987592_" + getUniqueNames(1)[0];
-
- fs.delete(new Path(dir), true);
-
- FileSKVWriter writer1 = FileOperations.getInstance().openWriter(dir + "/f1." + RFile.EXTENSION, fs, conf, aconf);
- writer1.startDefaultLocalityGroup();
- writeData(writer1, 0, 333);
- writer1.close();
-
- FileSKVWriter writer2 = FileOperations.getInstance().openWriter(dir + "/f2." + RFile.EXTENSION, fs, conf, aconf);
- writer2.startDefaultLocalityGroup();
- writeData(writer2, 334, 999);
- writer2.close();
-
- FileSKVWriter writer3 = FileOperations.getInstance().openWriter(dir + "/f3." + RFile.EXTENSION, fs, conf, aconf);
- writer3.startDefaultLocalityGroup();
- writeData(writer3, 1000, 1999);
- writer3.close();
-
- FunctionalTestUtils.bulkImport(c, fs, tableName, dir);
-
- FunctionalTestUtils.checkRFiles(c, tableName, 6, 6, 1, 1);
-
- verifyData(tableName, 0, 1999);
-
- }
-
- private void verifyData(String table, int s, int e) throws Exception {
- Scanner scanner = getConnector().createScanner(table, Authorizations.EMPTY);
-
- Iterator<Entry<Key,Value>> iter = scanner.iterator();
-
- for (int i = s; i <= e; i++) {
- if (!iter.hasNext())
- throw new Exception("row " + i + " not found");
-
- Entry<Key,Value> entry = iter.next();
-
- String row = String.format("%04d", i);
-
- if (!entry.getKey().getRow().equals(new Text(row)))
- throw new Exception("unexpected row " + entry.getKey() + " " + i);
-
- if (Integer.parseInt(entry.getValue().toString()) != i)
- throw new Exception("unexpected value " + entry + " " + i);
- }
-
- if (iter.hasNext())
- throw new Exception("found more than expected " + iter.next());
- }
-
- private void writeData(FileSKVWriter w, int s, int e) throws Exception {
- for (int i = s; i <= e; i++) {
- w.append(new Key(new Text(String.format("%04d", i))), new Value(Integer.toString(i).getBytes(UTF_8)));
- }
- }
-
-}
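The write-then-import flow the test drives reduces to FileSKVWriter plus
importDirectory. A sketch under the same APIs; the paths and the single
key/value are placeholders, and keys must be appended in sorted order:

  import static java.nio.charset.StandardCharsets.UTF_8;

  import org.apache.accumulo.core.client.Connector;
  import org.apache.accumulo.core.conf.AccumuloConfiguration;
  import org.apache.accumulo.core.data.Key;
  import org.apache.accumulo.core.data.Value;
  import org.apache.accumulo.core.file.FileOperations;
  import org.apache.accumulo.core.file.FileSKVWriter;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.io.Text;

  // Write one sorted RFile into dir, then bulk import the directory;
  // files that cannot be loaded are moved into failDir.
  void writeAndImport(Connector c, FileSystem fs, Configuration conf, AccumuloConfiguration aconf,
      String table, String dir, String failDir) throws Exception {
    FileSKVWriter w = FileOperations.getInstance().openWriter(dir + "/f1.rf", fs, conf, aconf);
    w.startDefaultLocalityGroup();
    w.append(new Key(new Text("0001")), new Value("1".getBytes(UTF_8)));
    w.close();
    c.tableOperations().importDirectory(table, dir, failDir, false);
  }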
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/BulkIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BulkIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BulkIT.java
deleted file mode 100644
index f60724e..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/BulkIT.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.TestIngest.Opts;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.fs.Path;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-public class BulkIT extends AccumuloClusterHarness {
-
- private static final int N = 100000;
- private static final int COUNT = 5;
- private static final BatchWriterOpts BWOPTS = new BatchWriterOpts();
- private static final ScannerOpts SOPTS = new ScannerOpts();
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 4 * 60;
- }
-
- private Configuration origConf;
-
- @Before
- public void saveConf() {
- origConf = CachedConfiguration.getInstance();
- }
-
- @After
- public void restoreConf() {
- if (null != origConf) {
- CachedConfiguration.setInstance(origConf);
- }
- }
-
- @Test
- public void test() throws Exception {
- runTest(getConnector(), getCluster().getFileSystem(), getCluster().getTemporaryPath(), getAdminPrincipal(), getUniqueNames(1)[0],
- this.getClass().getName(), testName.getMethodName());
- }
-
- static void runTest(Connector c, FileSystem fs, Path basePath, String principal, String tableName, String filePrefix, String dirSuffix) throws Exception {
- c.tableOperations().create(tableName);
- CachedConfiguration.setInstance(fs.getConf());
-
- Path base = new Path(basePath, "testBulkFail_" + dirSuffix);
- fs.delete(base, true);
- fs.mkdirs(base);
- Path bulkFailures = new Path(base, "failures");
- Path files = new Path(base, "files");
- fs.mkdirs(bulkFailures);
- fs.mkdirs(files);
-
- Opts opts = new Opts();
- opts.timestamp = 1;
- opts.random = 56;
- opts.rows = N;
- opts.instance = c.getInstance().getInstanceName();
- opts.cols = 1;
- opts.setTableName(tableName);
- opts.conf = CachedConfiguration.getInstance();
- opts.fs = fs;
- String fileFormat = filePrefix + "rf%02d";
- for (int i = 0; i < COUNT; i++) {
- opts.outputFile = new Path(files, String.format(fileFormat, i)).toString();
- opts.startRow = N * i;
- TestIngest.ingest(c, opts, BWOPTS);
- }
- opts.outputFile = base + String.format(fileFormat, N);
- opts.startRow = N;
- opts.rows = 1;
- // create an rfile with one entry; there was a bug with this:
- TestIngest.ingest(c, opts, BWOPTS);
-
- // Make sure the server can modify the files
- FsShell fsShell = new FsShell(fs.getConf());
- Assert.assertEquals("Failed to chmod " + base.toString(), 0, fsShell.run(new String[] {"-chmod", "-R", "777", base.toString()}));
-
- c.tableOperations().importDirectory(tableName, files.toString(), bulkFailures.toString(), false);
- VerifyIngest.Opts vopts = new VerifyIngest.Opts();
- vopts.setTableName(tableName);
- vopts.random = 56;
- vopts.setPrincipal(principal);
- for (int i = 0; i < COUNT; i++) {
- vopts.startRow = i * N;
- vopts.rows = N;
- VerifyIngest.verifyIngest(c, vopts, SOPTS);
- }
- vopts.startRow = N;
- vopts.rows = 1;
- VerifyIngest.verifyIngest(c, vopts, SOPTS);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
deleted file mode 100644
index 74d3e96..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static com.google.common.base.Charsets.UTF_8;
-
-import org.apache.accumulo.core.cli.ClientOpts.Password;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
-import org.apache.accumulo.core.client.security.tokens.KerberosToken;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-/**
- * This test verifies that when many files are bulk imported into a table with a single tablet, and that tablet then splits, not all map files go to the child tablets.
- */
-
-public class BulkSplitOptimizationIT extends AccumuloClusterHarness {
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setProperty(Property.TSERV_MAJC_DELAY, "1s");
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- private String majcDelay;
-
- @Before
- public void alterConfig() throws Exception {
- Connector conn = getConnector();
- majcDelay = conn.instanceOperations().getSystemConfiguration().get(Property.TSERV_MAJC_DELAY.getKey());
- if (!"1s".equals(majcDelay)) {
- conn.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), "1s");
- getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
- getClusterControl().startAllServers(ServerType.TABLET_SERVER);
- }
- }
-
- @After
- public void resetConfig() throws Exception {
- if (null != majcDelay) {
- Connector conn = getConnector();
- conn.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
- getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
- getClusterControl().startAllServers(ServerType.TABLET_SERVER);
- }
- }
-
- static final int ROWS = 100000;
- static final int SPLITS = 99;
-
- @Test
- public void testBulkSplitOptimization() throws Exception {
- final Connector c = getConnector();
- final String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1000");
- c.tableOperations().setProperty(tableName, Property.TABLE_FILE_MAX.getKey(), "1000");
- c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "1G");
-
- FileSystem fs = getFileSystem();
- Path testDir = new Path(getUsableDir(), "testmf");
- FunctionalTestUtils.createRFiles(c, fs, testDir.toString(), ROWS, SPLITS, 8);
- FileStatus[] stats = fs.listStatus(testDir);
-
- System.out.println("Number of generated files: " + stats.length);
- FunctionalTestUtils.bulkImport(c, fs, tableName, testDir.toString());
- FunctionalTestUtils.checkSplits(c, tableName, 0, 0);
- FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 100, 100);
-
- // initiate splits
- getConnector().tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "100K");
-
- UtilWaitThread.sleep(2000);
-
- // wait until over split threshold -- should be 78 splits
- while (getConnector().tableOperations().listSplits(tableName).size() < 75) {
- UtilWaitThread.sleep(500);
- }
-
- FunctionalTestUtils.checkSplits(c, tableName, 50, 100);
- VerifyIngest.Opts opts = new VerifyIngest.Opts();
- opts.timestamp = 1;
- opts.dataSize = 50;
- opts.random = 56;
- opts.rows = 100000;
- opts.startRow = 0;
- opts.cols = 1;
- opts.setTableName(tableName);
-
- AuthenticationToken adminToken = getAdminToken();
- if (adminToken instanceof PasswordToken) {
- PasswordToken token = (PasswordToken) getAdminToken();
- opts.setPassword(new Password(new String(token.getPassword(), UTF_8)));
- opts.setPrincipal(getAdminPrincipal());
- } else if (adminToken instanceof KerberosToken) {
- ClientConfiguration clientConf = cluster.getClientConfig();
- opts.updateKerberosCredentials(clientConf);
- } else {
- Assert.fail("Unknown token type");
- }
-
- VerifyIngest.verifyIngest(c, opts, new ScannerOpts());
-
- // ensure each tablet does not have all map files, should be ~2.5 files per tablet
- FunctionalTestUtils.checkRFiles(c, tableName, 50, 100, 1, 4);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ChaoticBalancerIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ChaoticBalancerIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ChaoticBalancerIT.java
deleted file mode 100644
index 4055c3a..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ChaoticBalancerIT.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Map;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.master.balancer.ChaoticLoadBalancer;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class ChaoticBalancerIT extends AccumuloClusterHarness {
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- Map<String,String> siteConfig = cfg.getSiteConfig();
- siteConfig.put(Property.TSERV_MAXMEM.getKey(), "10K");
- siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "0");
- cfg.setSiteConfig(siteConfig);
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 4 * 60;
- }
-
- @Test
- public void test() throws Exception {
- Connector c = getConnector();
- String[] names = getUniqueNames(2);
- String tableName = names[0], unused = names[1];
- c.tableOperations().create(tableName);
- c.tableOperations().setProperty(tableName, Property.TABLE_LOAD_BALANCER.getKey(), ChaoticLoadBalancer.class.getName());
- c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
- SortedSet<Text> splits = new TreeSet<Text>();
- for (int i = 0; i < 100; i++) {
- splits.add(new Text(String.format("%03d", i)));
- }
- c.tableOperations().create(unused);
- c.tableOperations().addSplits(unused, splits);
- TestIngest.Opts opts = new TestIngest.Opts();
- VerifyIngest.Opts vopts = new VerifyIngest.Opts();
- vopts.rows = opts.rows = 20000;
- opts.setTableName(tableName);
- vopts.setTableName(tableName);
- ClientConfiguration clientConfig = getCluster().getClientConfig();
- if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- opts.updateKerberosCredentials(clientConfig);
- vopts.updateKerberosCredentials(clientConfig);
- } else {
- opts.setPrincipal(getAdminPrincipal());
- vopts.setPrincipal(getAdminPrincipal());
- }
- TestIngest.ingest(c, opts, new BatchWriterOpts());
- c.tableOperations().flush(tableName, null, null, true);
- VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ClassLoaderIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ClassLoaderIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ClassLoaderIT.java
deleted file mode 100644
index c06feed..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ClassLoaderIT.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Collections;
-import java.util.EnumSet;
-import java.util.Iterator;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.Combiner;
-import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.hamcrest.CoreMatchers;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Test;
-
-public class ClassLoaderIT extends AccumuloClusterHarness {
-
- private static final long ZOOKEEPER_PROPAGATION_TIME = 10 * 1000;
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- private String rootPath;
-
- @Before
- public void checkCluster() {
- Assume.assumeThat(getClusterType(), CoreMatchers.is(ClusterType.MINI));
- MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) getCluster();
- rootPath = mac.getConfig().getDir().getAbsolutePath();
- }
-
- @Test
- public void test() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
- Mutation m = new Mutation("row1");
- m.put("cf", "col1", "Test");
- bw.addMutation(m);
- bw.close();
- scanCheck(c, tableName, "Test");
- FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
- Path jarPath = new Path(rootPath + "/lib/ext/Test.jar");
- fs.copyFromLocalFile(new Path(System.getProperty("user.dir") + "/src/test/resources/TestCombinerX.jar"), jarPath);
- UtilWaitThread.sleep(1000);
- IteratorSetting is = new IteratorSetting(10, "TestCombiner", "org.apache.accumulo.test.functional.TestCombiner");
- Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf")));
- c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.scan));
- UtilWaitThread.sleep(ZOOKEEPER_PROPAGATION_TIME);
- scanCheck(c, tableName, "TestX");
- fs.delete(jarPath, true);
- fs.copyFromLocalFile(new Path(System.getProperty("user.dir") + "/src/test/resources/TestCombinerY.jar"), jarPath);
- UtilWaitThread.sleep(5000);
- scanCheck(c, tableName, "TestY");
- fs.delete(jarPath, true);
- }
-
- private void scanCheck(Connector c, String tableName, String expected) throws Exception {
- Scanner bs = c.createScanner(tableName, Authorizations.EMPTY);
- Iterator<Entry<Key,Value>> iterator = bs.iterator();
- assertTrue(iterator.hasNext());
- Entry<Key,Value> next = iterator.next();
- assertFalse(iterator.hasNext());
- assertEquals(expected, next.getValue().toString());
- }
-
-}
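The hot-deploy pattern this test automates is: drop a jar into lib/ext on
the servers, then attach the iterator it provides. A client-side sketch
using the same calls as the test; the priority and column family are
arbitrary choices:

  import java.util.Collections;
  import java.util.EnumSet;

  import org.apache.accumulo.core.client.Connector;
  import org.apache.accumulo.core.client.IteratorSetting;
  import org.apache.accumulo.core.iterators.Combiner;
  import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;

  // Attach a combiner loaded from a lib/ext jar at scan scope; the
  // server-side classloader picks up the jar without a restart.
  void attachCombiner(Connector c, String table) throws Exception {
    IteratorSetting is = new IteratorSetting(10, "TestCombiner", "org.apache.accumulo.test.functional.TestCombiner");
    Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf")));
    c.tableOperations().attachIterator(table, is, EnumSet.of(IteratorScope.scan));
  }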
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/CleanTmpIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CleanTmpIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CleanTmpIT.java
deleted file mode 100644
index 779b407..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/CleanTmpIT.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Iterators;
-
-public class CleanTmpIT extends ConfigurableMacBase {
- private static final Logger log = LoggerFactory.getLogger(CleanTmpIT.class);
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "3s");
- cfg.setNumTservers(1);
- // use raw local file system so walogs sync and flush will work
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 4 * 60;
- }
-
- @Test
- public void test() throws Exception {
- Connector c = getConnector();
- // make a table
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- // write to it
- BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
- Mutation m = new Mutation("row");
- m.put("cf", "cq", "value");
- bw.addMutation(m);
- bw.flush();
-
- // Compact memory to make a file
- c.tableOperations().compact(tableName, null, null, true, true);
-
- // Make sure that we'll have a WAL
- m = new Mutation("row2");
- m.put("cf", "cq", "value");
- bw.addMutation(m);
- bw.close();
-
- // create a fake _tmp file in its directory
- String id = c.tableOperations().tableIdMap().get(tableName);
- Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(Range.prefix(id));
- s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
- Entry<Key,Value> entry = Iterables.getOnlyElement(s);
- Path file = new Path(entry.getKey().getColumnQualifier().toString());
-
- FileSystem fs = getCluster().getFileSystem();
- assertTrue("Could not find file: " + file, fs.exists(file));
- Path tabletDir = file.getParent();
- assertNotNull("Tablet dir should not be null", tabletDir);
- Path tmp = new Path(tabletDir, "junk.rf_tmp");
- // Make the file
- fs.create(tmp).close();
- log.info("Created tmp file {}", tmp.toString());
- getCluster().stop();
- getCluster().start();
-
- Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
- assertEquals(2, Iterators.size(scanner.iterator()));
- // If we performed log recovery, we should have cleaned up any stray files
- assertFalse("File still exists: " + tmp, fs.exists(tmp));
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/CleanUpIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CleanUpIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CleanUpIT.java
deleted file mode 100644
index 1f6d1a0..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/CleanUpIT.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.CleanUp;
-import org.apache.accumulo.harness.SharedMiniClusterBase;
-import org.junit.Assert;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Ensures that all threads spawned for ZooKeeper and Thrift connectivity are reaped after calling CleanUp.shutdownNow().
- *
- * Because this is destructive across the current context classloader, the normal teardown methods will fail (because they attempt to create a Connector). Until
- * the ZooKeeperInstance and Connector are self-contained with respect to resource management, we can't leverage AccumuloClusterBase.
- */
-public class CleanUpIT extends SharedMiniClusterBase {
- private static final Logger log = LoggerFactory.getLogger(CleanUpIT.class);
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 30;
- }
-
- @Test
- public void run() throws Exception {
-
- String tableName = getUniqueNames(1)[0];
- getConnector().tableOperations().create(tableName);
-
- BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
-
- Mutation m1 = new Mutation("r1");
- m1.put("cf1", "cq1", 1, "5");
-
- bw.addMutation(m1);
-
- bw.flush();
-
- Scanner scanner = getConnector().createScanner(tableName, new Authorizations());
-
- int count = 0;
- for (Entry<Key,Value> entry : scanner) {
- count++;
- if (!entry.getValue().toString().equals("5")) {
- Assert.fail("Unexpected value " + entry.getValue());
- }
- }
-
- Assert.assertEquals("Unexpected count", 1, count);
-
- int threadCount = countThreads();
- if (threadCount < 2) {
- printThreadNames();
- Assert.fail("Not seeing expected threads. Saw " + threadCount);
- }
-
- CleanUp.shutdownNow();
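- // Everything below should now fail, since the shared ZooKeeper and Thrift resources are gone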
-
- Mutation m2 = new Mutation("r2");
- m2.put("cf1", "cq1", 1, "6");
-
- try {
- bw.addMutation(m2);
- bw.flush();
- Assert.fail("batch writer did not fail");
- } catch (Exception e) {
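- // expected: shutdownNow() released the resources the batch writer depends on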
-
- }
-
- try {
- // expect this to fail also, want to clean up batch writer threads
- bw.close();
- Assert.fail("batch writer close not fail");
- } catch (Exception e) {
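- // expected: close fails too, but it still reaps the batch writer threads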
-
- }
-
- try {
- count = 0;
- Iterator<Entry<Key,Value>> iter = scanner.iterator();
- while (iter.hasNext()) {
- iter.next();
- count++;
- }
- Assert.fail("scanner did not fail");
- } catch (Exception e) {
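- // expected: the scanner can no longer reach the instance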
-
- }
-
- threadCount = countThreads();
- if (threadCount > 0) {
- printThreadNames();
- Assert.fail("Threads did not go away. Saw " + threadCount);
- }
- }
-
- private void printThreadNames() {
- Set<Thread> threads = Thread.getAllStackTraces().keySet();
- Exception e = new Exception();
- for (Thread thread : threads) {
- e.setStackTrace(thread.getStackTrace());
- log.info("thread name: " + thread.getName(), e);
- }
- }
-
- /**
- * count threads that should be cleaned up
- *
- */
- private int countThreads() {
- int count = 0;
- Set<Thread> threads = Thread.getAllStackTraces().keySet();
- for (Thread thread : threads) {
-
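- // ZooKeeper clients run a SendThread and an EventThread; Thrift connection
- // pools include "thrift" and "pool" in their thread names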
- if (thread.getName().toLowerCase().contains("sendthread") || thread.getName().toLowerCase().contains("eventthread"))
- count++;
-
- if (thread.getName().toLowerCase().contains("thrift") && thread.getName().toLowerCase().contains("pool"))
- count++;
- }
-
- return count;
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/CloneTestIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CloneTestIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CloneTestIT.java
deleted file mode 100644
index b3d0ab5..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/CloneTestIT.java
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.accumulo.cluster.AccumuloCluster;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.admin.DiskUsage;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Test;
-
-public class CloneTestIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Test
- public void testProps() throws Exception {
- String[] tableNames = getUniqueNames(2);
- String table1 = tableNames[0];
- String table2 = tableNames[1];
-
- Connector c = getConnector();
-
- c.tableOperations().create(table1);
-
- c.tableOperations().setProperty(table1, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1M");
- c.tableOperations().setProperty(table1, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX.getKey(), "2M");
- c.tableOperations().setProperty(table1, Property.TABLE_FILE_MAX.getKey(), "23");
-
- BatchWriter bw = writeData(table1, c);
-
- Map<String,String> props = new HashMap<String,String>();
- props.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "500K");
-
- Set<String> exclude = new HashSet<String>();
- exclude.add(Property.TABLE_FILE_MAX.getKey());
-
- c.tableOperations().clone(table1, table2, true, props, exclude);
-
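- // Writes made through the pre-clone batch writer land in table1 only; table2 must still match the pre-clone data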
- Mutation m3 = new Mutation("009");
- m3.put("data", "x", "1");
- m3.put("data", "y", "2");
- bw.addMutation(m3);
- bw.close();
-
- checkData(table2, c);
-
- checkMetadata(table2, c);
-
- HashMap<String,String> tableProps = new HashMap<String,String>();
- for (Entry<String,String> prop : c.tableOperations().getProperties(table2)) {
- tableProps.put(prop.getKey(), prop.getValue());
- }
-
- Assert.assertEquals("500K", tableProps.get(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey()));
- Assert.assertEquals(Property.TABLE_FILE_MAX.getDefaultValue(), tableProps.get(Property.TABLE_FILE_MAX.getKey()));
- Assert.assertEquals("2M", tableProps.get(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX.getKey()));
-
- c.tableOperations().delete(table1);
- c.tableOperations().delete(table2);
-
- }
-
- private void checkData(String table2, Connector c) throws TableNotFoundException {
- Scanner scanner = c.createScanner(table2, Authorizations.EMPTY);
-
- HashMap<String,String> expected = new HashMap<String,String>();
- expected.put("001:x", "9");
- expected.put("001:y", "7");
- expected.put("008:x", "3");
- expected.put("008:y", "4");
-
- HashMap<String,String> actual = new HashMap<String,String>();
-
- for (Entry<Key,Value> entry : scanner)
- actual.put(entry.getKey().getRowData().toString() + ":" + entry.getKey().getColumnQualifierData().toString(), entry.getValue().toString());
-
- Assert.assertEquals(expected, actual);
- }
-
- private void checkMetadata(String table, Connector conn) throws Exception {
- Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-
- s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
- MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(s);
- String tableId = conn.tableOperations().tableIdMap().get(table);
-
- Assert.assertNotNull("Could not get table id for " + table, tableId);
-
- s.setRange(Range.prefix(tableId));
-
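- // Every file reference must exist on disk, and every tablet directory must live under the tables dir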
- Key k;
- Text cf = new Text(), cq = new Text();
- int itemsInspected = 0;
- for (Entry<Key,Value> entry : s) {
- itemsInspected++;
- k = entry.getKey();
- k.getColumnFamily(cf);
- k.getColumnQualifier(cq);
-
- if (cf.equals(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME)) {
- Path p = new Path(cq.toString());
- FileSystem fs = cluster.getFileSystem();
- Assert.assertTrue("File does not exist: " + p, fs.exists(p));
- } else if (cf.equals(MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily())) {
- Assert.assertEquals("Saw unexpected cq", MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), cq);
- Path tabletDir = new Path(entry.getValue().toString());
- Path tableDir = tabletDir.getParent();
- Path tablesDir = tableDir.getParent();
-
- Assert.assertEquals(ServerConstants.TABLE_DIR, tablesDir.getName());
- } else {
- Assert.fail("Got unexpected key-value: " + entry);
- throw new RuntimeException();
- }
- }
-
- Assert.assertTrue("Expected to find metadata entries", itemsInspected > 0);
- }
-
- private BatchWriter writeData(String table1, Connector c) throws TableNotFoundException, MutationsRejectedException {
- BatchWriter bw = c.createBatchWriter(table1, new BatchWriterConfig());
-
- Mutation m1 = new Mutation("001");
- m1.put("data", "x", "9");
- m1.put("data", "y", "7");
-
- Mutation m2 = new Mutation("008");
- m2.put("data", "x", "3");
- m2.put("data", "y", "4");
-
- bw.addMutation(m1);
- bw.addMutation(m2);
-
- bw.flush();
- return bw;
- }
-
- @Test
- public void testDeleteClone() throws Exception {
- String[] tableNames = getUniqueNames(3);
- String table1 = tableNames[0];
- String table2 = tableNames[1];
- String table3 = tableNames[2];
-
- Connector c = getConnector();
- AccumuloCluster cluster = getCluster();
- Assume.assumeTrue(cluster instanceof MiniAccumuloClusterImpl);
- MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
- String rootPath = mac.getConfig().getDir().getAbsolutePath();
-
- // verify that deleting a new table removes the files
- c.tableOperations().create(table3);
- writeData(table3, c).close();
- c.tableOperations().flush(table3, null, null, true);
- // check for files
- FileSystem fs = getCluster().getFileSystem();
- String id = c.tableOperations().tableIdMap().get(table3);
- FileStatus[] status = fs.listStatus(new Path(rootPath + "/accumulo/tables/" + id));
- assertTrue(status.length > 0);
- // verify disk usage
- List<DiskUsage> diskUsage = c.tableOperations().getDiskUsage(Collections.singleton(table3));
- assertEquals(1, diskUsage.size());
- assertTrue(diskUsage.get(0).getUsage() > 100);
- // delete the table
- c.tableOperations().delete(table3);
- // verify it's gone from the file system
- Path tablePath = new Path(rootPath + "/accumulo/tables/" + id);
- if (fs.exists(tablePath)) {
- status = fs.listStatus(tablePath);
- assertTrue(status == null || status.length == 0);
- }
-
- c.tableOperations().create(table1);
-
- BatchWriter bw = writeData(table1, c);
-
- Map<String,String> props = new HashMap<String,String>();
- props.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "500K");
-
- Set<String> exclude = new HashSet<String>();
- exclude.add(Property.TABLE_FILE_MAX.getKey());
-
- c.tableOperations().clone(table1, table2, true, props, exclude);
-
- Mutation m3 = new Mutation("009");
- m3.put("data", "x", "1");
- m3.put("data", "y", "2");
- bw.addMutation(m3);
- bw.close();
-
- // delete source table, should not affect clone
- c.tableOperations().delete(table1);
-
- checkData(table2, c);
-
- c.tableOperations().compact(table2, null, null, true, true);
-
- checkData(table2, c);
-
- c.tableOperations().delete(table2);
-
- }
-
- @Test
- public void testCloneWithSplits() throws Exception {
- Connector conn = getConnector();
-
- List<Mutation> mutations = new ArrayList<Mutation>();
- TreeSet<Text> splits = new TreeSet<Text>();
- for (int i = 0; i < 10; i++) {
- splits.add(new Text(Integer.toString(i)));
- Mutation m = new Mutation(Integer.toString(i));
- m.put("", "", "");
- mutations.add(m);
- }
-
- String[] tables = getUniqueNames(2);
-
- conn.tableOperations().create(tables[0]);
-
- conn.tableOperations().addSplits(tables[0], splits);
-
- BatchWriter bw = conn.createBatchWriter(tables[0], new BatchWriterConfig());
- bw.addMutations(mutations);
- bw.close();
-
- conn.tableOperations().clone(tables[0], tables[1], true, null, null);
-
- conn.tableOperations().deleteRows(tables[1], new Text("4"), new Text("8"));
-
- List<String> rows = Arrays.asList("0", "1", "2", "3", "4", "9");
- List<String> actualRows = new ArrayList<String>();
- for (Entry<Key,Value> entry : conn.createScanner(tables[1], Authorizations.EMPTY)) {
- actualRows.add(entry.getKey().getRow().toString());
- }
-
- Assert.assertEquals(rows, actualRows);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/CombinerIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CombinerIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CombinerIT.java
deleted file mode 100644
index d4ef18e..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/CombinerIT.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.LongCombiner.Type;
-import org.apache.accumulo.core.iterators.user.SummingCombiner;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.junit.Test;
-
-public class CombinerIT extends AccumuloClusterHarness {
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 60;
- }
-
- private void checkSum(String tableName, Connector c) throws Exception {
- Scanner s = c.createScanner(tableName, Authorizations.EMPTY);
- Iterator<Entry<Key,Value>> i = s.iterator();
- assertTrue(i.hasNext());
- Entry<Key,Value> entry = i.next();
- assertEquals("45", entry.getValue().toString());
- assertFalse(i.hasNext());
- }
-
- @Test
- public void aggregationTest() throws Exception {
- Connector c = getConnector();
- String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- IteratorSetting setting = new IteratorSetting(10, SummingCombiner.class);
- SummingCombiner.setEncodingType(setting, Type.STRING);
- SummingCombiner.setColumns(setting, Collections.singletonList(new IteratorSetting.Column("cf")));
- c.tableOperations().attachIterator(tableName, setting);
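- // The combiner sums the ten updates 0..9 written below into a single cell with value 45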
- BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
- for (int i = 0; i < 10; i++) {
- Mutation m = new Mutation("row1");
- m.put("cf".getBytes(), "col1".getBytes(), ("" + i).getBytes());
- bw.addMutation(m);
- }
- bw.close();
- checkSum(tableName, c);
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/CompactionIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CompactionIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CompactionIT.java
deleted file mode 100644
index 862365f..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/CompactionIT.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.accumulo.core.cli.ClientOpts.Password;
-import org.apache.accumulo.core.cli.ScannerOpts;
-import org.apache.accumulo.core.client.ClientConfiguration;
-import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.InstanceOperations;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Iterators;
-
-public class CompactionIT extends AccumuloClusterHarness {
- private static final Logger log = LoggerFactory.getLogger(CompactionIT.class);
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
- cfg.setProperty(Property.TSERV_MAJC_THREAD_MAXOPEN, "4");
- cfg.setProperty(Property.TSERV_MAJC_DELAY, "1");
- cfg.setProperty(Property.TSERV_MAJC_MAXCONCURRENT, "1");
- // use raw local file system so walogs sync and flush will work
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 4 * 60;
- }
-
- private String majcThreadMaxOpen, majcDelay, majcMaxConcurrent;
-
- @Before
- public void alterConfig() throws Exception {
- if (ClusterType.STANDALONE == getClusterType()) {
- InstanceOperations iops = getConnector().instanceOperations();
- Map<String,String> config = iops.getSystemConfiguration();
- majcThreadMaxOpen = config.get(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey());
- majcDelay = config.get(Property.TSERV_MAJC_DELAY.getKey());
- majcMaxConcurrent = config.get(Property.TSERV_MAJC_MAXCONCURRENT.getKey());
-
- iops.setProperty(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey(), "4");
- iops.setProperty(Property.TSERV_MAJC_DELAY.getKey(), "1");
- iops.setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "1");
-
- getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
- getClusterControl().startAllServers(ServerType.TABLET_SERVER);
- }
- }
-
- @After
- public void resetConfig() throws Exception {
- // Restore the original values if alterConfig() changed them
- if (null != majcThreadMaxOpen) {
- InstanceOperations iops = getConnector().instanceOperations();
-
- iops.setProperty(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey(), majcThreadMaxOpen);
- iops.setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
- iops.setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), majcMaxConcurrent);
-
- getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
- getClusterControl().startAllServers(ServerType.TABLET_SERVER);
- }
- }
-
- @Test
- public void test() throws Exception {
- final Connector c = getConnector();
- final String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1.0");
- FileSystem fs = getFileSystem();
- Path root = new Path(cluster.getTemporaryPath(), getClass().getName());
- Path testrf = new Path(root, "testrf");
- FunctionalTestUtils.createRFiles(c, fs, testrf.toString(), 500000, 59, 4);
-
- FunctionalTestUtils.bulkImport(c, fs, tableName, testrf.toString());
- int beforeCount = countFiles(c);
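- // While the readers below verify the data, background major compactions should reduce the file count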
-
- final AtomicBoolean fail = new AtomicBoolean(false);
- final ClientConfiguration clientConf = cluster.getClientConfig();
- for (int count = 0; count < 5; count++) {
- List<Thread> threads = new ArrayList<Thread>();
- final int span = 500000 / 59;
- for (int i = 0; i < 500000; i += span) {
- final int finalI = i;
- Thread t = new Thread() {
- @Override
- public void run() {
- try {
- VerifyIngest.Opts opts = new VerifyIngest.Opts();
- opts.startRow = finalI;
- opts.rows = span;
- opts.random = 56;
- opts.dataSize = 50;
- opts.cols = 1;
- opts.setTableName(tableName);
- if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
- opts.updateKerberosCredentials(clientConf);
- } else {
- opts.setPrincipal(getAdminPrincipal());
- PasswordToken passwordToken = (PasswordToken) getAdminToken();
- opts.setPassword(new Password(new String(passwordToken.getPassword(), UTF_8)));
- }
- VerifyIngest.verifyIngest(c, opts, new ScannerOpts());
- } catch (Exception ex) {
- log.warn("Got exception verifying data", ex);
- fail.set(true);
- }
- }
- };
- t.start();
- threads.add(t);
- }
- for (Thread t : threads)
- t.join();
- assertFalse("Failed to successfully run all threads, Check the test output for error", fail.get());
- }
-
- int finalCount = countFiles(c);
- assertTrue(finalCount < beforeCount);
- try {
- getClusterControl().adminStopAll();
- } finally {
- // Make sure the internal state in the cluster is reset (e.g. processes in MAC)
- getCluster().stop();
- if (ClusterType.STANDALONE == getClusterType()) {
- // Then restart things for the next test if it's a standalone
- getCluster().start();
- }
- }
- }
-
- private int countFiles(Connector c) throws Exception {
- Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.fetchColumnFamily(MetadataSchema.TabletsSection.TabletColumnFamily.NAME);
- s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
- return Iterators.size(s.iterator());
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ConcurrencyIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ConcurrencyIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ConcurrencyIT.java
deleted file mode 100644
index 75eecfd..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ConcurrencyIT.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.util.EnumSet;
-import java.util.Map;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.harness.AccumuloClusterHarness;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class ConcurrencyIT extends AccumuloClusterHarness {
-
- static class ScanTask extends Thread {
-
- int count = 0;
- Scanner scanner;
-
- ScanTask(Connector conn, String tableName, long time) throws Exception {
- scanner = conn.createScanner(tableName, Authorizations.EMPTY);
- IteratorSetting slow = new IteratorSetting(30, "slow", SlowIterator.class);
- SlowIterator.setSleepTime(slow, time);
- scanner.addScanIterator(slow);
- }
-
- @Override
- public void run() {
- count = Iterators.size(scanner.iterator());
- }
-
- }
-
- @Override
- public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- Map<String,String> siteConfig = cfg.getSiteConfig();
- siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1");
- cfg.setSiteConfig(siteConfig);
- }
-
- @Override
- protected int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- // @formatter:off
- // Below is a diagram of the operations in this test over time.
- //
- // Scan 0 |------------------------------|
- // Scan 1 |----------|
- // Minc 1 |-----|
- // Scan 2 |----------|
- // Scan 3 |---------------|
- // Minc 2 |-----|
- // Majc 1 |-----|
- // @formatter:on
- @Test
- public void run() throws Exception {
- Connector c = getConnector();
- runTest(c, getUniqueNames(1)[0]);
- }
-
- static void runTest(Connector c, String tableName) throws Exception {
- c.tableOperations().create(tableName);
- IteratorSetting is = new IteratorSetting(10, SlowIterator.class);
- SlowIterator.setSleepTime(is, 50);
- c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.minc, IteratorScope.majc));
- c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1.0");
-
- BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
- for (int i = 0; i < 50; i++) {
- Mutation m = new Mutation(new Text(String.format("%06d", i)));
- m.put(new Text("cf1"), new Text("cq1"), new Value("foo".getBytes(UTF_8)));
- bw.addMutation(m);
- }
- bw.flush();
-
- ScanTask st0 = new ScanTask(c, tableName, 300);
- st0.start();
-
- ScanTask st1 = new ScanTask(c, tableName, 100);
- st1.start();
-
- UtilWaitThread.sleep(50);
- c.tableOperations().flush(tableName, null, null, true);
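- // This flush is Minc 1 in the diagram above; it runs while scans 0 and 1 are still active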
-
- for (int i = 0; i < 50; i++) {
- Mutation m = new Mutation(new Text(String.format("%06d", i)));
- m.put(new Text("cf1"), new Text("cq1"), new Value("foo".getBytes(UTF_8)));
- bw.addMutation(m);
- }
-
- bw.flush();
-
- ScanTask st2 = new ScanTask(c, tableName, 100);
- st2.start();
-
- st1.join();
- st2.join();
- if (st1.count != 50)
- throw new Exception("Thread 1 did not see 50, saw " + st1.count);
-
- if (st2.count != 50)
- throw new Exception("Thread 2 did not see 50, saw " + st2.count);
-
- ScanTask st3 = new ScanTask(c, tableName, 150);
- st3.start();
-
- UtilWaitThread.sleep(50);
- c.tableOperations().flush(tableName, null, null, false);
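- // Minc 2: per the diagram, with TABLE_MAJC_RATIO at 1.0 the second file then triggers Majc 1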
-
- st3.join();
- if (st3.count != 50)
- throw new Exception("Thread 3 did not see 50, saw " + st3.count);
-
- st0.join();
- if (st0.count != 50)
- throw new Exception("Thread 0 did not see 50, saw " + st0.count);
-
- bw.close();
- }
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/functional/ConfigurableCompactionIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ConfigurableCompactionIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ConfigurableCompactionIT.java
deleted file mode 100644
index 66695e0..0000000
--- a/test/src/test/java/org/apache/accumulo/test/functional/ConfigurableCompactionIT.java
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Map;
-import java.util.Random;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.fate.util.UtilWaitThread;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.tserver.compaction.CompactionPlan;
-import org.apache.accumulo.tserver.compaction.CompactionStrategy;
-import org.apache.accumulo.tserver.compaction.MajorCompactionRequest;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Test;
-
-import com.google.common.collect.Iterators;
-
-public class ConfigurableCompactionIT extends ConfigurableMacBase {
-
- @Override
- public int defaultTimeoutSeconds() {
- return 2 * 60;
- }
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_MAJC_DELAY.getKey(), "1s"));
- }
-
- public static class SimpleCompactionStrategy extends CompactionStrategy {
-
- @Override
- public void init(Map<String,String> options) {
- String countString = options.get("count");
- if (countString != null)
- count = Integer.parseInt(countString);
- }
-
- int count = 3;
-
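- // Compact only when exactly 'count' files are present, and then include all of them in the plan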
- @Override
- public boolean shouldCompact(MajorCompactionRequest request) throws IOException {
- return request.getFiles().size() == count;
-
- }
-
- @Override
- public CompactionPlan getCompactionPlan(MajorCompactionRequest request) throws IOException {
- CompactionPlan result = new CompactionPlan();
- result.inputFiles.addAll(request.getFiles().keySet());
- return result;
- }
-
- }
-
- @Test
- public void test() throws Exception {
- final Connector c = getConnector();
- final String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- c.tableOperations().setProperty(tableName, Property.TABLE_COMPACTION_STRATEGY.getKey(), SimpleCompactionStrategy.class.getName());
- runTest(c, tableName, 3);
- c.tableOperations().setProperty(tableName, Property.TABLE_COMPACTION_STRATEGY_PREFIX.getKey() + "count", "" + 5);
- runTest(c, tableName, 5);
- }
-
- @Test
- public void testPerTableClasspath() throws Exception {
- final Connector c = getConnector();
- final String tableName = getUniqueNames(1)[0];
- c.tableOperations().create(tableName);
- c.instanceOperations().setProperty(Property.VFS_CONTEXT_CLASSPATH_PROPERTY.getKey() + "context1",
- System.getProperty("user.dir") + "/src/test/resources/TestCompactionStrat.jar");
- c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "10");
- c.tableOperations().setProperty(tableName, Property.TABLE_CLASSPATH.getKey(), "context1");
- // EfgCompactionStrat will only compact a tablet w/ end row of 'efg'. No other tablets are compacted.
- c.tableOperations().setProperty(tableName, Property.TABLE_COMPACTION_STRATEGY.getKey(), "org.apache.accumulo.test.EfgCompactionStrat");
-
- c.tableOperations().addSplits(tableName, new TreeSet<Text>(Arrays.asList(new Text("efg"))));
-
- for (char ch = 'a'; ch < 'l'; ch++)
- writeFlush(c, tableName, ch + "");
-
- while (countFiles(c, tableName) != 7) {
- UtilWaitThread.sleep(200);
- }
- }
-
- private void writeFlush(Connector conn, String tablename, String row) throws Exception {
- BatchWriter bw = conn.createBatchWriter(tablename, new BatchWriterConfig());
- Mutation m = new Mutation(row);
- m.put("", "", "");
- bw.addMutation(m);
- bw.close();
- conn.tableOperations().flush(tablename, null, null, true);
- }
-
- final static Random r = new Random();
-
- private void makeFile(Connector conn, String tablename) throws Exception {
- BatchWriter bw = conn.createBatchWriter(tablename, new BatchWriterConfig());
- byte[] empty = {};
- byte[] row = new byte[10];
- r.nextBytes(row);
- Mutation m = new Mutation(row, 0, 10);
- m.put(empty, empty, empty);
- bw.addMutation(m);
- bw.flush();
- bw.close();
- conn.tableOperations().flush(tablename, null, null, true);
- }
-
- private void runTest(final Connector c, final String tableName, final int n) throws Exception {
- for (int i = countFiles(c, tableName); i < n - 1; i++)
- makeFile(c, tableName);
- Assert.assertEquals(n - 1, countFiles(c, tableName));
- makeFile(c, tableName);
- for (int i = 0; i < 10; i++) {
- int count = countFiles(c, tableName);
- assertTrue(count == 1 || count == n);
- if (count == 1)
- break;
- UtilWaitThread.sleep(1000);
- }
- }
-
- private int countFiles(Connector c, String tableName) throws Exception {
- Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
- return Iterators.size(s.iterator());
- }
-
-}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/replication/ReplicationIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/replication/ReplicationIT.java b/test/src/test/java/org/apache/accumulo/test/replication/ReplicationIT.java
deleted file mode 100644
index 77198df..0000000
--- a/test/src/test/java/org/apache/accumulo/test/replication/ReplicationIT.java
+++ /dev/null
@@ -1,1436 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.replication;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.EnumSet;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.NoSuchElementException;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.IteratorSetting.Column;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.TableOfflineException;
-import org.apache.accumulo.core.client.ZooKeeperInstance;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.impl.KeyExtent;
-import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
-import org.apache.accumulo.core.iterators.conf.ColumnSet;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.ReplicationSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
-import org.apache.accumulo.core.protobuf.ProtobufUtil;
-import org.apache.accumulo.core.replication.ReplicationSchema.StatusSection;
-import org.apache.accumulo.core.replication.ReplicationSchema.WorkSection;
-import org.apache.accumulo.core.replication.ReplicationTable;
-import org.apache.accumulo.core.replication.ReplicationTarget;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.tabletserver.log.LogEntry;
-import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.ZooCache;
-import org.apache.accumulo.fate.zookeeper.ZooCacheFactory;
-import org.apache.accumulo.fate.zookeeper.ZooLock;
-import org.apache.accumulo.gc.SimpleGarbageCollector;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.server.log.WalStateManager;
-import org.apache.accumulo.server.log.WalStateManager.WalState;
-import org.apache.accumulo.server.master.state.TServerInstance;
-import org.apache.accumulo.server.replication.ReplicaSystemFactory;
-import org.apache.accumulo.server.replication.StatusCombiner;
-import org.apache.accumulo.server.replication.StatusFormatter;
-import org.apache.accumulo.server.replication.StatusUtil;
-import org.apache.accumulo.server.replication.proto.Replication.Status;
-import org.apache.accumulo.server.util.ReplicationTableUtil;
-import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Function;
-import com.google.common.base.Joiner;
-import com.google.common.collect.HashMultimap;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Iterators;
-import com.google.common.collect.Multimap;
-import com.google.common.collect.Sets;
-import com.google.protobuf.TextFormat;
-
-/**
- * Replication tests which verify expected functionality using a single MAC instance. A MockReplicaSystem is used to "fake" the peer instance that we're
- * replicating to. This lets us test replication in a functional way without having to worry about two real systems.
- */
-public class ReplicationIT extends ConfigurableMacBase {
- private static final Logger log = LoggerFactory.getLogger(ReplicationIT.class);
-
- @Override
- public int defaultTimeoutSeconds() {
- return 60 * 10;
- }
-
- @Override
- public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- // Run the master replication loop frequently
- cfg.setProperty(Property.MASTER_REPLICATION_SCAN_INTERVAL, "1s");
- cfg.setProperty(Property.REPLICATION_WORK_ASSIGNMENT_SLEEP, "1s");
- cfg.setProperty(Property.TSERV_WALOG_MAX_SIZE, "1M");
- cfg.setProperty(Property.GC_CYCLE_START, "1s");
- cfg.setProperty(Property.GC_CYCLE_DELAY, "0");
- cfg.setProperty(Property.REPLICATION_NAME, "master");
- cfg.setProperty(Property.REPLICATION_WORK_PROCESSOR_DELAY, "1s");
- cfg.setProperty(Property.REPLICATION_WORK_PROCESSOR_PERIOD, "1s");
- cfg.setProperty(Property.TSERV_TOTAL_MUTATION_QUEUE_MAX, "1M");
- cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
- cfg.setNumTservers(1);
- hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
- }
-
- private Multimap<String,String> getLogs(Connector conn) throws Exception {
- // Map of server to tableId
- Multimap<TServerInstance,String> serverToTableID = HashMultimap.create();
- Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- scanner.setRange(MetadataSchema.TabletsSection.getRange());
- scanner.fetchColumnFamily(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME);
- for (Entry<Key,Value> entry : scanner) {
- TServerInstance key = new TServerInstance(entry.getValue(), entry.getKey().getColumnQualifier());
- byte[] tableId = KeyExtent.tableOfMetadataRow(entry.getKey().getRow());
- serverToTableID.put(key, new String(tableId, UTF_8));
- }
- // Map of logs to tableId
- Multimap<String,String> logs = HashMultimap.create();
- Instance i = conn.getInstance();
- ZooReaderWriter zk = new ZooReaderWriter(i.getZooKeepers(), i.getZooKeepersSessionTimeOut(), "");
- WalStateManager wals = new WalStateManager(conn.getInstance(), zk);
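- // Join each server's WAL markers from ZooKeeper with the table IDs that server currently hosts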
- for (Entry<TServerInstance,List<UUID>> entry : wals.getAllMarkers().entrySet()) {
- for (UUID id : entry.getValue()) {
- Pair<WalState,Path> state = wals.state(entry.getKey(), id);
- for (String tableId : serverToTableID.get(entry.getKey())) {
- logs.put(state.getSecond().toString(), tableId);
- }
- }
- }
- return logs;
- }
-
- private Multimap<String,String> getAllLogs(Connector conn) throws Exception {
- Multimap<String,String> logs = getLogs(conn);
- try {
- Scanner scanner = conn.createScanner(ReplicationTable.NAME, Authorizations.EMPTY);
- StatusSection.limit(scanner);
- Text buff = new Text();
- for (Entry<Key,Value> entry : scanner) {
- if (Thread.interrupted()) {
- Thread.currentThread().interrupt();
- return logs;
- }
-
- StatusSection.getFile(entry.getKey(), buff);
- String file = buff.toString();
- StatusSection.getTableId(entry.getKey(), buff);
- String tableId = buff.toString();
-
- logs.put(file, tableId);
- }
- } catch (TableOfflineException e) {
- log.debug("Replication table isn't online yet");
- }
- return logs;
- }
-
- private void waitForGCLock(Connector conn) throws InterruptedException {
- // Check if the GC process has the lock before wasting our retry attempts
- ZooKeeperInstance zki = (ZooKeeperInstance) conn.getInstance();
- ZooCacheFactory zcf = new ZooCacheFactory();
- ZooCache zcache = zcf.getZooCache(zki.getZooKeepers(), zki.getZooKeepersSessionTimeOut());
- String zkPath = ZooUtil.getRoot(conn.getInstance()) + Constants.ZGC_LOCK;
- log.info("Looking for GC lock at {}", zkPath);
- byte[] data = ZooLock.getLockData(zcache, zkPath, null);
- while (null == data) {
- log.info("Waiting for GC ZooKeeper lock to be acquired");
- Thread.sleep(1000);
- data = ZooLock.getLockData(zcache, zkPath, null);
- }
- }
-
- @Test
- public void replicationTableCreated() throws AccumuloException, AccumuloSecurityException {
- Assert.assertTrue(getConnector().tableOperations().exists(ReplicationTable.NAME));
- Assert.assertEquals(ReplicationTable.ID, getConnector().tableOperations().tableIdMap().get(ReplicationTable.NAME));
- }
-
- @Test
- public void verifyReplicationTableConfig() throws AccumuloException, TableNotFoundException, AccumuloSecurityException {
- TableOperations tops = getConnector().tableOperations();
- Map<String,EnumSet<IteratorScope>> iterators = tops.listIterators(ReplicationTable.NAME);
-
- // verify combiners are only iterators (no versioning)
- Assert.assertEquals(1, iterators.size());
-
- // look for combiner
- Assert.assertTrue(iterators.containsKey(ReplicationTable.COMBINER_NAME));
- Assert.assertTrue(iterators.get(ReplicationTable.COMBINER_NAME).containsAll(EnumSet.allOf(IteratorScope.class)));
- for (IteratorScope scope : EnumSet.allOf(IteratorScope.class)) {
- IteratorSetting is = tops.getIteratorSetting(ReplicationTable.NAME, ReplicationTable.COMBINER_NAME, scope);
- Assert.assertEquals(30, is.getPriority());
- Assert.assertEquals(StatusCombiner.class.getName(), is.getIteratorClass());
- Assert.assertEquals(1, is.getOptions().size());
- Assert.assertTrue(is.getOptions().containsKey("columns"));
- String cols = is.getOptions().get("columns");
- Column statusSectionCol = new Column(StatusSection.NAME);
- Column workSectionCol = new Column(WorkSection.NAME);
- Assert.assertEquals(
- ColumnSet.encodeColumns(statusSectionCol.getColumnFamily(), statusSectionCol.getColumnQualifier()) + ","
- + ColumnSet.encodeColumns(workSectionCol.getColumnFamily(), workSectionCol.getColumnQualifier()), cols);
- }
-
- boolean foundLocalityGroups = false;
- boolean foundLocalityGroupDef1 = false;
- boolean foundLocalityGroupDef2 = false;
- boolean foundFormatter = false;
- Joiner j = Joiner.on(",");
- Function<Text,String> textToString = new Function<Text,String>() {
- @Override
- public String apply(Text text) {
- return text.toString();
- }
- };
- for (Entry<String,String> p : tops.getProperties(ReplicationTable.NAME)) {
- String key = p.getKey();
- String val = p.getValue();
- // STATUS_LG_NAME, STATUS_LG_COLFAMS, WORK_LG_NAME, WORK_LG_COLFAMS
- if (key.equals(Property.TABLE_FORMATTER_CLASS.getKey()) && val.equals(StatusFormatter.class.getName())) {
- // look for formatter
- foundFormatter = true;
- } else if (key.equals(Property.TABLE_LOCALITY_GROUPS.getKey()) && val.equals(j.join(ReplicationTable.LOCALITY_GROUPS.keySet()))) {
- // look for locality groups enabled
- foundLocalityGroups = true;
- } else if (key.startsWith(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey())) {
- // look for locality group column family definitions
- if (key.equals(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + ReplicationTable.STATUS_LG_NAME)
- && val.equals(j.join(Iterables.transform(ReplicationTable.STATUS_LG_COLFAMS, textToString)))) {
- foundLocalityGroupDef1 = true;
- } else if (key.equals(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + ReplicationTable.WORK_LG_NAME)
- && val.equals(j.join(Iterables.transform(ReplicationTable.WORK_LG_COLFAMS, textToString)))) {
- foundLocalityGroupDef2 = true;
- }
- }
- }
- Assert.assertTrue(foundLocalityGroups);
- Assert.assertTrue(foundLocalityGroupDef1);
- Assert.assertTrue(foundLocalityGroupDef2);
- Assert.assertTrue(foundFormatter);
- }
-
- @Test
- public void correctRecordsCompleteFile() throws Exception {
- Connector conn = getConnector();
- String table = "table1";
- conn.tableOperations().create(table);
- // If we have more than one tserver, this is subject to a race condition.
- conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION.getKey(), "true");
-
- BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
- for (int i = 0; i < 10; i++) {
- Mutation m = new Mutation(Integer.toString(i));
- m.put(new byte[0], new byte[0], new byte[0]);
- bw.addMutation(m);
- }
-
- bw.close();
-
- // After writing data, we'll get a replication table online
- boolean online = ReplicationTable.isOnline(conn);
- int attempts = 10;
- do {
- if (!online) {
- UtilWaitThread.sleep(2000);
- online = ReplicationTable.isOnline(conn);
- attempts--;
- }
- } while (!online && attempts > 0);
- Assert.assertTrue("Replication table was not online", online);
-
- for (int i = 0; i < 5; i++) {
- if (conn.securityOperations().hasTablePermission("root", ReplicationTable.NAME, TablePermission.READ)) {
- break;
- }
- log.info("Could not read replication table, waiting and will retry");
- Thread.sleep(2000);
- }
-
- Assert.assertTrue("'root' user could not read the replication table",
- conn.securityOperations().hasTablePermission("root", ReplicationTable.NAME, TablePermission.READ));
-
- Set<String> replRows = Sets.newHashSet();
- Scanner scanner;
- attempts = 5;
- while (replRows.isEmpty() && attempts > 0) {
- scanner = ReplicationTable.getScanner(conn);
- StatusSection.limit(scanner);
- for (Entry<Key,Value> entry : scanner) {
- Key k = entry.getKey();
-
- String fileUri = k.getRow().toString();
- try {
- new URI(fileUri);
- } catch (URISyntaxException e) {
- Assert.fail("Expected a valid URI: " + fileUri);
- }
-
- replRows.add(fileUri);
- }
- // Decrement and back off so this retry loop cannot spin forever
- attempts--;
- if (replRows.isEmpty())
- Thread.sleep(1000);
- }
-
- Set<String> wals = Sets.newHashSet();
- attempts = 5;
- Instance i = conn.getInstance();
- ZooReaderWriter zk = new ZooReaderWriter(i.getZooKeepers(), i.getZooKeepersSessionTimeOut(), "");
- while (wals.isEmpty() && attempts > 0) {
- WalStateManager markers = new WalStateManager(i, zk);
- for (Entry<Path,WalState> entry : markers.getAllState().entrySet()) {
- wals.add(entry.getKey().toString());
- }
- attempts--;
- }
-
- // We only have one file that should need replication (no trace table)
- // We should find an entry in tablet and in the repl row
- Assert.assertEquals("Rows found: " + replRows, 1, replRows.size());
-
- // There should only be one extra WALog that replication doesn't know about
- replRows.removeAll(wals);
- Assert.assertEquals(2, wals.size());
- Assert.assertEquals(0, replRows.size());
- }
-
- @Test
- public void noRecordsWithoutReplication() throws Exception {
- Connector conn = getConnector();
- List<String> tables = new ArrayList<>();
-
- // replication shouldn't be online when we begin
- Assert.assertFalse(ReplicationTable.isOnline(conn));
-
- for (int i = 0; i < 5; i++) {
- String name = "table" + i;
- tables.add(name);
- conn.tableOperations().create(name);
- }
-
- // nor after we create some tables (that aren't being replicated)
- Assert.assertFalse(ReplicationTable.isOnline(conn));
-
- for (String table : tables) {
- writeSomeData(conn, table, 5, 5);
- }
-
- // After writing data, still no replication table
- Assert.assertFalse(ReplicationTable.isOnline(conn));
-
- for (String table : tables) {
- conn.tableOperations().compact(table, null, null, true, true);
- }
-
- // After compacting data, still no replication table
- Assert.assertFalse(ReplicationTable.isOnline(conn));
-
- for (String table : tables) {
- conn.tableOperations().delete(table);
- }
-
- // After deleting tables, still no replication table
- Assert.assertFalse(ReplicationTable.isOnline(conn));
- }
-
- @Test
- public void twoEntriesForTwoTables() throws Exception {
- Connector conn = getConnector();
- String table1 = "table1", table2 = "table2";
-
- // replication shouldn't exist when we begin
- Assert.assertFalse("Replication table already online at the beginning of the test", ReplicationTable.isOnline(conn));
-
- // Create two tables
- conn.tableOperations().create(table1);
- conn.tableOperations().create(table2);
-
- // Enable replication on table1
- conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION.getKey(), "true");
-
- // Despite having replication on, we shouldn't have any need to write a record to it (and bring it online)
- Assert.assertFalse(ReplicationTable.isOnline(conn));
-
- // Write some data to table1
- writeSomeData(conn, table1, 50, 50);
-
- // After the commit for these mutations finishes, we'll get a replication entry in accumulo.metadata for table1
- // Don't want to compact table1, as that would ultimately cause the entry in accumulo.metadata to be removed before we can verify it's there
-
- // After writing data, we'll get a replication table online
- boolean online = ReplicationTable.isOnline(conn);
- int attempts = 10;
- do {
- if (!online) {
- UtilWaitThread.sleep(5000);
- online = ReplicationTable.isOnline(conn);
- attempts--;
- }
- } while (!online && attempts > 0);
- Assert.assertTrue("Replication table did not exist", online);
-
- Assert.assertTrue(ReplicationTable.isOnline(conn));
- conn.securityOperations().grantTablePermission("root", ReplicationTable.NAME, TablePermission.READ);
-
- // Verify that we found a single replication record that's for table1
- Scanner s = ReplicationTable.getScanner(conn);
- StatusSection.limit(s);
- Iterator<Entry<Key,Value>> iter = s.iterator();
- attempts = 5;
- while (attempts > 0) {
- if (!iter.hasNext()) {
- s.close();
- Thread.sleep(1000);
- s = ReplicationTable.getScanner(conn);
- iter = s.iterator();
- attempts--;
- } else {
- break;
- }
- }
- Assert.assertTrue(iter.hasNext());
- Entry<Key,Value> entry = iter.next();
- // We should find at least one status record for this table; we might find a second if another log was started while ingesting the data
- Assert.assertEquals("Expected to find replication entry for " + table1, conn.tableOperations().tableIdMap().get(table1), entry.getKey()
- .getColumnQualifier().toString());
- s.close();
-
- // Enable replication on table2
- conn.tableOperations().setProperty(table2, Property.TABLE_REPLICATION.getKey(), "true");
-
- // Write some data to table2
- writeSomeData(conn, table2, 50, 50);
-
- // After the commit on these mutations, we'll get a replication entry in accumulo.metadata for table2
- // Don't want to compact table2, as that would ultimately cause the entry in accumulo.metadata to be removed before we can verify it's there
-
- // After writing data, we'll get a replication table online
- Assert.assertTrue(ReplicationTable.isOnline(conn));
- conn.securityOperations().grantTablePermission("root", ReplicationTable.NAME, TablePermission.READ);
-
- Set<String> tableIds = Sets.newHashSet(conn.tableOperations().tableIdMap().get(table1), conn.tableOperations().tableIdMap().get(table2));
- Set<String> tableIdsForMetadata = Sets.newHashSet(tableIds);
-
- // Wait to make sure the table permissions propagate
- Thread.sleep(5000);
-
- s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(MetadataSchema.ReplicationSection.getRange());
-
- List<Entry<Key,Value>> records = new ArrayList<>();
- for (Entry<Key,Value> metadata : s) {
- records.add(metadata);
- }
-
- Assert.assertEquals("Expected to find 2 records, but actually found " + records, 2, records.size());
-
- for (Entry<Key,Value> metadata : records) {
- Assert.assertTrue("Expected record to be in metadata but wasn't " + metadata.getKey().toStringNoTruncate() + ", tableIds remaining "
- + tableIdsForMetadata, tableIdsForMetadata.remove(metadata.getKey().getColumnQualifier().toString()));
- }
-
- Assert.assertTrue("Expected that we had removed all metadata entries " + tableIdsForMetadata, tableIdsForMetadata.isEmpty());
-
- // The master should be creating these records in the replication table from the metadata table every second
- Thread.sleep(5000);
-
- // Verify that we found two replication records: one for table1 and one for table2
- s = ReplicationTable.getScanner(conn);
- StatusSection.limit(s);
- iter = s.iterator();
- Assert.assertTrue("Found no records in replication table", iter.hasNext());
- entry = iter.next();
- Assert.assertTrue("Expected to find element in replication table", tableIds.remove(entry.getKey().getColumnQualifier().toString()));
- Assert.assertTrue("Expected to find two elements in replication table, only found one ", iter.hasNext());
- entry = iter.next();
- Assert.assertTrue("Expected to find element in replication table", tableIds.remove(entry.getKey().getColumnQualifier().toString()));
- Assert.assertFalse("Expected to only find two elements in replication table", iter.hasNext());
- }
-
- private void writeSomeData(Connector conn, String table, int rows, int cols) throws Exception {
- BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
- for (int row = 0; row < rows; row++) {
- Mutation m = new Mutation(Integer.toString(row));
- for (int col = 0; col < cols; col++) {
- String value = Integer.toString(col);
- m.put(value, "", value);
- }
- bw.addMutation(m);
- }
- bw.close();
- }
-
- @Test
- public void replicationEntriesPrecludeWalDeletion() throws Exception {
- final Connector conn = getConnector();
- String table1 = "table1", table2 = "table2", table3 = "table3";
- final Multimap<String,String> logs = HashMultimap.create();
- final AtomicBoolean keepRunning = new AtomicBoolean(true);
-
- Thread t = new Thread(new Runnable() {
- @Override
- public void run() {
- // Should really be able to interrupt here, but the Scanner throws a fit to the logger
- // when that happens
- while (keepRunning.get()) {
- try {
- logs.putAll(getAllLogs(conn));
- } catch (Exception e) {
- log.error("Error getting logs", e);
- }
- }
- }
-
- });
-
- t.start();
-
- conn.tableOperations().create(table1);
- conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION.getKey(), "true");
- conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
- Thread.sleep(2000);
-
- // Write some data to table1
- writeSomeData(conn, table1, 200, 500);
-
- conn.tableOperations().create(table2);
- conn.tableOperations().setProperty(table2, Property.TABLE_REPLICATION.getKey(), "true");
- conn.tableOperations().setProperty(table2, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
- Thread.sleep(2000);
-
- writeSomeData(conn, table2, 200, 500);
-
- conn.tableOperations().create(table3);
- conn.tableOperations().setProperty(table3, Property.TABLE_REPLICATION.getKey(), "true");
- conn.tableOperations().setProperty(table3, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
- Thread.sleep(2000);
-
- writeSomeData(conn, table3, 200, 500);
-
- // Force a write to metadata for the data written
- for (String table : Arrays.asList(table1, table2, table3)) {
- conn.tableOperations().flush(table, null, null, true);
- }
-
- keepRunning.set(false);
- t.join(5000);
-
- // The master is only running every second to create records in the replication table from the metadata table
- // Sleep a sufficient amount of time to ensure that we get the straggling WALs that might have been created at the end
- Thread.sleep(5000);
-
- Scanner s = ReplicationTable.getScanner(conn);
- StatusSection.limit(s);
- Set<String> replFiles = new HashSet<>();
- for (Entry<Key,Value> entry : s) {
- replFiles.add(entry.getKey().getRow().toString());
- }
-
- // We might have a WAL that was used solely for the replication table
- // We want to remove that from our list as it should not appear in the replication table
- String replicationTableId = conn.tableOperations().tableIdMap().get(ReplicationTable.NAME);
- Iterator<Entry<String,String>> observedLogs = logs.entries().iterator();
- while (observedLogs.hasNext()) {
- Entry<String,String> observedLog = observedLogs.next();
- if (replicationTableId.equals(observedLog.getValue())) {
- log.info("Removing {} because its tableId is for the replication table", observedLog);
- observedLogs.remove();
- }
- }
-
- // We should have *some* reference to each log that was seen in the metadata table
- // They might not yet all be closed though (might be newfile)
- Assert.assertTrue("Metadata log distribution: " + logs + "replFiles " + replFiles, logs.keySet().containsAll(replFiles));
- Assert.assertTrue("Difference between replication entries and current logs is bigger than one", logs.keySet().size() - replFiles.size() <= 1);
-
- final Configuration conf = new Configuration();
- for (String replFile : replFiles) {
- Path p = new Path(replFile);
- FileSystem fs = p.getFileSystem(conf);
- Assert.assertTrue("File does not exist anymore, it was likely incorrectly garbage collected: " + p, fs.exists(p));
- }
- }
-
- @Test
- public void combinerWorksOnMetadata() throws Exception {
- Connector conn = getConnector();
-
- conn.securityOperations().grantTablePermission("root", MetadataTable.NAME, TablePermission.WRITE);
-
- ReplicationTableUtil.configureMetadataTable(conn, MetadataTable.NAME);
-
- Status stat1 = StatusUtil.fileCreated(100);
- Status stat2 = StatusUtil.fileClosed();
-
- BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
- Mutation m = new Mutation(ReplicationSection.getRowPrefix() + "file:/accumulo/wals/tserver+port/uuid");
- m.put(ReplicationSection.COLF, new Text("1"), ProtobufUtil.toValue(stat1));
- bw.addMutation(m);
- bw.close();
-
- Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(ReplicationSection.getRange());
-
- Status actual = Status.parseFrom(Iterables.getOnlyElement(s).getValue().get());
- Assert.assertEquals(stat1, actual);
-
- bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
- m = new Mutation(ReplicationSection.getRowPrefix() + "file:/accumulo/wals/tserver+port/uuid");
- m.put(ReplicationSection.COLF, new Text("1"), ProtobufUtil.toValue(stat2));
- bw.addMutation(m);
- bw.close();
-
- s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(ReplicationSection.getRange());
-
- actual = Status.parseFrom(Iterables.getOnlyElement(s).getValue().get());
- Status expected = Status.newBuilder().setBegin(0).setEnd(0).setClosed(true).setInfiniteEnd(true).setCreatedTime(100).build();
-
- Assert.assertEquals(expected, actual);
- }
-
- @Test
- public void noDeadlock() throws Exception {
- final Connector conn = getConnector();
-
- ReplicationTable.setOnline(conn);
- conn.securityOperations().grantTablePermission("root", ReplicationTable.NAME, TablePermission.WRITE);
- conn.tableOperations().deleteRows(ReplicationTable.NAME, null, null);
-
- String table1 = "table1", table2 = "table2", table3 = "table3";
- conn.tableOperations().create(table1);
- conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION.getKey(), "true");
- conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
- conn.tableOperations().create(table2);
- conn.tableOperations().setProperty(table2, Property.TABLE_REPLICATION.getKey(), "true");
- conn.tableOperations().setProperty(table2, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
- conn.tableOperations().create(table3);
- conn.tableOperations().setProperty(table3, Property.TABLE_REPLICATION.getKey(), "true");
- conn.tableOperations().setProperty(table3, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
-
- writeSomeData(conn, table1, 200, 500);
-
- writeSomeData(conn, table2, 200, 500);
-
- writeSomeData(conn, table3, 200, 500);
-
- // Flush everything to try to make the replication records
- for (String table : Arrays.asList(table1, table2, table3)) {
- conn.tableOperations().flush(table, null, null, true);
- }
-
- for (String table : Arrays.asList(MetadataTable.NAME, table1, table2, table3)) {
- Iterators.size(conn.createScanner(table, Authorizations.EMPTY).iterator());
- }
- }
-
- @Test
- public void filesClosedAfterUnused() throws Exception {
- Connector conn = getConnector();
-
- String table = "table";
- conn.tableOperations().create(table);
- String tableId = conn.tableOperations().tableIdMap().get(table);
-
- Assert.assertNotNull(tableId);
-
- conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION.getKey(), "true");
- conn.tableOperations().setProperty(table, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
- // Configure a peer that just sleeps (50 seconds) instead of replicating
- conn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1",
- ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, "50000"));
-
- // Write a mutation to make a log file
- BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
- Mutation m = new Mutation("one");
- m.put("", "", "");
- bw.addMutation(m);
- bw.close();
-
- // Write another to make sure the logger rolls itself?
- bw = conn.createBatchWriter(table, new BatchWriterConfig());
- m = new Mutation("three");
- m.put("", "", "");
- bw.addMutation(m);
- bw.close();
-
- Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.fetchColumnFamily(TabletsSection.LogColumnFamily.NAME);
- s.setRange(TabletsSection.getRange(tableId));
- Set<String> wals = new HashSet<>();
- for (Entry<Key,Value> entry : s) {
- LogEntry logEntry = LogEntry.fromKeyValue(entry.getKey(), entry.getValue());
- wals.add(new Path(logEntry.filename).toString());
- }
-
- log.warn("Found wals {}", wals);
-
- bw = conn.createBatchWriter(table, new BatchWriterConfig());
- m = new Mutation("three");
- byte[] bytes = new byte[1024 * 1024];
- m.put("1".getBytes(), new byte[0], bytes);
- m.put("2".getBytes(), new byte[0], bytes);
- m.put("3".getBytes(), new byte[0], bytes);
- m.put("4".getBytes(), new byte[0], bytes);
- m.put("5".getBytes(), new byte[0], bytes);
- bw.addMutation(m);
- bw.close();
-
- conn.tableOperations().flush(table, null, null, true);
-
- while (!ReplicationTable.isOnline(conn)) {
- UtilWaitThread.sleep(2000);
- }
-
- for (int i = 0; i < 10; i++) {
- s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.fetchColumnFamily(LogColumnFamily.NAME);
- s.setRange(TabletsSection.getRange(tableId));
- for (Entry<Key,Value> entry : s) {
- log.info(entry.getKey().toStringNoTruncate() + "=" + entry.getValue());
- }
-
- try {
- s = ReplicationTable.getScanner(conn);
- StatusSection.limit(s);
- Text buff = new Text();
- boolean allReferencedLogsClosed = true;
- int recordsFound = 0;
- for (Entry<Key,Value> e : s) {
- recordsFound++;
- StatusSection.getFile(e.getKey(), buff);
- String file = buff.toString();
- if (wals.contains(file)) {
- Status stat = Status.parseFrom(e.getValue().get());
- if (!stat.getClosed()) {
- log.info("{} wasn't closed", file);
- allReferencedLogsClosed = false;
- }
- }
- }
-
- if (recordsFound > 0 && allReferencedLogsClosed) {
- return;
- }
- Thread.sleep(2000);
- } catch (RuntimeException e) {
- Throwable cause = e.getCause();
- if (cause instanceof AccumuloSecurityException) {
- AccumuloSecurityException ase = (AccumuloSecurityException) cause;
- switch (ase.getSecurityErrorCode()) {
- case PERMISSION_DENIED:
- // We tried to read the replication table before the GRANT went through
- Thread.sleep(2000);
- break;
- default:
- throw e;
- }
- } else {
- // Not a permission error; rethrow rather than swallow it
- throw e;
- }
- }
- }
-
- Assert.fail("We had a file that was referenced but didn't get closed");
- }
-
- @Test
- public void singleTableWithSingleTarget() throws Exception {
- // We want to kill the GC so it doesn't come along and close Status records and mess up the comparisons
- // against expected Status messages.
- getCluster().getClusterControl().stop(ServerType.GARBAGE_COLLECTOR);
-
- Connector conn = getConnector();
- String table1 = "table1";
-
- // replication shouldn't be online when we begin
- Assert.assertFalse(ReplicationTable.isOnline(conn));
-
- // Create a table
- conn.tableOperations().create(table1);
-
- int attempts = 10;
-
- // Might think the table doesn't yet exist, retry
- while (attempts > 0) {
- try {
- // Enable replication on table1
- conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION.getKey(), "true");
- // Replicate table1 to cluster1 in the table with id of '4'
- conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "4");
- // Sleep for 100 seconds before saying something is replicated
- conn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1",
- ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, "100000"));
- break;
- } catch (Exception e) {
- attempts--;
- if (attempts <= 0) {
- throw e;
- }
- UtilWaitThread.sleep(2000);
- }
- }
-
- // Write some data to table1
- writeSomeData(conn, table1, 2000, 50);
-
- // Make sure the replication table is online at this point
- boolean online = ReplicationTable.isOnline(conn);
- attempts = 10;
- do {
- if (!online) {
- UtilWaitThread.sleep(2000);
- online = ReplicationTable.isOnline(conn);
- attempts--;
- }
- } while (!online && attempts > 0);
- Assert.assertTrue("Replication table was never created", online);
-
- // ACCUMULO-2743 The Observer in the tserver has to be made aware of the change to get the combiner (made by the master)
- for (int i = 0; i < 10 && !conn.tableOperations().listIterators(ReplicationTable.NAME).keySet().contains(ReplicationTable.COMBINER_NAME); i++) {
- UtilWaitThread.sleep(2000);
- }
-
- Assert.assertTrue("Combiner was never set on replication table",
- conn.tableOperations().listIterators(ReplicationTable.NAME).keySet().contains(ReplicationTable.COMBINER_NAME));
-
- // Trigger the minor compaction, waiting for it to finish.
- // This should write the entry to metadata that the file has data
- conn.tableOperations().flush(table1, null, null, true);
-
- // Make sure that we have one status element, should be a new file
- Scanner s = ReplicationTable.getScanner(conn);
- StatusSection.limit(s);
- Entry<Key,Value> entry = null;
- Status expectedStatus = StatusUtil.openWithUnknownLength();
- attempts = 10;
- // This record will move from 'new' to 'new with infinite length' because of the minc (flush)
- while (null == entry && attempts > 0) {
- try {
- entry = Iterables.getOnlyElement(s);
- Status actual = Status.parseFrom(entry.getValue().get());
- if (actual.getInfiniteEnd() != expectedStatus.getInfiniteEnd()) {
- entry = null;
- // the master process didn't yet fire and write the new mutation, wait for it to do
- // so and try to read it again
- Thread.sleep(1000);
- }
- } catch (NoSuchElementException e) {
- entry = null;
- Thread.sleep(500);
- } catch (IllegalArgumentException e) {
- // saw this contain 2 elements once
- s = ReplicationTable.getScanner(conn);
- StatusSection.limit(s);
- for (Entry<Key,Value> content : s) {
- log.info(content.getKey().toStringNoTruncate() + " => " + content.getValue());
- }
- throw e;
- } finally {
- attempts--;
- }
- }
-
- Assert.assertNotNull("Could not find expected entry in replication table", entry);
- Status actual = Status.parseFrom(entry.getValue().get());
- Assert.assertTrue("Expected to find a replication entry that is open with infinite length: " + ProtobufUtil.toString(actual),
- !actual.getClosed() && actual.getInfiniteEnd());
-
- // Try a couple of times to watch for the work record to be created
- boolean notFound = true;
- for (int i = 0; i < 10 && notFound; i++) {
- s = ReplicationTable.getScanner(conn);
- WorkSection.limit(s);
- int elementsFound = Iterables.size(s);
- if (0 < elementsFound) {
- Assert.assertEquals(1, elementsFound);
- notFound = false;
- }
- Thread.sleep(500);
- }
-
- // If we didn't find the work record, print the contents of the table
- if (notFound) {
- s = ReplicationTable.getScanner(conn);
- for (Entry<Key,Value> content : s) {
- log.info(content.getKey().toStringNoTruncate() + " => " + content.getValue());
- }
- Assert.assertFalse("Did not find the work entry for the status entry", notFound);
- }
-
- // Write some more data so that we over-run the single WAL
- writeSomeData(conn, table1, 3000, 50);
-
- log.info("Issued compaction for table");
- conn.tableOperations().compact(table1, null, null, true, true);
- log.info("Compaction completed");
-
- // Master is creating entries in the replication table from the metadata table every second.
- // Compaction should trigger the record to be written to metadata. Wait a bit to ensure
- // that the master has time to work.
- Thread.sleep(5000);
-
- s = ReplicationTable.getScanner(conn);
- StatusSection.limit(s);
- int numRecords = 0;
- for (Entry<Key,Value> e : s) {
- numRecords++;
- log.info("Found status record {}\t{}", e.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(e.getValue().get())));
- }
-
- Assert.assertEquals(2, numRecords);
-
- // We should eventually get 2 work records recorded, but we need to account for a potential delay;
- // we might see: status1 -> work1 -> status2 -> (our scans) -> work2
- notFound = true;
- for (int i = 0; i < 10 && notFound; i++) {
- s = ReplicationTable.getScanner(conn);
- WorkSection.limit(s);
- int elementsFound = Iterables.size(s);
- if (2 == elementsFound) {
- notFound = false;
- }
- Thread.sleep(500);
- }
-
- // If we didn't find the work record, print the contents of the table
- if (notFound) {
- s = ReplicationTable.getScanner(conn);
- for (Entry<Key,Value> content : s) {
- log.info(content.getKey().toStringNoTruncate() + " => " + content.getValue());
- }
- Assert.assertFalse("Did not find the work entries for the status entries", notFound);
- }
- }
-
- @Test
- public void correctClusterNameInWorkEntry() throws Exception {
- Connector conn = getConnector();
- String table1 = "table1";
-
- // replication shouldn't be online when we begin
- Assert.assertFalse(ReplicationTable.isOnline(conn));
-
- // Create a table
- conn.tableOperations().create(table1);
-
- int attempts = 5;
- while (attempts > 0) {
- try {
- // Enable replication on table1
- conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION.getKey(), "true");
- // Replicate table1 to cluster1 in the table with id of '4'
- conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "4");
- attempts = 0;
- } catch (Exception e) {
- attempts--;
- if (attempts <= 0) {
- throw e;
- }
- UtilWaitThread.sleep(500);
- }
- }
-
- // Write some data to table1
- writeSomeData(conn, table1, 2000, 50);
- conn.tableOperations().flush(table1, null, null, true);
-
- String tableId = conn.tableOperations().tableIdMap().get(table1);
- Assert.assertNotNull("Table ID was null", tableId);
-
- // Make sure the replication table exists at this point
- boolean online = ReplicationTable.isOnline(conn);
- attempts = 5;
- do {
- if (!online) {
- UtilWaitThread.sleep(500);
- online = ReplicationTable.isOnline(conn);
- attempts--;
- }
- } while (!online && attempts > 0);
- Assert.assertTrue("Replication table did not exist", online);
-
- for (int i = 0; i < 5 && !conn.securityOperations().hasTablePermission("root", ReplicationTable.NAME, TablePermission.READ); i++) {
- Thread.sleep(1000);
- }
-
- Assert.assertTrue(conn.securityOperations().hasTablePermission("root", ReplicationTable.NAME, TablePermission.READ));
-
- boolean notFound = true;
- Scanner s;
- for (int i = 0; i < 10 && notFound; i++) {
- s = ReplicationTable.getScanner(conn);
- WorkSection.limit(s);
- try {
- Entry<Key,Value> e = Iterables.getOnlyElement(s);
- Text expectedColqual = new ReplicationTarget("cluster1", "4", tableId).toText();
- Assert.assertEquals(expectedColqual, e.getKey().getColumnQualifier());
- notFound = false;
- } catch (NoSuchElementException e) {} catch (IllegalArgumentException e) {
- s = ReplicationTable.getScanner(conn);
- for (Entry<Key,Value> content : s) {
- log.info(content.getKey().toStringNoTruncate() + " => " + content.getValue());
- }
- Assert.fail("Found more than one work section entry");
- }
-
- Thread.sleep(500);
- }
-
- if (notFound) {
- s = ReplicationTable.getScanner(conn);
- for (Entry<Key,Value> content : s) {
- log.info(content.getKey().toStringNoTruncate() + " => " + content.getValue());
- }
- Assert.assertFalse("Did not find the work entry for the status entry", notFound);
- }
- }
-
- @Test
- public void replicationRecordsAreClosedAfterGarbageCollection() throws Exception {
- getCluster().getClusterControl().stop(ServerType.GARBAGE_COLLECTOR);
-
- final Connector conn = getConnector();
-
- ReplicationTable.setOnline(conn);
- conn.securityOperations().grantTablePermission("root", ReplicationTable.NAME, TablePermission.WRITE);
- conn.tableOperations().deleteRows(ReplicationTable.NAME, null, null);
-
- final AtomicBoolean keepRunning = new AtomicBoolean(true);
- final Set<String> metadataWals = new HashSet<>();
-
- Thread t = new Thread(new Runnable() {
- @Override
- public void run() {
- // Should really be able to interrupt here, but the Scanner throws a fit to the logger
- // when that happens
- while (keepRunning.get()) {
- try {
- metadataWals.addAll(getLogs(conn).keySet());
- } catch (Exception e) {
- log.error("Metadata table doesn't exist");
- }
- }
- }
-
- });
-
- t.start();
-
- String table1 = "table1", table2 = "table2", table3 = "table3";
-
- try {
- conn.tableOperations().create(table1);
- conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION.getKey(), "true");
- conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
- conn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1",
- ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, null));
-
- // Write some data to table1
- writeSomeData(conn, table1, 200, 500);
-
- conn.tableOperations().create(table2);
- conn.tableOperations().setProperty(table2, Property.TABLE_REPLICATION.getKey(), "true");
- conn.tableOperations().setProperty(table2, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
-
- writeSomeData(conn, table2, 200, 500);
-
- conn.tableOperations().create(table3);
- conn.tableOperations().setProperty(table3, Property.TABLE_REPLICATION.getKey(), "true");
- conn.tableOperations().setProperty(table3, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "1");
-
- writeSomeData(conn, table3, 200, 500);
-
- // Compact everything to try to make the replication records
- for (String table : Arrays.asList(table1, table2, table3)) {
- conn.tableOperations().compact(table, null, null, true, true);
- }
- } finally {
- keepRunning.set(false);
- t.join(5000);
- Assert.assertFalse(t.isAlive());
- }
-
- // Kill the tserver(s) and restart them
- // to ensure that the WALs we previously observed all move to closed.
- cluster.getClusterControl().stop(ServerType.TABLET_SERVER);
- cluster.getClusterControl().start(ServerType.TABLET_SERVER);
-
- // Make sure we can read all the tables (recovery complete)
- for (String table : Arrays.asList(table1, table2, table3)) {
- Iterators.size(conn.createScanner(table, Authorizations.EMPTY).iterator());
- }
-
- // Starting the gc will run CloseWriteAheadLogReferences which will first close Statuses
- // in the metadata table, and then in the replication table
- Process gc = cluster.exec(SimpleGarbageCollector.class);
-
- waitForGCLock(conn);
-
- Thread.sleep(1000);
-
- log.info("GC is up and should have had time to run at least once by now");
-
- try {
- boolean allClosed = true;
-
- // We should either find all closed records or no records
- // After they're closed, they are candidates for deletion
- for (int i = 0; i < 10; i++) {
- Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(Range.prefix(ReplicationSection.getRowPrefix()));
- Iterator<Entry<Key,Value>> iter = s.iterator();
-
- long recordsFound = 0L;
- while (allClosed && iter.hasNext()) {
- Entry<Key,Value> entry = iter.next();
- String wal = entry.getKey().getRow().toString();
- if (metadataWals.contains(wal)) {
- Status status = Status.parseFrom(entry.getValue().get());
- log.info("{}={}", entry.getKey().toStringNoTruncate(), ProtobufUtil.toString(status));
- allClosed &= status.getClosed();
- recordsFound++;
- }
- }
-
- log.info("Found {} records from the metadata table", recordsFound);
- if (allClosed) {
- break;
- }
-
- UtilWaitThread.sleep(2000);
- }
-
- if (!allClosed) {
- Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(Range.prefix(ReplicationSection.getRowPrefix()));
- for (Entry<Key,Value> entry : s) {
- log.info(entry.getKey().toStringNoTruncate() + " " + ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
- }
- Assert.fail("Expected all replication records in the metadata table to be closed");
- }
-
- for (int i = 0; i < 10; i++) {
- allClosed = true;
-
- Scanner s = ReplicationTable.getScanner(conn);
- Iterator<Entry<Key,Value>> iter = s.iterator();
-
- long recordsFound = 0L;
- while (allClosed && iter.hasNext()) {
- Entry<Key,Value> entry = iter.next();
- String wal = entry.getKey().getRow().toString();
- if (metadataWals.contains(wal)) {
- Status status = Status.parseFrom(entry.getValue().get());
- log.info("{}={}", entry.getKey().toStringNoTruncate(), ProtobufUtil.toString(status));
- allClosed &= status.getClosed();
- recordsFound++;
- }
- }
-
- log.info("Found {} records from the replication table", recordsFound);
- if (allClosed) {
- break;
- }
-
- UtilWaitThread.sleep(3000);
- }
-
- if (!allClosed) {
- Scanner s = ReplicationTable.getScanner(conn);
- StatusSection.limit(s);
- for (Entry<Key,Value> entry : s) {
- log.info(entry.getKey().toStringNoTruncate() + " " + TextFormat.shortDebugString(Status.parseFrom(entry.getValue().get())));
- }
- Assert.fail("Expected all replication records in the replication table to be closed");
- }
-
- } finally {
- gc.destroy();
- gc.waitFor();
- }
-
- }
-
- @Test
- public void replicatedStatusEntriesAreDeleted() throws Exception {
- // Just stop it now, we'll restart it after we restart the tserver
- getCluster().getClusterControl().stop(ServerType.GARBAGE_COLLECTOR);
-
- final Connector conn = getConnector();
- log.info("Got connector to MAC");
- String table1 = "table1";
-
- // replication shouldn't be online when we begin
- Assert.assertFalse(ReplicationTable.isOnline(conn));
-
- // Create a table
- conn.tableOperations().create(table1);
-
- int attempts = 5;
- while (attempts > 0) {
- try {
- // Enable replication on table1
- conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION.getKey(), "true");
- // Replicate table1 to cluster1 in the table with id of '4'
- conn.tableOperations().setProperty(table1, Property.TABLE_REPLICATION_TARGET.getKey() + "cluster1", "4");
- // Use the MockReplicaSystem impl and sleep for 1 second
- conn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + "cluster1",
- ReplicaSystemFactory.getPeerConfigurationValue(MockReplicaSystem.class, "1000"));
- attempts = 0;
- } catch (Exception e) {
- attempts--;
- if (attempts <= 0) {
- throw e;
- }
- UtilWaitThread.sleep(500);
- }
- }
-
- String tableId = conn.tableOperations().tableIdMap().get(table1);
- Assert.assertNotNull("Could not determine table id for " + table1, tableId);
-
- // Write some data to table1
- writeSomeData(conn, table1, 2000, 50);
- conn.tableOperations().flush(table1, null, null, true);
-
- // Make sure the replication table exists at this point
- boolean online = ReplicationTable.isOnline(conn);
- attempts = 10;
- do {
- if (!online) {
- UtilWaitThread.sleep(1000);
- online = ReplicationTable.isOnline(conn);
- attempts--;
- }
- } while (!online && attempts > 0);
- Assert.assertTrue("Replication table did not exist", online);
-
- // Grant ourselves the write permission for later
- conn.securityOperations().grantTablePermission("root", ReplicationTable.NAME, TablePermission.WRITE);
-
- log.info("Checking for replication entries in replication");
- // Then we need to get those records over to the replication table
- Scanner s;
- Set<String> entries = new HashSet<>();
- for (int i = 0; i < 5; i++) {
- s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(ReplicationSection.getRange());
- entries.clear();
- for (Entry<Key,Value> entry : s) {
- entries.add(entry.getKey().getRow().toString());
- log.info("{}={}", entry.getKey().toStringNoTruncate(), entry.getValue());
- }
- if (!entries.isEmpty()) {
- log.info("Replication entries {}", entries);
- break;
- }
- Thread.sleep(1000);
- }
-
- Assert.assertFalse("Did not find any replication entries in the replication table", entries.isEmpty());
-
- // Find the WorkSection record that will be created for that data we ingested
- boolean notFound = true;
- for (int i = 0; i < 10 && notFound; i++) {
- try {
- s = ReplicationTable.getScanner(conn);
- WorkSection.limit(s);
- Entry<Key,Value> e = Iterables.getOnlyElement(s);
- log.info("Found entry: " + e.getKey().toStringNoTruncate());
- Text expectedColqual = new ReplicationTarget("cluster1", "4", tableId).toText();
- Assert.assertEquals(expectedColqual, e.getKey().getColumnQualifier());
- notFound = false;
- } catch (NoSuchElementException e) {
-
- } catch (IllegalArgumentException e) {
- // Somehow we got more than one element. Log what they were
- s = ReplicationTable.getScanner(conn);
- for (Entry<Key,Value> content : s) {
- log.info(content.getKey().toStringNoTruncate() + " => " + content.getValue());
- }
- Assert.fail("Found more than one work section entry");
- } catch (RuntimeException e) {
- // Catch a propagation issue, fail if it's not what we expect
- Throwable cause = e.getCause();
- if (cause instanceof AccumuloSecurityException) {
- AccumuloSecurityException sec = (AccumuloSecurityException) cause;
- switch (sec.getSecurityErrorCode()) {
- case PERMISSION_DENIED:
- // retry -- the grant didn't happen yet
- log.warn("Sleeping because permission was denied");
- break;
- default:
- throw e;
- }
- } else {
- throw e;
- }
- }
-
- Thread.sleep(2000);
- }
-
- if (notFound) {
- s = ReplicationTable.getScanner(conn);
- for (Entry<Key,Value> content : s) {
- log.info(content.getKey().toStringNoTruncate() + " => " + ProtobufUtil.toString(Status.parseFrom(content.getValue().get())));
- }
- Assert.assertFalse("Did not find the work entry for the status entry", notFound);
- }
-
- /**
- * By this point, we should have data ingested into a table, with at least one WAL as a candidate for replication. Compacting the table should close all
- * open WALs, which should ensure all records we're going to replicate have entries in the replication table, and nothing will exist in the metadata table
- * anymore
- */
-
- log.info("Killing tserver");
- // Kill the tserver(s) and restart them
- // to ensure that the WALs we previously observed all move to closed.
- cluster.getClusterControl().stop(ServerType.TABLET_SERVER);
-
- log.info("Starting tserver");
- cluster.getClusterControl().start(ServerType.TABLET_SERVER);
-
- log.info("Waiting to read tables");
- UtilWaitThread.sleep(2 * 3 * 1000);
-
- // Make sure we can read all the tables (recovery complete)
- for (String table : new String[] {MetadataTable.NAME, table1}) {
- Iterators.size(conn.createScanner(table, Authorizations.EMPTY).iterator());
- }
-
- log.info("Recovered metadata:");
- s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- for (Entry<Key,Value> entry : s) {
- log.info("{}={}", entry.getKey().toStringNoTruncate(), entry.getValue());
- }
-
- cluster.getClusterControl().start(ServerType.GARBAGE_COLLECTOR);
-
- // Wait for a bit since the GC has to run (should be running after a one second delay)
- waitForGCLock(conn);
-
- Thread.sleep(1000);
-
- log.info("After GC");
- s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- for (Entry<Key,Value> entry : s) {
- log.info("{}={}", entry.getKey().toStringNoTruncate(), entry.getValue());
- }
-
- // We expect no records in the metadata table after compaction. We have to poll
- // because we have to wait for the StatusMaker's next iteration which will clean
- // up the dangling *closed* records after we create the record in the replication table.
- // We need the GC to close the file (CloseWriteAheadLogReferences) before we can remove the record
- log.info("Checking metadata table for replication entries");
- Set<String> remaining = new HashSet<>();
- for (int i = 0; i < 10; i++) {
- s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
- s.setRange(ReplicationSection.getRange());
- remaining.clear();
- for (Entry<Key,Value> e : s) {
- remaining.add(e.getKey().getRow().toString());
- }
- remaining.retainAll(entries);
- if (remaining.isEmpty()) {
- break;
- }
- log.info("remaining {}", remaining);
- Thread.sleep(2000);
- log.info("");
- }
-
- Assert.assertTrue("Replication status messages were not cleaned up from metadata table", remaining.isEmpty());
-
- /**
- * After we close out and subsequently delete the metadata record, this will propagate to the replication table, which will cause those records to be
- * deleted after replication occurs
- */
-
- int recordsFound = 0;
- for (int i = 0; i < 30; i++) {
- s = ReplicationTable.getScanner(conn);
- recordsFound = 0;
- for (Entry<Key,Value> entry : s) {
- recordsFound++;
- log.info("{} {}", entry.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(entry.getValue().get())));
- }
-
- if (recordsFound <= 2) {
- break;
- } else {
- Thread.sleep(1000);
- log.info("");
- }
- }
-
- Assert.assertTrue("Found unexpected replication records in the replication table", recordsFound <= 2);
- }
-}
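
The deleted ITs above re-create the same wait-for-replication-table retry loop
in several tests. A consolidated helper might look like the following sketch;
it is illustrative only and assumes nothing beyond the ReplicationTable and
UtilWaitThread APIs the tests already call.

  private static boolean waitForReplicationTable(Connector conn, int attempts, long sleepMs) {
    // Poll until the replication table comes online or we run out of attempts
    boolean online = ReplicationTable.isOnline(conn);
    while (!online && attempts-- > 0) {
      UtilWaitThread.sleep(sleepMs);
      online = ReplicationTable.isOnline(conn);
    }
    return online;
  }

Each call site would then reduce to a single line, e.g.
Assert.assertTrue("Replication table did not exist", waitForReplicationTable(conn, 10, 2000));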
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/replication/ReplicationRandomWalkIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/replication/ReplicationRandomWalkIT.java b/test/src/test/java/org/apache/accumulo/test/replication/ReplicationRandomWalkIT.java
deleted file mode 100644
index 80bc69d..0000000
--- a/test/src/test/java/org/apache/accumulo/test/replication/ReplicationRandomWalkIT.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.replication;
-
-import static org.apache.accumulo.core.conf.Property.TSERV_ARCHIVE_WALOGS;
-import static org.apache.accumulo.core.conf.Property.TSERV_WALOG_MAX_SIZE;
-
-import java.util.Properties;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.test.functional.ConfigurableMacBase;
-import org.apache.accumulo.test.randomwalk.Environment;
-import org.apache.accumulo.test.randomwalk.concurrent.Replication;
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
-public class ReplicationRandomWalkIT extends ConfigurableMacBase {
-
- @Override
- protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
- cfg.setProperty(TSERV_ARCHIVE_WALOGS, "false");
- cfg.setProperty(TSERV_WALOG_MAX_SIZE, "1M");
- cfg.setNumTservers(1);
- }
-
- @Test(timeout = 5 * 60 * 1000)
- public void runReplicationRandomWalkStep() throws Exception {
- Replication r = new Replication();
-
- Environment env = new Environment(new Properties()) {
- @Override
- public String getUserName() {
- return "root";
- }
-
- @Override
- public String getPassword() {
- return ROOT_PASSWORD;
- }
-
- @Override
- public Connector getConnector() throws AccumuloException, AccumuloSecurityException {
- return ReplicationRandomWalkIT.this.getConnector();
- }
-
- };
- r.visit(null, env, null);
- }
-
-}
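
The anonymous Environment in the deleted test above is the only randomwalk
plumbing the IT needs. If more single-step randomwalk ITs existed, that
override could be pulled into a small factory; the sketch below is
hypothetical, not part of this commit, and assumes only the Environment and
Connector types already imported above.

  static Environment testEnvironment(final String user, final String pass, final Connector conn) {
    return new Environment(new Properties()) {
      @Override
      public String getUserName() {
        return user;
      }

      @Override
      public String getPassword() {
        return pass;
      }

      @Override
      public Connector getConnector() {
        return conn;
      }
    };
  }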
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/test/java/org/apache/accumulo/test/replication/StatusCombinerMacIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/replication/StatusCombinerMacIT.java b/test/src/test/java/org/apache/accumulo/test/replication/StatusCombinerMacIT.java
deleted file mode 100644
index b072aa7..0000000
--- a/test/src/test/java/org/apache/accumulo/test/replication/StatusCombinerMacIT.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.replication;
-
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.cluster.ClusterUser;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
-import org.apache.accumulo.core.metadata.MetadataTable;
-import org.apache.accumulo.core.metadata.schema.MetadataSchema;
-import org.apache.accumulo.core.protobuf.ProtobufUtil;
-import org.apache.accumulo.core.replication.ReplicationSchema.StatusSection;
-import org.apache.accumulo.core.replication.ReplicationTable;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.harness.SharedMiniClusterBase;
-import org.apache.accumulo.server.replication.StatusUtil;
-import org.apache.accumulo.server.replication.proto.Replication.Status;
-import org.apache.accumulo.server.util.ReplicationTableUtil;
-import org.apache.hadoop.io.Text;
-import org.junit.Assert;
-import org.junit.Test;
-
-import com.google.common.collect.Iterables;
-
-public class StatusCombinerMacIT extends SharedMiniClusterBase {
-
- @Override
- public int defaultTimeoutSeconds() {
- return 60;
- }
-
- @Test
- public void testCombinerSetOnMetadata() throws Exception {
- TableOperations tops = getConnector().tableOperations();
- Map<String,EnumSet<IteratorScope>> iterators = tops.listIterators(MetadataTable.NAME);
-
- Assert.assertTrue(iterators.containsKey(ReplicationTableUtil.COMBINER_NAME));
- EnumSet<IteratorScope> scopes = iterators.get(ReplicationTableUtil.COMBINER_NAME);
- Assert.assertEquals(3, scopes.size());
- Assert.assertTrue(scopes.contains(IteratorScope.scan));
- Assert.assertTrue(scopes.contains(IteratorScope.minc));
- Assert.assertTrue(scopes.contains(IteratorScope.majc));
-
- Iterable<Entry<String,String>> propIter = tops.getProperties(MetadataTable.NAME);
- HashMap<String,String> properties = new HashMap<String,String>();
- for (Entry<String,String> entry : propIter) {
- properties.put(entry.getKey(), entry.getValue());
- }
-
- for (IteratorScope scope : scopes) {
- String key = Property.TABLE_ITERATOR_PREFIX.getKey() + scope.name() + "." + ReplicationTableUtil.COMBINER_NAME + ".opt.columns";
- Assert.assertTrue("Properties did not contain key : " + key, properties.containsKey(key));
- Assert.assertEquals(MetadataSchema.ReplicationSection.COLF.toString(), properties.get(key));
- }
- }
-
- @Test
- public void test() throws Exception {
- Connector conn = getConnector();
- ClusterUser user = getAdminUser();
-
- ReplicationTable.setOnline(conn);
- conn.securityOperations().grantTablePermission(user.getPrincipal(), ReplicationTable.NAME, TablePermission.WRITE);
- BatchWriter bw = ReplicationTable.getBatchWriter(conn);
- long createTime = System.currentTimeMillis();
- try {
- Mutation m = new Mutation("file:/accumulo/wal/HW10447.local+56808/93cdc17e-7521-44fa-87b5-37f45bcb92d3");
- StatusSection.add(m, new Text("1"), StatusUtil.fileCreatedValue(createTime));
- bw.addMutation(m);
- } finally {
- bw.close();
- }
-
- Scanner s = ReplicationTable.getScanner(conn);
- Entry<Key,Value> entry = Iterables.getOnlyElement(s);
- Assert.assertEquals(StatusUtil.fileCreatedValue(createTime), entry.getValue());
-
- bw = ReplicationTable.getBatchWriter(conn);
- try {
- Mutation m = new Mutation("file:/accumulo/wal/HW10447.local+56808/93cdc17e-7521-44fa-87b5-37f45bcb92d3");
- StatusSection.add(m, new Text("1"), ProtobufUtil.toValue(StatusUtil.replicated(Long.MAX_VALUE)));
- bw.addMutation(m);
- } finally {
- bw.close();
- }
-
- s = ReplicationTable.getScanner(conn);
- entry = Iterables.getOnlyElement(s);
- Status stat = Status.parseFrom(entry.getValue().get());
- Assert.assertEquals(Long.MAX_VALUE, stat.getBegin());
- }
-
-}
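
The two writes in the deleted test above pin down the merge semantics of the
combiner under test: begin, end, and createdTime keep their maxima while the
closed and infiniteEnd flags are OR'd. The sketch below restates that merge
for two Status messages; it is illustrative only, since the production
combiner runs as an Accumulo iterator rather than a static method.

  static Status combine(Status a, Status b) {
    // begin/end/createdTime keep the maximum; closed/infiniteEnd are OR'd
    return Status.newBuilder()
        .setBegin(Math.max(a.getBegin(), b.getBegin()))
        .setEnd(Math.max(a.getEnd(), b.getEnd()))
        .setClosed(a.getClosed() || b.getClosed())
        .setInfiniteEnd(a.getInfiniteEnd() || b.getInfiniteEnd())
        .setCreatedTime(Math.max(a.getCreatedTime(), b.getCreatedTime()))
        .build();
  }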
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java b/test/src/main/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java
new file mode 100644
index 0000000..9797d7b
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/proxy/SimpleProxyBase.java
@@ -0,0 +1,2273 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.proxy;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.InputStreamReader;
+import java.net.InetAddress;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.UUID;
+
+import org.apache.accumulo.cluster.ClusterUser;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.security.tokens.KerberosToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.DefaultConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.file.FileOperations;
+import org.apache.accumulo.core.file.FileSKVWriter;
+import org.apache.accumulo.core.iterators.DevNull;
+import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
+import org.apache.accumulo.core.iterators.user.SummingCombiner;
+import org.apache.accumulo.core.iterators.user.VersioningIterator;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.ByteBufferUtil;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.examples.simple.constraints.NumericValueConstraint;
+import org.apache.accumulo.harness.MiniClusterHarness;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
+import org.apache.accumulo.harness.TestingKdc;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.proxy.Proxy;
+import org.apache.accumulo.proxy.thrift.AccumuloProxy.Client;
+import org.apache.accumulo.proxy.thrift.AccumuloSecurityException;
+import org.apache.accumulo.proxy.thrift.ActiveCompaction;
+import org.apache.accumulo.proxy.thrift.ActiveScan;
+import org.apache.accumulo.proxy.thrift.BatchScanOptions;
+import org.apache.accumulo.proxy.thrift.Column;
+import org.apache.accumulo.proxy.thrift.ColumnUpdate;
+import org.apache.accumulo.proxy.thrift.CompactionReason;
+import org.apache.accumulo.proxy.thrift.CompactionStrategyConfig;
+import org.apache.accumulo.proxy.thrift.CompactionType;
+import org.apache.accumulo.proxy.thrift.Condition;
+import org.apache.accumulo.proxy.thrift.ConditionalStatus;
+import org.apache.accumulo.proxy.thrift.ConditionalUpdates;
+import org.apache.accumulo.proxy.thrift.ConditionalWriterOptions;
+import org.apache.accumulo.proxy.thrift.DiskUsage;
+import org.apache.accumulo.proxy.thrift.IteratorScope;
+import org.apache.accumulo.proxy.thrift.IteratorSetting;
+import org.apache.accumulo.proxy.thrift.Key;
+import org.apache.accumulo.proxy.thrift.KeyValue;
+import org.apache.accumulo.proxy.thrift.MutationsRejectedException;
+import org.apache.accumulo.proxy.thrift.PartialKey;
+import org.apache.accumulo.proxy.thrift.Range;
+import org.apache.accumulo.proxy.thrift.ScanColumn;
+import org.apache.accumulo.proxy.thrift.ScanOptions;
+import org.apache.accumulo.proxy.thrift.ScanResult;
+import org.apache.accumulo.proxy.thrift.ScanState;
+import org.apache.accumulo.proxy.thrift.ScanType;
+import org.apache.accumulo.proxy.thrift.SystemPermission;
+import org.apache.accumulo.proxy.thrift.TableExistsException;
+import org.apache.accumulo.proxy.thrift.TableNotFoundException;
+import org.apache.accumulo.proxy.thrift.TablePermission;
+import org.apache.accumulo.proxy.thrift.TimeType;
+import org.apache.accumulo.proxy.thrift.UnknownScanner;
+import org.apache.accumulo.proxy.thrift.UnknownWriter;
+import org.apache.accumulo.proxy.thrift.WriterOptions;
+import org.apache.accumulo.server.util.PortUtils;
+import org.apache.accumulo.test.functional.SlowIterator;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.thrift.TApplicationException;
+import org.apache.thrift.TException;
+import org.apache.thrift.protocol.TProtocolFactory;
+import org.apache.thrift.server.TServer;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Iterators;
+import com.google.common.net.HostAndPort;
+
+/**
+ * Call every method on the proxy and try to verify that it works.
+ */
+public abstract class SimpleProxyBase extends SharedMiniClusterBase {
+ private static final Logger log = LoggerFactory.getLogger(SimpleProxyBase.class);
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ private static final long ZOOKEEPER_PROPAGATION_TIME = 10 * 1000;
+ private static TServer proxyServer;
+ private static int proxyPort;
+
+ private TestProxyClient proxyClient;
+ private org.apache.accumulo.proxy.thrift.AccumuloProxy.Client client;
+
+ private static Map<String,String> properties = new HashMap<>();
+ private static ByteBuffer creds = null;
+ private static String hostname, proxyPrincipal, proxyPrimary, clientPrincipal;
+ private static File proxyKeytab, clientKeytab;
+
+ // Implementations can set this
+ static TProtocolFactory factory = null;
+
+ private static void waitForAccumulo(Connector c) throws Exception {
+ Iterators.size(c.createScanner(MetadataTable.NAME, Authorizations.EMPTY).iterator());
+ }
+
+ private static boolean isKerberosEnabled() {
+ return SharedMiniClusterBase.TRUE.equals(System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION));
+ }
+
+ /**
+ * Does the actual test setup, invoked by the concrete test class
+ */
+ public static void setUpProxy() throws Exception {
+ assertNotNull("Implementations must initialize the TProtocolFactory", factory);
+
+ Connector c = SharedMiniClusterBase.getConnector();
+ Instance inst = c.getInstance();
+ waitForAccumulo(c);
+
+ hostname = InetAddress.getLocalHost().getCanonicalHostName();
+
+ Properties props = new Properties();
+ props.put("instance", inst.getInstanceName());
+ props.put("zookeepers", inst.getZooKeepers());
+
+ final String tokenClass;
+ if (isKerberosEnabled()) {
+ tokenClass = KerberosToken.class.getName();
+ TestingKdc kdc = getKdc();
+
+ // Create a principal+keytab for the proxy
+ proxyKeytab = new File(kdc.getKeytabDir(), "proxy.keytab");
+ hostname = InetAddress.getLocalHost().getCanonicalHostName();
+ // Set the primary because the client needs to know it
+ proxyPrimary = "proxy";
+ // Qualify with an instance
+ proxyPrincipal = proxyPrimary + "/" + hostname;
+ kdc.createPrincipal(proxyKeytab, proxyPrincipal);
+ // Tack on the realm too
+ proxyPrincipal = kdc.qualifyUser(proxyPrincipal);
+
+ props.setProperty("kerberosPrincipal", proxyPrincipal);
+ props.setProperty("kerberosKeytab", proxyKeytab.getCanonicalPath());
+ props.setProperty("thriftServerType", "sasl");
+
+ // Enable kerberos auth
+ Configuration conf = new Configuration(false);
+ conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+ UserGroupInformation.setConfiguration(conf);
+
+ // Login for the Proxy itself
+ UserGroupInformation.loginUserFromKeytab(proxyPrincipal, proxyKeytab.getAbsolutePath());
+
+ // User for tests
+ ClusterUser user = kdc.getRootUser();
+ clientPrincipal = user.getPrincipal();
+ clientKeytab = user.getKeytab();
+ } else {
+ clientPrincipal = "root";
+ tokenClass = PasswordToken.class.getName();
+ properties.put("password", SharedMiniClusterBase.getRootPassword());
+ hostname = "localhost";
+ }
+
+ props.put("tokenClass", tokenClass);
+
+ ClientConfiguration clientConfig = SharedMiniClusterBase.getCluster().getClientConfig();
+ String clientConfPath = new File(SharedMiniClusterBase.getCluster().getConfig().getConfDir(), "client.conf").getAbsolutePath();
+ props.put("clientConfigurationFile", clientConfPath);
+ properties.put("clientConfigurationFile", clientConfPath);
+
+ proxyPort = PortUtils.getRandomFreePort();
+ proxyServer = Proxy.createProxyServer(HostAndPort.fromParts(hostname, proxyPort), factory, props, clientConfig).server;
+ while (!proxyServer.isServing())
+ UtilWaitThread.sleep(100);
+ }
+
+ @AfterClass
+ public static void tearDownProxy() throws Exception {
+ if (null != proxyServer) {
+ proxyServer.stop();
+ }
+ }
+
+ final IteratorSetting setting = new IteratorSetting(100, "slow", SlowIterator.class.getName(), Collections.singletonMap("sleepTime", "200"));
+ String table;
+ ByteBuffer badLogin;
+
+ @Before
+ public void setup() throws Exception {
+ // Create a new client for each test
+ if (isKerberosEnabled()) {
+ UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
+ proxyClient = new TestProxyClient(hostname, proxyPort, factory, proxyPrimary, UserGroupInformation.getCurrentUser());
+ client = proxyClient.proxy();
+ creds = client.login(clientPrincipal, properties);
+
+ TestingKdc kdc = getKdc();
+ final ClusterUser user = kdc.getClientPrincipal(0);
+ // Create another user
+ client.createLocalUser(creds, user.getPrincipal(), s2bb("unused"));
+ // Log in as the user we just created
+ UserGroupInformation.loginUserFromKeytab(user.getPrincipal(), user.getKeytab().getAbsolutePath());
+ final UserGroupInformation badUgi = UserGroupInformation.getCurrentUser();
+ // Get a "Credentials" object for the proxy
+ TestProxyClient badClient = new TestProxyClient(hostname, proxyPort, factory, proxyPrimary, badUgi);
+ try {
+ Client badProxy = badClient.proxy();
+ badLogin = badProxy.login(user.getPrincipal(), properties);
+ } finally {
+ badClient.close();
+ }
+
+ // Log back in as the test user
+ UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
+ // Drop the test user, invalidating the credentials (we also no longer hold its krb credentials)
+ client.dropLocalUser(creds, user.getPrincipal());
+ } else {
+ proxyClient = new TestProxyClient(hostname, proxyPort, factory);
+ client = proxyClient.proxy();
+ creds = client.login("root", properties);
+
+ // Create 'user'
+ client.createLocalUser(creds, "user", s2bb(SharedMiniClusterBase.getRootPassword()));
+ // Log in as 'user'
+ badLogin = client.login("user", properties);
+ // Drop 'user', invalidating the credentials
+ client.dropLocalUser(creds, "user");
+ }
+
+ // Create a general table to be used
+ table = getUniqueNames(1)[0];
+ client.createTable(creds, table, true, TimeType.MILLIS);
+ }
+
+ @After
+ public void teardown() throws Exception {
+ if (null != table) {
+ if (isKerberosEnabled()) {
+ UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
+ }
+ try {
+ if (client.tableExists(creds, table)) {
+ client.deleteTable(creds, table);
+ }
+ } catch (Exception e) {
+ log.warn("Failed to delete test table", e);
+ }
+ }
+
+ // Close the transport after the test
+ if (null != proxyClient) {
+ proxyClient.close();
+ }
+ }
+
+ /*
+ * Set a lower timeout for tests that should fail fast
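+ *
+ * Each *LoginFailure test below makes a single RPC with the invalidated
+ * "badLogin" token and expects it to be rejected. A few calls expect
+ * TApplicationException rather than AccumuloSecurityException, presumably
+ * because their Thrift definitions do not declare a security exception, so
+ * the failure surfaces as a generic Thrift error.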
+ */
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void addConstraintLoginFailure() throws Exception {
+ client.addConstraint(badLogin, table, NumericValueConstraint.class.getName());
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void addSplitsLoginFailure() throws Exception {
+ client.addSplits(badLogin, table, Collections.singleton(s2bb("1")));
+ }
+
+ @Test(expected = TApplicationException.class, timeout = 5000)
+ public void clearLocatorCacheLoginFailure() throws Exception {
+ client.clearLocatorCache(badLogin, table);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void compactTableLoginFailure() throws Exception {
+ client.compactTable(badLogin, table, null, null, null, true, false, null);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void cancelCompactionLoginFailure() throws Exception {
+ client.cancelCompaction(badLogin, table);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void createTableLoginFailure() throws Exception {
+ client.createTable(badLogin, table, false, TimeType.MILLIS);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void deleteTableLoginFailure() throws Exception {
+ client.deleteTable(badLogin, table);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void deleteRowsLoginFailure() throws Exception {
+ client.deleteRows(badLogin, table, null, null);
+ }
+
+ @Test(expected = TApplicationException.class, timeout = 5000)
+ public void tableExistsLoginFailure() throws Exception {
+ client.tableExists(badLogin, table);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void flushTableLoginFailure() throws Exception {
+ client.flushTable(badLogin, table, null, null, false);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void getLocalityGroupsLoginFailure() throws Exception {
+ client.getLocalityGroups(badLogin, table);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void getMaxRowLoginFailure() throws Exception {
+ client.getMaxRow(badLogin, table, Collections.<ByteBuffer> emptySet(), null, false, null, false);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void getTablePropertiesLoginFailure() throws Exception {
+ client.getTableProperties(badLogin, table);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void listSplitsLoginFailure() throws Exception {
+ client.listSplits(badLogin, table, 10000);
+ }
+
+ @Test(expected = TApplicationException.class, timeout = 5000)
+ public void listTablesLoginFailure() throws Exception {
+ client.listTables(badLogin);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void listConstraintsLoginFailure() throws Exception {
+ client.listConstraints(badLogin, table);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void mergeTabletsLoginFailure() throws Exception {
+ client.mergeTablets(badLogin, table, null, null);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void offlineTableLoginFailure() throws Exception {
+ client.offlineTable(badLogin, table, false);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void onlineTableLoginFailure() throws Exception {
+ client.onlineTable(badLogin, table, false);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void removeConstraintLoginFailure() throws Exception {
+ client.removeConstraint(badLogin, table, 0);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void removeTablePropertyLoginFailure() throws Exception {
+ client.removeTableProperty(badLogin, table, Property.TABLE_FILE_MAX.getKey());
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void renameTableLoginFailure() throws Exception {
+ client.renameTable(badLogin, table, "someTableName");
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void setLocalityGroupsLoginFailure() throws Exception {
+ Map<String,Set<String>> groups = new HashMap<String,Set<String>>();
+ groups.put("group1", Collections.singleton("cf1"));
+ groups.put("group2", Collections.singleton("cf2"));
+ client.setLocalityGroups(badLogin, table, groups);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void setTablePropertyLoginFailure() throws Exception {
+ client.setTableProperty(badLogin, table, Property.TABLE_FILE_MAX.getKey(), "0");
+ }
+
+ @Test(expected = TException.class, timeout = 5000)
+ public void tableIdMapLoginFailure() throws Exception {
+ client.tableIdMap(badLogin);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void getSiteConfigurationLoginFailure() throws Exception {
+ client.getSiteConfiguration(badLogin);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void getSystemConfigurationLoginFailure() throws Exception {
+ client.getSystemConfiguration(badLogin);
+ }
+
+ @Test(expected = TException.class, timeout = 5000)
+ public void getTabletServersLoginFailure() throws Exception {
+ client.getTabletServers(badLogin);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void getActiveScansLoginFailure() throws Exception {
+ client.getActiveScans(badLogin, "fake");
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void getActiveCompactionsLoginFailure() throws Exception {
+ client.getActiveCompactions(badLogin, "fake");
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void removePropertyLoginFailure() throws Exception {
+ client.removeProperty(badLogin, "table.split.threshold");
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void setPropertyLoginFailure() throws Exception {
+ client.setProperty(badLogin, "table.split.threshold", "500M");
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void testClassLoadLoginFailure() throws Exception {
+ client.testClassLoad(badLogin, DevNull.class.getName(), SortedKeyValueIterator.class.getName());
+ }
+
+ @Test(timeout = 5000)
+ public void authenticateUserLoginFailure() throws Exception {
+ if (!isKerberosEnabled()) {
+ try {
+ // Not really a relevant test for kerberos
+ client.authenticateUser(badLogin, "root", s2pp(SharedMiniClusterBase.getRootPassword()));
+ fail("Expected AccumuloSecurityException");
+ } catch (AccumuloSecurityException e) {
+ // Expected
+ return;
+ }
+ }
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void changeUserAuthorizationsLoginFailure() throws Exception {
+ HashSet<ByteBuffer> auths = new HashSet<ByteBuffer>(Arrays.asList(s2bb("A"), s2bb("B")));
+ client.changeUserAuthorizations(badLogin, "stooge", auths);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void changePasswordLoginFailure() throws Exception {
+ client.changeLocalUserPassword(badLogin, "stooge", s2bb(""));
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void createUserLoginFailure() throws Exception {
+ client.createLocalUser(badLogin, "stooge", s2bb("password"));
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void dropUserLoginFailure() throws Exception {
+ client.dropLocalUser(badLogin, "stooge");
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void getUserAuthorizationsLoginFailure() throws Exception {
+ client.getUserAuthorizations(badLogin, "stooge");
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void grantSystemPermissionLoginFailure() throws Exception {
+ client.grantSystemPermission(badLogin, "stooge", SystemPermission.CREATE_TABLE);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void grantTablePermissionLoginFailure() throws Exception {
+ client.grantTablePermission(badLogin, "root", table, TablePermission.WRITE);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void hasSystemPermissionLoginFailure() throws Exception {
+ client.hasSystemPermission(badLogin, "stooge", SystemPermission.CREATE_TABLE);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void hasTablePermissionLoginFailure() throws Exception {
+ client.hasTablePermission(badLogin, "root", table, TablePermission.WRITE);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void listLocalUsersLoginFailure() throws Exception {
+ client.listLocalUsers(badLogin);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void revokeSystemPermissionLoginFailure() throws Exception {
+ client.revokeSystemPermission(badLogin, "stooge", SystemPermission.CREATE_TABLE);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void revokeTablePermissionLoginFailure() throws Exception {
+ client.revokeTablePermission(badLogin, "root", table, TablePermission.ALTER_TABLE);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void createScannerLoginFailure() throws Exception {
+ client.createScanner(badLogin, table, new ScanOptions());
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void createBatchScannerLoginFailure() throws Exception {
+ client.createBatchScanner(badLogin, table, new BatchScanOptions());
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void updateAndFlushLoginFailure() throws Exception {
+ client.updateAndFlush(badLogin, table, new HashMap<ByteBuffer,List<ColumnUpdate>>());
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void createWriterLoginFailure() throws Exception {
+ client.createWriter(badLogin, table, new WriterOptions());
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void attachIteratorLoginFailure() throws Exception {
+ client.attachIterator(badLogin, "slow", setting, EnumSet.allOf(IteratorScope.class));
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void checkIteratorLoginFailure() throws Exception {
+ client.checkIteratorConflicts(badLogin, table, setting, EnumSet.allOf(IteratorScope.class));
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void cloneTableLoginFailure() throws Exception {
+ client.cloneTable(badLogin, table, table + "_clone", false, null, null);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void exportTableLoginFailure() throws Exception {
+ client.exportTable(badLogin, table, "/tmp");
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void importTableLoginFailure() throws Exception {
+ client.importTable(badLogin, "testify", "/tmp");
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void getIteratorSettingLoginFailure() throws Exception {
+ client.getIteratorSetting(badLogin, table, "foo", IteratorScope.SCAN);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void listIteratorsLoginFailure() throws Exception {
+ client.listIterators(badLogin, table);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void removeIteratorLoginFailure() throws Exception {
+ client.removeIterator(badLogin, table, "name", EnumSet.allOf(IteratorScope.class));
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void splitRangeByTabletsLoginFailure() throws Exception {
+ client.splitRangeByTablets(badLogin, table, client.getRowRange(ByteBuffer.wrap("row".getBytes())), 10);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void importDirectoryLoginFailure() throws Exception {
+ MiniAccumuloClusterImpl cluster = SharedMiniClusterBase.getCluster();
+ Path base = cluster.getTemporaryPath();
+ Path importDir = new Path(base, "importDir");
+ Path failuresDir = new Path(base, "failuresDir");
+ assertTrue(cluster.getFileSystem().mkdirs(importDir));
+ assertTrue(cluster.getFileSystem().mkdirs(failuresDir));
+ client.importDirectory(badLogin, table, importDir.toString(), failuresDir.toString(), true);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void pingTabletServerLoginFailure() throws Exception {
+ client.pingTabletServer(badLogin, "fake");
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void loginFailure() throws Exception {
+ client.login("badUser", properties);
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void testTableClassLoadLoginFailure() throws Exception {
+ client.testTableClassLoad(badLogin, table, VersioningIterator.class.getName(), SortedKeyValueIterator.class.getName());
+ }
+
+ @Test(expected = AccumuloSecurityException.class, timeout = 5000)
+ public void createConditionalWriterLoginFailure() throws Exception {
+ client.createConditionalWriter(badLogin, table, new ConditionalWriterOptions());
+ }
+
+ @Test
+ public void tableNotFound() throws Exception {
+ final String doesNotExist = "doesNotExists";
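+ // Every operation against a nonexistent table should throw
+ // TableNotFoundException; the empty catch blocks are the expected path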
+ try {
+ client.addConstraint(creds, doesNotExist, NumericValueConstraint.class.getName());
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.addSplits(creds, doesNotExist, Collections.<ByteBuffer> emptySet());
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ final IteratorSetting setting = new IteratorSetting(100, "slow", SlowIterator.class.getName(), Collections.singletonMap("sleepTime", "200"));
+ try {
+ client.attachIterator(creds, doesNotExist, setting, EnumSet.allOf(IteratorScope.class));
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.cancelCompaction(creds, doesNotExist);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.checkIteratorConflicts(creds, doesNotExist, setting, EnumSet.allOf(IteratorScope.class));
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.clearLocatorCache(creds, doesNotExist);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ final String TABLE_TEST = getUniqueNames(1)[0];
+ client.cloneTable(creds, doesNotExist, TABLE_TEST, false, null, null);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.compactTable(creds, doesNotExist, null, null, null, true, false, null);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.createBatchScanner(creds, doesNotExist, new BatchScanOptions());
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.createScanner(creds, doesNotExist, new ScanOptions());
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.createWriter(creds, doesNotExist, new WriterOptions());
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.deleteRows(creds, doesNotExist, null, null);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.deleteTable(creds, doesNotExist);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.exportTable(creds, doesNotExist, "/tmp");
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.flushTable(creds, doesNotExist, null, null, false);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.getIteratorSetting(creds, doesNotExist, "foo", IteratorScope.SCAN);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.getLocalityGroups(creds, doesNotExist);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.getMaxRow(creds, doesNotExist, Collections.<ByteBuffer> emptySet(), null, false, null, false);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.getTableProperties(creds, doesNotExist);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.grantTablePermission(creds, "root", doesNotExist, TablePermission.WRITE);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.hasTablePermission(creds, "root", doesNotExist, TablePermission.WRITE);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ MiniAccumuloClusterImpl cluster = SharedMiniClusterBase.getCluster();
+ Path base = cluster.getTemporaryPath();
+ Path importDir = new Path(base, "importDir");
+ Path failuresDir = new Path(base, "failuresDir");
+ assertTrue(cluster.getFileSystem().mkdirs(importDir));
+ assertTrue(cluster.getFileSystem().mkdirs(failuresDir));
+ client.importDirectory(creds, doesNotExist, importDir.toString(), failuresDir.toString(), true);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.listConstraints(creds, doesNotExist);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.listSplits(creds, doesNotExist, 10000);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.mergeTablets(creds, doesNotExist, null, null);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.offlineTable(creds, doesNotExist, false);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.onlineTable(creds, doesNotExist, false);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.removeConstraint(creds, doesNotExist, 0);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.removeIterator(creds, doesNotExist, "name", EnumSet.allOf(IteratorScope.class));
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.removeTableProperty(creds, doesNotExist, Property.TABLE_FILE_MAX.getKey());
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.renameTable(creds, doesNotExist, "someTableName");
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.revokeTablePermission(creds, "root", doesNotExist, TablePermission.ALTER_TABLE);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.setTableProperty(creds, doesNotExist, Property.TABLE_FILE_MAX.getKey(), "0");
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.splitRangeByTablets(creds, doesNotExist, client.getRowRange(ByteBuffer.wrap("row".getBytes())), 10);
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.updateAndFlush(creds, doesNotExist, new HashMap<ByteBuffer,List<ColumnUpdate>>());
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.getDiskUsage(creds, Collections.singleton(doesNotExist));
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.testTableClassLoad(creds, doesNotExist, VersioningIterator.class.getName(), SortedKeyValueIterator.class.getName());
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ try {
+ client.createConditionalWriter(creds, doesNotExist, new ConditionalWriterOptions());
+ fail("exception not thrown");
+ } catch (TableNotFoundException ex) {}
+ }
+
+ @Test
+ public void testExists() throws Exception {
+ client.createTable(creds, "ett1", false, TimeType.MILLIS);
+ client.createTable(creds, "ett2", false, TimeType.MILLIS);
+ try {
+ client.createTable(creds, "ett1", false, TimeType.MILLIS);
+ fail("exception not thrown");
+ } catch (TableExistsException tee) {}
+ try {
+ client.renameTable(creds, "ett1", "ett2");
+ fail("exception not thrown");
+ } catch (TableExistsException tee) {}
+ try {
+ client.cloneTable(creds, "ett1", "ett2", false, new HashMap<String,String>(), new HashSet<String>());
+ fail("exception not thrown");
+ } catch (TableExistsException tee) {}
+ }
+
+ @Test
+ public void testUnknownScanner() throws Exception {
+ String scanner = client.createScanner(creds, table, null);
+ assertFalse(client.hasNext(scanner));
+ client.closeScanner(scanner);
+
+ try {
+ client.hasNext(scanner);
+ fail("exception not thrown");
+ } catch (UnknownScanner us) {}
+
+ try {
+ client.closeScanner(scanner);
+ fail("exception not thrown");
+ } catch (UnknownScanner us) {}
+
+ try {
+ client.nextEntry("99999999");
+ fail("exception not thrown");
+ } catch (UnknownScanner us) {}
+ try {
+ client.nextK("99999999", 6);
+ fail("exception not thrown");
+ } catch (UnknownScanner us) {}
+ try {
+ client.hasNext("99999999");
+ fail("exception not thrown");
+ } catch (UnknownScanner us) {}
+ try {
+ client.hasNext(UUID.randomUUID().toString());
+ fail("exception not thrown");
+ } catch (UnknownScanner us) {}
+ }
+
+ @Test
+ public void testUnknownWriter() throws Exception {
+ String writer = client.createWriter(creds, table, null);
+ client.update(writer, mutation("row0", "cf", "cq", "value"));
+ client.flush(writer);
+ client.update(writer, mutation("row2", "cf", "cq", "value2"));
+ client.closeWriter(writer);
+
+ // update() is a oneway Thrift call, so it cannot throw exceptions
+ client.update(writer, mutation("row2", "cf", "cq", "value2"));
+
+ try {
+ client.flush(writer);
+ fail("exception not thrown");
+ } catch (UnknownWriter uw) {}
+ try {
+ client.flush("99999");
+ fail("exception not thrown");
+ } catch (UnknownWriter uw) {}
+ try {
+ client.flush(UUID.randomUUID().toString());
+ fail("exception not thrown");
+ } catch (UnknownWriter uw) {}
+ try {
+ client.closeWriter("99999");
+ fail("exception not thrown");
+ } catch (UnknownWriter uw) {}
+ }
+
+ @Test
+ public void testDelete() throws Exception {
+ client.updateAndFlush(creds, table, mutation("row0", "cf", "cq", "value"));
+
+ assertScan(new String[][] {{"row0", "cf", "cq", "value"}}, table);
+
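+ // An update with setDeleteCell(false) is an ordinary write and must not
+ // remove the existing cell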
+ ColumnUpdate upd = new ColumnUpdate(s2bb("cf"), s2bb("cq"));
+ upd.setDeleteCell(false);
+ Map<ByteBuffer,List<ColumnUpdate>> notDelete = Collections.singletonMap(s2bb("row0"), Collections.singletonList(upd));
+ client.updateAndFlush(creds, table, notDelete);
+ String scanner = client.createScanner(creds, table, null);
+ ScanResult entries = client.nextK(scanner, 10);
+ client.closeScanner(scanner);
+ assertFalse(entries.more);
+ assertEquals("Results: " + entries.results, 1, entries.results.size());
+
+ upd = new ColumnUpdate(s2bb("cf"), s2bb("cq"));
+ upd.setDeleteCell(true);
+ Map<ByteBuffer,List<ColumnUpdate>> delete = Collections.singletonMap(s2bb("row0"), Collections.singletonList(upd));
+
+ client.updateAndFlush(creds, table, delete);
+
+ assertScan(new String[][] {}, table);
+ }
+
+ @Test
+ public void testSystemProperties() throws Exception {
+ Map<String,String> cfg = client.getSiteConfiguration(creds);
+
+ // set a property in zookeeper
+ client.setProperty(creds, "table.split.threshold", "500M");
+
+ // check that we can read it
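+ // Property changes propagate through ZooKeeper asynchronously, so poll a
+ // few times rather than asserting immediately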
+ for (int i = 0; i < 5; i++) {
+ cfg = client.getSystemConfiguration(creds);
+ if ("500M".equals(cfg.get("table.split.threshold")))
+ break;
+ UtilWaitThread.sleep(200);
+ }
+ assertEquals("500M", cfg.get("table.split.threshold"));
+
+ // unset the setting, check that it's not what it was
+ client.removeProperty(creds, "table.split.threshold");
+ for (int i = 0; i < 5; i++) {
+ cfg = client.getSystemConfiguration(creds);
+ if (!"500M".equals(cfg.get("table.split.threshold")))
+ break;
+ UtilWaitThread.sleep(200);
+ }
+ assertNotEquals("500M", cfg.get("table.split.threshold"));
+ }
+
+ @Test
+ public void pingTabletServers() throws Exception {
+ int tservers = 0;
+ for (String tserver : client.getTabletServers(creds)) {
+ client.pingTabletServer(creds, tserver);
+ tservers++;
+ }
+ assertTrue(tservers > 0);
+ }
+
+ @Test
+ public void testSiteConfiguration() throws Exception {
+ // get something we know is in the site config
+ MiniAccumuloClusterImpl cluster = SharedMiniClusterBase.getCluster();
+ Map<String,String> cfg = client.getSiteConfiguration(creds);
+ assertTrue(cfg.get("instance.dfs.dir").startsWith(cluster.getConfig().getAccumuloDir().getAbsolutePath()));
+ }
+
+ @Test
+ public void testClassLoad() throws Exception {
+ // try to load some classes via the proxy
+ assertTrue(client.testClassLoad(creds, DevNull.class.getName(), SortedKeyValueIterator.class.getName()));
+ assertFalse(client.testClassLoad(creds, "foo.bar", SortedKeyValueIterator.class.getName()));
+ }
+
+ @Test
+ public void attachIteratorsWithScans() throws Exception {
+ if (client.tableExists(creds, "slow")) {
+ client.deleteTable(creds, "slow");
+ }
+
+ // create a table that's very slow, so we can look for scans
+ client.createTable(creds, "slow", true, TimeType.MILLIS);
+ IteratorSetting setting = new IteratorSetting(100, "slow", SlowIterator.class.getName(), Collections.singletonMap("sleepTime", "250"));
+ client.attachIterator(creds, "slow", setting, EnumSet.allOf(IteratorScope.class));
+
+ // Should take 10 seconds to read every record
+ for (int i = 0; i < 40; i++) {
+ client.updateAndFlush(creds, "slow", mutation("row" + i, "cf", "cq", "value"));
+ }
+
+ // scan
+ Thread t = new Thread() {
+ @Override
+ public void run() {
+ String scanner;
+ TestProxyClient proxyClient2 = null;
+ try {
+ if (isKerberosEnabled()) {
+ UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
+ proxyClient2 = new TestProxyClient(hostname, proxyPort, factory, proxyPrimary, UserGroupInformation.getCurrentUser());
+ } else {
+ proxyClient2 = new TestProxyClient(hostname, proxyPort, factory);
+ }
+
+ Client client2 = proxyClient2.proxy();
+ scanner = client2.createScanner(creds, "slow", null);
+ client2.nextK(scanner, 10);
+ client2.closeScanner(scanner);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ } finally {
+ if (null != proxyClient2) {
+ proxyClient2.close();
+ }
+ }
+ }
+ };
+ t.start();
+
+ // look for the scan many times
+ List<ActiveScan> scans = new ArrayList<ActiveScan>();
+ for (int i = 0; i < 100 && scans.isEmpty(); i++) {
+ for (String tserver : client.getTabletServers(creds)) {
+ List<ActiveScan> scansForServer = client.getActiveScans(creds, tserver);
+ for (ActiveScan scan : scansForServer) {
+ if (clientPrincipal.equals(scan.getUser())) {
+ scans.add(scan);
+ }
+ }
+
+ if (!scans.isEmpty())
+ break;
+ UtilWaitThread.sleep(100);
+ }
+ }
+ t.join();
+
+ assertFalse("Expected to find scans, but found none", scans.isEmpty());
+ boolean found = false;
+ Map<String,String> map = null;
+ for (int i = 0; i < scans.size() && !found; i++) {
+ ActiveScan scan = scans.get(i);
+ if (clientPrincipal.equals(scan.getUser())) {
+ assertTrue(ScanState.RUNNING.equals(scan.getState()) || ScanState.QUEUED.equals(scan.getState()));
+ assertEquals(ScanType.SINGLE, scan.getType());
+ assertEquals("slow", scan.getTable());
+
+ map = client.tableIdMap(creds);
+ assertEquals(map.get("slow"), scan.getExtent().tableId);
+ assertTrue(scan.getExtent().endRow == null);
+ assertTrue(scan.getExtent().prevEndRow == null);
+ found = true;
+ }
+ }
+
+ assertTrue("Could not find a scan against the 'slow' table", found);
+ }
+
+ @Test
+ public void attachIteratorWithCompactions() throws Exception {
+ if (client.tableExists(creds, "slow")) {
+ client.deleteTable(creds, "slow");
+ }
+
+ // create a table that's very slow, so we can look for compactions
+ client.createTable(creds, "slow", true, TimeType.MILLIS);
+ IteratorSetting setting = new IteratorSetting(100, "slow", SlowIterator.class.getName(), Collections.singletonMap("sleepTime", "250"));
+ client.attachIterator(creds, "slow", setting, EnumSet.allOf(IteratorScope.class));
+
+ // Should take 10 seconds to read every record
+ for (int i = 0; i < 40; i++) {
+ client.updateAndFlush(creds, "slow", mutation("row" + i, "cf", "cq", "value"));
+ }
+
+ Map<String,String> map = client.tableIdMap(creds);
+
+ // start a compaction
+ Thread t = new Thread() {
+ @Override
+ public void run() {
+ TestProxyClient proxyClient2 = null;
+ try {
+ if (isKerberosEnabled()) {
+ UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
+ proxyClient2 = new TestProxyClient(hostname, proxyPort, factory, proxyPrimary, UserGroupInformation.getCurrentUser());
+ } else {
+ proxyClient2 = new TestProxyClient(hostname, proxyPort, factory);
+ }
+ Client client2 = proxyClient2.proxy();
+ client2.compactTable(creds, "slow", null, null, null, true, true, null);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ } finally {
+ if (null != proxyClient2) {
+ proxyClient2.close();
+ }
+ }
+ }
+ };
+ t.start();
+
+ final String desiredTableId = map.get("slow");
+
+ // Make sure we can find the slow table
+ assertNotNull(desiredTableId);
+
+ // try to catch it in the act
+ List<ActiveCompaction> compactions = new ArrayList<ActiveCompaction>();
+ for (int i = 0; i < 100 && compactions.isEmpty(); i++) {
+ // Iterate over the tservers
+ for (String tserver : client.getTabletServers(creds)) {
+ // And get the compactions on each
+ List<ActiveCompaction> compactionsOnServer = client.getActiveCompactions(creds, tserver);
+ for (ActiveCompaction compact : compactionsOnServer) {
+ // There might be other compactions occurring (e.g. on METADATA) in which
+ // case we want to prune out those that aren't for our slow table
+ if (desiredTableId.equals(compact.getExtent().tableId)) {
+ compactions.add(compact);
+ }
+ }
+
+ // If we found a compaction for the table we wanted, we can stop looking
+ if (!compactions.isEmpty())
+ break;
+ }
+ UtilWaitThread.sleep(10);
+ }
+ t.join();
+
+ // verify the compaction information
+ assertFalse(compactions.isEmpty());
+ for (ActiveCompaction c : compactions) {
+ if (desiredTableId.equals(c.getExtent().tableId)) {
+ assertTrue(c.inputFiles.isEmpty());
+ assertEquals(CompactionType.MINOR, c.getType());
+ assertEquals(CompactionReason.USER, c.getReason());
+ assertEquals("", c.localityGroup);
+ assertTrue(c.outputFile.contains("default_tablet"));
+
+ return;
+ }
+ }
+
+ fail("Expection to find running compaction for table 'slow' but did not find one");
+ }
+
+ @Test
+ public void userAuthentication() throws Exception {
+ if (isKerberosEnabled()) {
+ assertTrue(client.authenticateUser(creds, clientPrincipal, Collections.<String,String> emptyMap()));
+ // With Kerberos there's no way to authenticate "badly" at the application level; a bad login fails before the RPC is even set up
+ } else {
+ // check password
+ assertTrue(client.authenticateUser(creds, "root", s2pp(SharedMiniClusterBase.getRootPassword())));
+ assertFalse(client.authenticateUser(creds, "root", s2pp("")));
+ }
+ }
+
+ @Test
+ public void userManagement() throws Exception {
+
+ String user;
+ ClusterUser otherClient = null;
+ ByteBuffer password = s2bb("password");
+ if (isKerberosEnabled()) {
+ otherClient = getKdc().getClientPrincipal(1);
+ user = otherClient.getPrincipal();
+ } else {
+ user = getUniqueNames(1)[0];
+ }
+
+ // create a user
+ client.createLocalUser(creds, user, password);
+ // change auths
+ Set<String> users = client.listLocalUsers(creds);
+ Set<String> expectedUsers = new HashSet<String>(Arrays.asList(clientPrincipal, user));
+ assertTrue("Did not find all expected users: " + expectedUsers, users.containsAll(expectedUsers));
+ HashSet<ByteBuffer> auths = new HashSet<ByteBuffer>(Arrays.asList(s2bb("A"), s2bb("B")));
+ client.changeUserAuthorizations(creds, user, auths);
+ List<ByteBuffer> update = client.getUserAuthorizations(creds, user);
+ assertEquals(auths, new HashSet<ByteBuffer>(update));
+
+ // change password
+ if (!isKerberosEnabled()) {
+ password = s2bb("");
+ client.changeLocalUserPassword(creds, user, password);
+ assertTrue(client.authenticateUser(creds, user, s2pp(password.toString())));
+ }
+
+ if (isKerberosEnabled()) {
+ UserGroupInformation.loginUserFromKeytab(otherClient.getPrincipal(), otherClient.getKeytab().getAbsolutePath());
+ final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ // Log back in and make a new connection; the previous one can't be reused
+
+ TestProxyClient otherProxyClient = null;
+ try {
+ otherProxyClient = new TestProxyClient(hostname, proxyPort, factory, proxyPrimary, ugi);
+ otherProxyClient.proxy().login(user, Collections.<String,String> emptyMap());
+ } finally {
+ if (null != otherProxyClient) {
+ otherProxyClient.close();
+ }
+ }
+ } else {
+ // check login with new password
+ client.login(user, s2pp(password.toString()));
+ }
+ }
+
+ @Test
+ public void userPermissions() throws Exception {
+ String userName = getUniqueNames(1)[0];
+ ClusterUser otherClient = null;
+ ByteBuffer password = s2bb("password");
+ ByteBuffer user;
+
+ TestProxyClient origProxyClient = null;
+ Client origClient = null;
+ TestProxyClient userProxyClient = null;
+ Client userClient = null;
+
+ if (isKerberosEnabled()) {
+ otherClient = getKdc().getClientPrincipal(1);
+ userName = otherClient.getPrincipal();
+
+ UserGroupInformation.loginUserFromKeytab(otherClient.getPrincipal(), otherClient.getKeytab().getAbsolutePath());
+ final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ // Log back in and make a new connection; the previous one can't be reused
+
+ userProxyClient = new TestProxyClient(hostname, proxyPort, factory, proxyPrimary, ugi);
+
+ origProxyClient = proxyClient;
+ origClient = client;
+ userClient = client = userProxyClient.proxy();
+
+ user = client.login(userName, Collections.<String,String> emptyMap());
+ } else {
+ userName = getUniqueNames(1)[0];
+ // create a user
+ client.createLocalUser(creds, userName, password);
+ user = client.login(userName, s2pp(password.toString()));
+ }
+
+ // check permission failure
+ try {
+ client.createTable(user, "fail", true, TimeType.MILLIS);
+ fail("should not create the table");
+ } catch (AccumuloSecurityException ex) {
+ if (isKerberosEnabled()) {
+ // Switch back to original client
+ UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
+ client = origClient;
+ }
+ assertFalse(client.listTables(creds).contains("fail"));
+ }
+ // grant permissions and test
+ assertFalse(client.hasSystemPermission(creds, userName, SystemPermission.CREATE_TABLE));
+ client.grantSystemPermission(creds, userName, SystemPermission.CREATE_TABLE);
+ assertTrue(client.hasSystemPermission(creds, userName, SystemPermission.CREATE_TABLE));
+ if (isKerberosEnabled()) {
+ // Switch back to the extra user
+ UserGroupInformation.loginUserFromKeytab(otherClient.getPrincipal(), otherClient.getKeytab().getAbsolutePath());
+ client = userClient;
+ }
+ client.createTable(user, "success", true, TimeType.MILLIS);
+ if (isKerberosEnabled()) {
+ // Switch back to original client
+ UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
+ client = origClient;
+ }
+ assertTrue(client.listTables(creds).contains("success"));
+
+ // revoke permissions
+ client.revokeSystemPermission(creds, userName, SystemPermission.CREATE_TABLE);
+ assertFalse(client.hasSystemPermission(creds, userName, SystemPermission.CREATE_TABLE));
+ try {
+ if (isKerberosEnabled()) {
+ // Switch back to the extra user
+ UserGroupInformation.loginUserFromKeytab(otherClient.getPrincipal(), otherClient.getKeytab().getAbsolutePath());
+ client = userClient;
+ }
+ client.createTable(user, "fail", true, TimeType.MILLIS);
+ fail("should not create the table");
+ } catch (AccumuloSecurityException ex) {
+ if (isKerberosEnabled()) {
+ // Switch back to original client
+ UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
+ client = origClient;
+ }
+ assertFalse(client.listTables(creds).contains("fail"));
+ }
+ // denied!
+ try {
+ if (isKerberosEnabled()) {
+ // Switch back to the extra user
+ UserGroupInformation.loginUserFromKeytab(otherClient.getPrincipal(), otherClient.getKeytab().getAbsolutePath());
+ client = userClient;
+ }
+ String scanner = client.createScanner(user, table, null);
+ client.nextK(scanner, 100);
+ fail("stooge should not read table test");
+ } catch (AccumuloSecurityException ex) {}
+
+ if (isKerberosEnabled()) {
+ // Switch back to original client
+ UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
+ client = origClient;
+ }
+
+ // grant
+ assertFalse(client.hasTablePermission(creds, userName, table, TablePermission.READ));
+ client.grantTablePermission(creds, userName, table, TablePermission.READ);
+ assertTrue(client.hasTablePermission(creds, userName, table, TablePermission.READ));
+
+ if (isKerberosEnabled()) {
+ // Switch back to the extra user
+ UserGroupInformation.loginUserFromKeytab(otherClient.getPrincipal(), otherClient.getKeytab().getAbsolutePath());
+ client = userClient;
+ }
+ String scanner = client.createScanner(user, table, null);
+ client.nextK(scanner, 10);
+ client.closeScanner(scanner);
+
+ if (isKerberosEnabled()) {
+ // Switch back to original client
+ UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
+ client = origClient;
+ }
+
+ // revoke
+ client.revokeTablePermission(creds, userName, table, TablePermission.READ);
+ assertFalse(client.hasTablePermission(creds, userName, table, TablePermission.READ));
+ try {
+ if (isKerberosEnabled()) {
+ // Switch back to the extra user
+ UserGroupInformation.loginUserFromKeytab(otherClient.getPrincipal(), otherClient.getKeytab().getAbsolutePath());
+ client = userClient;
+ }
+ scanner = client.createScanner(user, table, null);
+ client.nextK(scanner, 100);
+ fail("stooge should not read table test");
+ } catch (AccumuloSecurityException ex) {}
+
+ if (isKerberosEnabled()) {
+ // Switch back to original client
+ UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
+ client = origClient;
+ }
+
+ // delete user
+ client.dropLocalUser(creds, userName);
+ Set<String> users = client.listLocalUsers(creds);
+ assertFalse("Should not see user after they are deleted", users.contains(userName));
+
+ if (isKerberosEnabled()) {
+ userProxyClient.close();
+ proxyClient = origProxyClient;
+ client = origClient;
+ }
+ }
+
+ @Test
+ public void testBatchWriter() throws Exception {
+ client.addConstraint(creds, table, NumericValueConstraint.class.getName());
+ // zookeeper propagation time
+ UtilWaitThread.sleep(ZOOKEEPER_PROPAGATION_TIME);
+
+ WriterOptions writerOptions = new WriterOptions();
+ writerOptions.setLatencyMs(10000);
+ writerOptions.setMaxMemory(2);
+ writerOptions.setThreads(1);
+ writerOptions.setTimeoutMs(100000);
+
+ Map<String,Integer> constraints = client.listConstraints(creds, table);
+ while (!constraints.containsKey(NumericValueConstraint.class.getName())) {
+ log.info("Constraints don't contain NumericValueConstraint");
+ Thread.sleep(2000);
+ constraints = client.listConstraints(creds, table);
+ }
+
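+ // The constraint should reject the non-numeric value: success means either
+ // flush() or closeWriter() threw MutationsRejectedException. If neither
+ // throws, the constraint hasn't propagated yet, so wait and retry.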
+ boolean success = false;
+ for (int i = 0; i < 15; i++) {
+ String batchWriter = client.createWriter(creds, table, writerOptions);
+ client.update(batchWriter, mutation("row1", "cf", "cq", "x"));
+ client.update(batchWriter, mutation("row1", "cf", "cq", "x"));
+ try {
+ client.flush(batchWriter);
+ log.debug("Constraint failed to fire. Waiting and retrying");
+ Thread.sleep(5000);
+ continue;
+ } catch (MutationsRejectedException ex) {}
+ try {
+ client.closeWriter(batchWriter);
+ log.debug("Constraint failed to fire. Waiting and retrying");
+ Thread.sleep(5000);
+ continue;
+ } catch (MutationsRejectedException e) {}
+ success = true;
+ break;
+ }
+
+ if (!success) {
+ fail("constraint did not fire");
+ }
+
+ client.removeConstraint(creds, table, 2);
+
+ constraints = client.listConstraints(creds, table);
+ while (constraints.containsKey(NumericValueConstraint.class.getName())) {
+ log.info("Constraints still contains NumericValueConstraint");
+ Thread.sleep(2000);
+ constraints = client.listConstraints(creds, table);
+ }
+
+ assertScan(new String[][] {}, table);
+
+ UtilWaitThread.sleep(ZOOKEEPER_PROPAGATION_TIME);
+
+ writerOptions = new WriterOptions();
+ writerOptions.setLatencyMs(10000);
+ writerOptions.setMaxMemory(3000);
+ writerOptions.setThreads(1);
+ writerOptions.setTimeoutMs(100000);
+
+ success = false;
+ for (int i = 0; i < 15; i++) {
+ try {
+ String batchWriter = client.createWriter(creds, table, writerOptions);
+
+ client.update(batchWriter, mutation("row1", "cf", "cq", "x"));
+ client.flush(batchWriter);
+ client.closeWriter(batchWriter);
+ success = true;
+ break;
+ } catch (MutationsRejectedException e) {
+ log.info("Mutations were rejected, assuming constraint is still active", e);
+ Thread.sleep(5000);
+ }
+ }
+
+ if (!success) {
+ fail("Failed to successfully write data after constraint was removed");
+ }
+
+ assertScan(new String[][] {{"row1", "cf", "cq", "x"}}, table);
+
+ client.deleteTable(creds, table);
+ }
+
+ @Test
+ public void testTableConstraints() throws Exception {
+ log.debug("Setting NumericValueConstraint on " + table);
+
+ // constraints
+ client.addConstraint(creds, table, NumericValueConstraint.class.getName());
+
+ // zookeeper propagation time
+ Thread.sleep(ZOOKEEPER_PROPAGATION_TIME);
+
+ log.debug("Attempting to verify client-side that constraints are observed");
+
+ Map<String,Integer> constraints = client.listConstraints(creds, table);
+ while (!constraints.containsKey(NumericValueConstraint.class.getName())) {
+ log.debug("Constraints don't contain NumericValueConstraint");
+ Thread.sleep(2000);
+ constraints = client.listConstraints(creds, table);
+ }
+
+ assertEquals(2, client.listConstraints(creds, table).size());
+ log.debug("Verified client-side that constraints exist");
+
+ // Write data that satisfies the constraint
+ client.updateAndFlush(creds, table, mutation("row1", "cf", "cq", "123"));
+
+ log.debug("Successfully wrote data that satisfies the constraint");
+ log.debug("Trying to write data that the constraint should reject");
+
+ // Expect failure on data that fails the constraint
+ while (true) {
+ try {
+ client.updateAndFlush(creds, table, mutation("row1", "cf", "cq", "x"));
+ log.debug("Expected mutation to be rejected, but was not. Waiting and retrying");
+ Thread.sleep(5000);
+ } catch (MutationsRejectedException ex) {
+ break;
+ }
+ }
+
+ log.debug("Saw expected failure on data which fails the constraint");
+
+ log.debug("Removing constraint from table");
+ client.removeConstraint(creds, table, 2);
+
+ UtilWaitThread.sleep(ZOOKEEPER_PROPAGATION_TIME);
+
+ constraints = client.listConstraints(creds, table);
+ while (constraints.containsKey(NumericValueConstraint.class.getName())) {
+ log.debug("Constraints contains NumericValueConstraint");
+ Thread.sleep(2000);
+ constraints = client.listConstraints(creds, table);
+ }
+
+ assertEquals(1, client.listConstraints(creds, table).size());
+ log.debug("Verified client-side that the constraint was removed");
+
+ log.debug("Attempting to write mutation that should succeed after constraints was removed");
+ // Make sure we can write the data after we removed the constraint
+ while (true) {
+ try {
+ client.updateAndFlush(creds, table, mutation("row1", "cf", "cq", "x"));
+ break;
+ } catch (MutationsRejectedException ex) {
+ log.debug("Expected mutation accepted, but was not. Waiting and retrying");
+ Thread.sleep(5000);
+ }
+ }
+
+ log.debug("Verifying that record can be read from the table");
+ assertScan(new String[][] {{"row1", "cf", "cq", "x"}}, table);
+ }
+
+ @Test
+ public void tableMergesAndSplits() throws Exception {
+ // add some splits
+ client.addSplits(creds, table, new HashSet<ByteBuffer>(Arrays.asList(s2bb("a"), s2bb("m"), s2bb("z"))));
+ List<ByteBuffer> splits = client.listSplits(creds, table, 1);
+ assertEquals(Arrays.asList(s2bb("m")), splits);
+
+ // Merge some of the splits away
+ client.mergeTablets(creds, table, null, s2bb("m"));
+ splits = client.listSplits(creds, table, 10);
+ assertEquals(Arrays.asList(s2bb("m"), s2bb("z")), splits);
+
+ // Merge the entire table
+ client.mergeTablets(creds, table, null, null);
+ splits = client.listSplits(creds, table, 10);
+ List<ByteBuffer> empty = Collections.emptyList();
+
+ // No splits after merge on whole table
+ assertEquals(empty, splits);
+ }
+
+ @Test
+ public void iteratorFunctionality() throws Exception {
+ // iterators
+ HashMap<String,String> options = new HashMap<String,String>();
+ options.put("type", "STRING");
+ options.put("columns", "cf");
+ IteratorSetting setting = new IteratorSetting(10, table, SummingCombiner.class.getName(), options);
+ client.attachIterator(creds, table, setting, EnumSet.allOf(IteratorScope.class));
+ for (int i = 0; i < 10; i++) {
+ client.updateAndFlush(creds, table, mutation("row1", "cf", "cq", "1"));
+ }
+ // 10 updates of "1" in the value w/ SummingCombiner should return value of "10"
+ assertScan(new String[][] {{"row1", "cf", "cq", "10"}}, table);
+
+ try {
+ client.checkIteratorConflicts(creds, table, setting, EnumSet.allOf(IteratorScope.class));
+ fail("checkIteratorConflicts did not throw an exception");
+ } catch (Exception ex) {
+ // Expected
+ }
+ client.deleteRows(creds, table, null, null);
+ client.removeIterator(creds, table, "test", EnumSet.allOf(IteratorScope.class));
+ String expected[][] = new String[10][];
+ for (int i = 0; i < 10; i++) {
+ client.updateAndFlush(creds, table, mutation("row" + i, "cf", "cq", "" + i));
+ expected[i] = new String[] {"row" + i, "cf", "cq", "" + i};
+ client.flushTable(creds, table, null, null, true);
+ }
+ assertScan(expected, table);
+ }
+
+ @Test
+ public void cloneTable() throws Exception {
+ String TABLE_TEST2 = getUniqueNames(2)[1];
+
+ String expected[][] = new String[10][];
+ for (int i = 0; i < 10; i++) {
+ client.updateAndFlush(creds, table, mutation("row" + i, "cf", "cq", "" + i));
+ expected[i] = new String[] {"row" + i, "cf", "cq", "" + i};
+ client.flushTable(creds, table, null, null, true);
+ }
+ assertScan(expected, table);
+
+ // clone
+ client.cloneTable(creds, table, TABLE_TEST2, true, null, null);
+ assertScan(expected, TABLE_TEST2);
+ client.deleteTable(creds, TABLE_TEST2);
+ }
+
+ @Test
+ public void clearLocatorCache() throws Exception {
+ // don't know how to test this, call it just for fun
+ client.clearLocatorCache(creds, table);
+ }
+
+ @Test
+ public void compactTable() throws Exception {
+ String expected[][] = new String[10][];
+ for (int i = 0; i < 10; i++) {
+ client.updateAndFlush(creds, table, mutation("row" + i, "cf", "cq", "" + i));
+ expected[i] = new String[] {"row" + i, "cf", "cq", "" + i};
+ client.flushTable(creds, table, null, null, true);
+ }
+ assertScan(expected, table);
+
+ // compact
+ client.compactTable(creds, table, null, null, null, true, true, null);
+ assertEquals(1, countFiles(table));
+ assertScan(expected, table);
+ }
+
+ @Test
+ public void diskUsage() throws Exception {
+ String TABLE_TEST2 = getUniqueNames(2)[1];
+
+ // Write some data
+ String expected[][] = new String[10][];
+ for (int i = 0; i < 10; i++) {
+ client.updateAndFlush(creds, table, mutation("row" + i, "cf", "cq", "" + i));
+ expected[i] = new String[] {"row" + i, "cf", "cq", "" + i};
+ client.flushTable(creds, table, null, null, true);
+ }
+ assertScan(expected, table);
+
+ // compact
+ client.compactTable(creds, table, null, null, null, true, true, null);
+ assertEquals(1, countFiles(table));
+ assertScan(expected, table);
+
+ // Clone the table
+ client.cloneTable(creds, table, TABLE_TEST2, true, null, null);
+ Set<String> tablesToScan = new HashSet<String>();
+ tablesToScan.add(table);
+ tablesToScan.add(TABLE_TEST2);
+ tablesToScan.add("foo");
+
+ client.createTable(creds, "foo", true, TimeType.MILLIS);
+
+ // get disk usage
+ List<DiskUsage> diskUsage = client.getDiskUsage(creds, tablesToScan);
+ assertEquals(2, diskUsage.size());
+ // The original table and the clone are lumped together (they share the same files)
+ assertEquals(2, diskUsage.get(0).getTables().size());
+ // The empty table we created
+ assertEquals(1, diskUsage.get(1).getTables().size());
+
+ // Compact the clone so it writes its own files instead of referring to the original
+ client.compactTable(creds, TABLE_TEST2, null, null, null, true, true, null);
+
+ diskUsage = client.getDiskUsage(creds, tablesToScan);
+ assertEquals(3, diskUsage.size());
+ // The original
+ assertEquals(1, diskUsage.get(0).getTables().size());
+ // The clone w/ its own files now
+ assertEquals(1, diskUsage.get(1).getTables().size());
+ // The empty table
+ assertEquals(1, diskUsage.get(2).getTables().size());
+ client.deleteTable(creds, "foo");
+ client.deleteTable(creds, TABLE_TEST2);
+ }
+
+ @Test
+ public void importExportTable() throws Exception {
+ // Write some data
+ String expected[][] = new String[10][];
+ for (int i = 0; i < 10; i++) {
+ client.updateAndFlush(creds, table, mutation("row" + i, "cf", "cq", "" + i));
+ expected[i] = new String[] {"row" + i, "cf", "cq", "" + i};
+ client.flushTable(creds, table, null, null, true);
+ }
+ assertScan(expected, table);
+
+ // export/import
+ MiniAccumuloClusterImpl cluster = SharedMiniClusterBase.getCluster();
+ FileSystem fs = cluster.getFileSystem();
+ Path base = cluster.getTemporaryPath();
+ Path dir = new Path(base, "test");
+ assertTrue(fs.mkdirs(dir));
+ Path destDir = new Path(base, "test_dest");
+ assertTrue(fs.mkdirs(destDir));
+ client.offlineTable(creds, table, false);
+ client.exportTable(creds, table, dir.toString());
+ // copy files to a new location
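+ // exportTable writes a distcp.txt manifest into the export directory
+ // listing each file that must be copied for the import to succeed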
+ FSDataInputStream is = fs.open(new Path(dir, "distcp.txt"));
+ try (BufferedReader r = new BufferedReader(new InputStreamReader(is))) {
+ while (true) {
+ String line = r.readLine();
+ if (line == null)
+ break;
+ Path srcPath = new Path(line);
+ FileUtil.copy(fs, srcPath, fs, destDir, false, fs.getConf());
+ }
+ }
+ client.deleteTable(creds, table);
+ client.importTable(creds, "testify", destDir.toString());
+ assertScan(expected, "testify");
+ client.deleteTable(creds, "testify");
+
+ try {
+ // ACCUMULO-1558 a second import from the same dir should fail, the first import moved the files
+ client.importTable(creds, "testify2", destDir.toString());
+ fail();
+ } catch (Exception e) {}
+
+ assertFalse(client.listTables(creds).contains("testify2"));
+ }
+
+ @Test
+ public void localityGroups() throws Exception {
+ Map<String,Set<String>> groups = new HashMap<String,Set<String>>();
+ groups.put("group1", Collections.singleton("cf1"));
+ groups.put("group2", Collections.singleton("cf2"));
+ client.setLocalityGroups(creds, table, groups);
+ assertEquals(groups, client.getLocalityGroups(creds, table));
+ }
+
+ @Test
+ public void tableProperties() throws Exception {
+ Map<String,String> systemProps = client.getSystemConfiguration(creds);
+ String systemTableSplitThreshold = systemProps.get("table.split.threshold");
+
+ Map<String,String> orig = client.getTableProperties(creds, table);
+ client.setTableProperty(creds, table, "table.split.threshold", "500M");
+
+ // Get the new table property value
+ Map<String,String> update = client.getTableProperties(creds, table);
+ assertEquals(update.get("table.split.threshold"), "500M");
+
+ // Table level properties shouldn't affect system level values
+ assertEquals(systemTableSplitThreshold, client.getSystemConfiguration(creds).get("table.split.threshold"));
+
+ client.removeTableProperty(creds, table, "table.split.threshold");
+ update = client.getTableProperties(creds, table);
+ assertEquals(orig, update);
+ }
+
+ @Test
+ public void tableRenames() throws Exception {
+ // rename table
+ Map<String,String> tables = client.tableIdMap(creds);
+ client.renameTable(creds, table, "bar");
+ Map<String,String> tables2 = client.tableIdMap(creds);
+ assertEquals(tables.get(table), tables2.get("bar"));
+ // table exists
+ assertTrue(client.tableExists(creds, "bar"));
+ assertFalse(client.tableExists(creds, table));
+ client.renameTable(creds, "bar", table);
+ }
+
+ @Test
+ public void bulkImport() throws Exception {
+ MiniAccumuloClusterImpl cluster = SharedMiniClusterBase.getCluster();
+ FileSystem fs = cluster.getFileSystem();
+ Path base = cluster.getTemporaryPath();
+ Path dir = new Path(base, "test");
+ assertTrue(fs.mkdirs(dir));
+
+ // Write an RFile
+ String filename = dir + "/bulk/import/rfile.rf";
+ FileSKVWriter writer = FileOperations.getInstance().openWriter(filename, fs, fs.getConf(), DefaultConfiguration.getInstance());
+ writer.startDefaultLocalityGroup();
+ writer.append(new org.apache.accumulo.core.data.Key(new Text("a"), new Text("b"), new Text("c")), new Value("value".getBytes()));
+ writer.close();
+
+ // Create failures directory
+ fs.mkdirs(new Path(dir + "/bulk/fail"));
+
+ // Run the bulk import
+ client.importDirectory(creds, table, dir + "/bulk/import", dir + "/bulk/fail", true);
+
+ // Make sure we find the data
+ String scanner = client.createScanner(creds, table, null);
+ ScanResult more = client.nextK(scanner, 100);
+ client.closeScanner(scanner);
+ assertEquals(1, more.results.size());
+ ByteBuffer maxRow = client.getMaxRow(creds, table, null, null, false, null, false);
+ assertEquals(s2bb("a"), maxRow);
+ }
+
+ @Test
+ public void testTableClassLoad() throws Exception {
+ assertFalse(client.testTableClassLoad(creds, table, "abc123", SortedKeyValueIterator.class.getName()));
+ assertTrue(client.testTableClassLoad(creds, table, VersioningIterator.class.getName(), SortedKeyValueIterator.class.getName()));
+ }
+
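+ // Helpers to build proxy Condition and ColumnUpdate objects with an empty
+ // column visibility, optionally pinning a value and timestamp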
+ private Condition newCondition(String cf, String cq) {
+ return new Condition(new Column(s2bb(cf), s2bb(cq), s2bb("")));
+ }
+
+ private Condition newCondition(String cf, String cq, String val) {
+ return newCondition(cf, cq).setValue(s2bb(val));
+ }
+
+ private Condition newCondition(String cf, String cq, long ts, String val) {
+ return newCondition(cf, cq).setValue(s2bb(val)).setTimestamp(ts);
+ }
+
+ private ColumnUpdate newColUpdate(String cf, String cq, String val) {
+ return new ColumnUpdate(s2bb(cf), s2bb(cq)).setValue(s2bb(val));
+ }
+
+ private ColumnUpdate newColUpdate(String cf, String cq, long ts, String val) {
+ return new ColumnUpdate(s2bb(cf), s2bb(cq)).setTimestamp(ts).setValue(s2bb(val));
+ }
+
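+ // Scans the given table and asserts that it contains exactly the expected
+ // row/cf/cq/value tuples, in order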
+ private void assertScan(String[][] expected, String table) throws Exception {
+ String scid = client.createScanner(creds, table, new ScanOptions());
+ ScanResult keyValues = client.nextK(scid, expected.length + 1);
+
+ assertEquals("Saw " + keyValues.results, expected.length, keyValues.results.size());
+ assertFalse(keyValues.more);
+
+ for (int i = 0; i < keyValues.results.size(); i++) {
+ checkKey(expected[i][0], expected[i][1], expected[i][2], expected[i][3], keyValues.results.get(i));
+ }
+
+ client.closeScanner(scid);
+ }
+
+ @Test
+ public void testConditionalWriter() throws Exception {
+ log.debug("Adding constraint {} to {}", table, NumericValueConstraint.class.getName());
+ client.addConstraint(creds, table, NumericValueConstraint.class.getName());
+ UtilWaitThread.sleep(ZOOKEEPER_PROPAGATION_TIME);
+
+ while (!client.listConstraints(creds, table).containsKey(NumericValueConstraint.class.getName())) {
+ log.info("Failed to see constraint");
+ Thread.sleep(1000);
+ }
+
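+    // Each row's conditional updates are applied atomically, and only if all conditions for that row are satisfied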
+ String cwid = client.createConditionalWriter(creds, table, new ConditionalWriterOptions());
+
+ Map<ByteBuffer,ConditionalUpdates> updates = new HashMap<ByteBuffer,ConditionalUpdates>();
+
+ updates.put(
+ s2bb("00345"),
+ new ConditionalUpdates(Arrays.asList(newCondition("meta", "seq")), Arrays.asList(newColUpdate("meta", "seq", 10, "1"),
+ newColUpdate("data", "img", "73435435"))));
+
+ Map<ByteBuffer,ConditionalStatus> results = client.updateRowsConditionally(cwid, updates);
+
+ assertEquals(1, results.size());
+ assertEquals(ConditionalStatus.ACCEPTED, results.get(s2bb("00345")));
+
+ assertScan(new String[][] { {"00345", "data", "img", "73435435"}, {"00345", "meta", "seq", "1"}}, table);
+
+ // test not setting values on conditions
+ updates.clear();
+
+ updates.put(s2bb("00345"), new ConditionalUpdates(Arrays.asList(newCondition("meta", "seq")), Arrays.asList(newColUpdate("meta", "seq", "2"))));
+ updates.put(s2bb("00346"), new ConditionalUpdates(Arrays.asList(newCondition("meta", "seq")), Arrays.asList(newColUpdate("meta", "seq", "1"))));
+
+ results = client.updateRowsConditionally(cwid, updates);
+
+ assertEquals(2, results.size());
+ assertEquals(ConditionalStatus.REJECTED, results.get(s2bb("00345")));
+ assertEquals(ConditionalStatus.ACCEPTED, results.get(s2bb("00346")));
+
+ assertScan(new String[][] { {"00345", "data", "img", "73435435"}, {"00345", "meta", "seq", "1"}, {"00346", "meta", "seq", "1"}}, table);
+
+ // test setting values on conditions
+ updates.clear();
+
+ updates.put(
+ s2bb("00345"),
+ new ConditionalUpdates(Arrays.asList(newCondition("meta", "seq", "1")), Arrays.asList(newColUpdate("meta", "seq", 20, "2"),
+ newColUpdate("data", "img", "567890"))));
+
+ updates.put(s2bb("00346"), new ConditionalUpdates(Arrays.asList(newCondition("meta", "seq", "2")), Arrays.asList(newColUpdate("meta", "seq", "3"))));
+
+ results = client.updateRowsConditionally(cwid, updates);
+
+ assertEquals(2, results.size());
+ assertEquals(ConditionalStatus.ACCEPTED, results.get(s2bb("00345")));
+ assertEquals(ConditionalStatus.REJECTED, results.get(s2bb("00346")));
+
+ assertScan(new String[][] { {"00345", "data", "img", "567890"}, {"00345", "meta", "seq", "2"}, {"00346", "meta", "seq", "1"}}, table);
+
+    // test setting timestamp on condition to a non-existent version
+ updates.clear();
+
+ updates.put(
+ s2bb("00345"),
+ new ConditionalUpdates(Arrays.asList(newCondition("meta", "seq", 10, "2")), Arrays.asList(newColUpdate("meta", "seq", 30, "3"),
+ newColUpdate("data", "img", "1234567890"))));
+
+ results = client.updateRowsConditionally(cwid, updates);
+
+ assertEquals(1, results.size());
+ assertEquals(ConditionalStatus.REJECTED, results.get(s2bb("00345")));
+
+ assertScan(new String[][] { {"00345", "data", "img", "567890"}, {"00345", "meta", "seq", "2"}, {"00346", "meta", "seq", "1"}}, table);
+
+ // test setting timestamp to an existing version
+
+ updates.clear();
+
+ updates.put(
+ s2bb("00345"),
+ new ConditionalUpdates(Arrays.asList(newCondition("meta", "seq", 20, "2")), Arrays.asList(newColUpdate("meta", "seq", 30, "3"),
+ newColUpdate("data", "img", "1234567890"))));
+
+ results = client.updateRowsConditionally(cwid, updates);
+
+ assertEquals(1, results.size());
+ assertEquals(ConditionalStatus.ACCEPTED, results.get(s2bb("00345")));
+
+ assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"}}, table);
+
+ // run test w/ condition that has iterators
+ // following should fail w/o iterator
+ client.updateAndFlush(creds, table, Collections.singletonMap(s2bb("00347"), Arrays.asList(newColUpdate("data", "count", "1"))));
+ client.updateAndFlush(creds, table, Collections.singletonMap(s2bb("00347"), Arrays.asList(newColUpdate("data", "count", "1"))));
+ client.updateAndFlush(creds, table, Collections.singletonMap(s2bb("00347"), Arrays.asList(newColUpdate("data", "count", "1"))));
+
+ updates.clear();
+ updates.put(s2bb("00347"),
+ new ConditionalUpdates(Arrays.asList(newCondition("data", "count", "3")), Arrays.asList(newColUpdate("data", "img", "1234567890"))));
+
+ results = client.updateRowsConditionally(cwid, updates);
+
+ assertEquals(1, results.size());
+ assertEquals(ConditionalStatus.REJECTED, results.get(s2bb("00347")));
+
+ assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"},
+ {"00347", "data", "count", "1"}}, table);
+
+ // following test w/ iterator setup should succeed
+ Condition iterCond = newCondition("data", "count", "3");
+ Map<String,String> props = new HashMap<String,String>();
+ props.put("type", "STRING");
+ props.put("columns", "data:count");
+ IteratorSetting is = new IteratorSetting(1, "sumc", SummingCombiner.class.getName(), props);
+ iterCond.setIterators(Arrays.asList(is));
+
+ updates.clear();
+ updates.put(s2bb("00347"), new ConditionalUpdates(Arrays.asList(iterCond), Arrays.asList(newColUpdate("data", "img", "1234567890"))));
+
+ results = client.updateRowsConditionally(cwid, updates);
+
+ assertEquals(1, results.size());
+ assertEquals(ConditionalStatus.ACCEPTED, results.get(s2bb("00347")));
+
+ assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"},
+ {"00347", "data", "count", "1"}, {"00347", "data", "img", "1234567890"}}, table);
+
+ ConditionalStatus status = null;
+ for (int i = 0; i < 30; i++) {
+      // test a mutation that violates a constraint
+ updates.clear();
+ updates.put(s2bb("00347"),
+ new ConditionalUpdates(Arrays.asList(newCondition("data", "img", "1234567890")), Arrays.asList(newColUpdate("data", "count", "A"))));
+
+ results = client.updateRowsConditionally(cwid, updates);
+
+ assertEquals(1, results.size());
+ status = results.get(s2bb("00347"));
+ if (ConditionalStatus.VIOLATED != status) {
+ log.info("ConditionalUpdate was not rejected by server due to table constraint. Sleeping and retrying");
+ Thread.sleep(5000);
+ continue;
+ }
+
+ assertEquals(ConditionalStatus.VIOLATED, status);
+ break;
+ }
+
+ // Final check to make sure we succeeded and didn't exceed the retries
+ assertEquals(ConditionalStatus.VIOLATED, status);
+
+ assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"},
+ {"00347", "data", "count", "1"}, {"00347", "data", "img", "1234567890"}}, table);
+
+ // run test with two conditions
+ // both conditions should fail
+ updates.clear();
+ updates.put(
+ s2bb("00347"),
+ new ConditionalUpdates(Arrays.asList(newCondition("data", "img", "565"), newCondition("data", "count", "2")), Arrays.asList(
+ newColUpdate("data", "count", "3"), newColUpdate("data", "img", "0987654321"))));
+
+ results = client.updateRowsConditionally(cwid, updates);
+
+ assertEquals(1, results.size());
+ assertEquals(ConditionalStatus.REJECTED, results.get(s2bb("00347")));
+
+ assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"},
+ {"00347", "data", "count", "1"}, {"00347", "data", "img", "1234567890"}}, table);
+
+ // one condition should fail
+ updates.clear();
+ updates.put(
+ s2bb("00347"),
+ new ConditionalUpdates(Arrays.asList(newCondition("data", "img", "1234567890"), newCondition("data", "count", "2")), Arrays.asList(
+ newColUpdate("data", "count", "3"), newColUpdate("data", "img", "0987654321"))));
+
+ results = client.updateRowsConditionally(cwid, updates);
+
+ assertEquals(1, results.size());
+ assertEquals(ConditionalStatus.REJECTED, results.get(s2bb("00347")));
+
+ assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"},
+ {"00347", "data", "count", "1"}, {"00347", "data", "img", "1234567890"}}, table);
+
+ // one condition should fail
+ updates.clear();
+ updates.put(
+ s2bb("00347"),
+ new ConditionalUpdates(Arrays.asList(newCondition("data", "img", "565"), newCondition("data", "count", "1")), Arrays.asList(
+ newColUpdate("data", "count", "3"), newColUpdate("data", "img", "0987654321"))));
+
+ results = client.updateRowsConditionally(cwid, updates);
+
+ assertEquals(1, results.size());
+ assertEquals(ConditionalStatus.REJECTED, results.get(s2bb("00347")));
+
+ assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"},
+ {"00347", "data", "count", "1"}, {"00347", "data", "img", "1234567890"}}, table);
+
+ // both conditions should succeed
+
+ ConditionalStatus result = client.updateRowConditionally(
+ creds,
+ table,
+ s2bb("00347"),
+ new ConditionalUpdates(Arrays.asList(newCondition("data", "img", "1234567890"), newCondition("data", "count", "1")), Arrays.asList(
+ newColUpdate("data", "count", "3"), newColUpdate("data", "img", "0987654321"))));
+
+ assertEquals(ConditionalStatus.ACCEPTED, result);
+
+ assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"},
+ {"00347", "data", "count", "3"}, {"00347", "data", "img", "0987654321"}}, table);
+
+ client.closeConditionalWriter(cwid);
+ try {
+ client.updateRowsConditionally(cwid, updates);
+ fail("conditional writer not closed");
+ } catch (UnknownWriter uk) {}
+
+ String principal;
+ ClusterUser cwuser = null;
+ if (isKerberosEnabled()) {
+ cwuser = getKdc().getClientPrincipal(1);
+ principal = cwuser.getPrincipal();
+ client.createLocalUser(creds, principal, s2bb("unused"));
+
+ } else {
+ principal = "cwuser";
+ // run test with colvis
+ client.createLocalUser(creds, principal, s2bb("bestpasswordever"));
+ }
+
+ client.changeUserAuthorizations(creds, principal, Collections.singleton(s2bb("A")));
+ client.grantTablePermission(creds, principal, table, TablePermission.WRITE);
+ client.grantTablePermission(creds, principal, table, TablePermission.READ);
+
+ TestProxyClient cwuserProxyClient = null;
+ Client origClient = null;
+ Map<String,String> cwProperties;
+ if (isKerberosEnabled()) {
+ UserGroupInformation.loginUserFromKeytab(cwuser.getPrincipal(), cwuser.getKeytab().getAbsolutePath());
+ final UserGroupInformation cwuserUgi = UserGroupInformation.getCurrentUser();
+      // Re-login and make a new connection; we can't reuse the previous one
+ cwuserProxyClient = new TestProxyClient(hostname, proxyPort, factory, proxyPrimary, cwuserUgi);
+ origClient = client;
+ client = cwuserProxyClient.proxy();
+ cwProperties = Collections.emptyMap();
+ } else {
+ cwProperties = Collections.singletonMap("password", "bestpasswordever");
+ }
+
+ try {
+ ByteBuffer cwCreds = client.login(principal, cwProperties);
+
+ cwid = client.createConditionalWriter(cwCreds, table, new ConditionalWriterOptions().setAuthorizations(Collections.singleton(s2bb("A"))));
+
+ updates.clear();
+ updates.put(
+ s2bb("00348"),
+ new ConditionalUpdates(Arrays.asList(new Condition(new Column(s2bb("data"), s2bb("c"), s2bb("A")))), Arrays.asList(newColUpdate("data", "seq", "1"),
+ newColUpdate("data", "c", "1").setColVisibility(s2bb("A")))));
+ updates
+ .put(
+ s2bb("00349"),
+ new ConditionalUpdates(Arrays.asList(new Condition(new Column(s2bb("data"), s2bb("c"), s2bb("B")))), Arrays.asList(newColUpdate("data", "seq",
+ "1"))));
+
+ results = client.updateRowsConditionally(cwid, updates);
+
+ assertEquals(2, results.size());
+ assertEquals(ConditionalStatus.ACCEPTED, results.get(s2bb("00348")));
+ assertEquals(ConditionalStatus.INVISIBLE_VISIBILITY, results.get(s2bb("00349")));
+
+ if (isKerberosEnabled()) {
+ UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
+ client = origClient;
+ }
+ // Verify that the original user can't see the updates with visibilities set
+ assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"},
+ {"00347", "data", "count", "3"}, {"00347", "data", "img", "0987654321"}, {"00348", "data", "seq", "1"}}, table);
+
+ if (isKerberosEnabled()) {
+ UserGroupInformation.loginUserFromKeytab(cwuser.getPrincipal(), cwuser.getKeytab().getAbsolutePath());
+ client = cwuserProxyClient.proxy();
+ }
+
+      updates.clear();
+ updates.put(s2bb("00348"), new ConditionalUpdates(Arrays.asList(new Condition(new Column(s2bb("data"), s2bb("c"), s2bb("A"))).setValue(s2bb("0"))),
+ Arrays.asList(newColUpdate("data", "seq", "2"), newColUpdate("data", "c", "2").setColVisibility(s2bb("A")))));
+
+ results = client.updateRowsConditionally(cwid, updates);
+
+ assertEquals(1, results.size());
+ assertEquals(ConditionalStatus.REJECTED, results.get(s2bb("00348")));
+
+ if (isKerberosEnabled()) {
+ UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
+ client = origClient;
+ }
+
+ // Same results as the original user
+ assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"},
+ {"00347", "data", "count", "3"}, {"00347", "data", "img", "0987654321"}, {"00348", "data", "seq", "1"}}, table);
+
+ if (isKerberosEnabled()) {
+ UserGroupInformation.loginUserFromKeytab(cwuser.getPrincipal(), cwuser.getKeytab().getAbsolutePath());
+ client = cwuserProxyClient.proxy();
+ }
+
+ updates.clear();
+ updates.put(s2bb("00348"), new ConditionalUpdates(Arrays.asList(new Condition(new Column(s2bb("data"), s2bb("c"), s2bb("A"))).setValue(s2bb("1"))),
+ Arrays.asList(newColUpdate("data", "seq", "2"), newColUpdate("data", "c", "2").setColVisibility(s2bb("A")))));
+
+ results = client.updateRowsConditionally(cwid, updates);
+
+ assertEquals(1, results.size());
+ assertEquals(ConditionalStatus.ACCEPTED, results.get(s2bb("00348")));
+
+ if (isKerberosEnabled()) {
+ UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
+ client = origClient;
+ }
+
+ assertScan(new String[][] { {"00345", "data", "img", "1234567890"}, {"00345", "meta", "seq", "3"}, {"00346", "meta", "seq", "1"},
+ {"00347", "data", "count", "3"}, {"00347", "data", "img", "0987654321"}, {"00348", "data", "seq", "2"}}, table);
+
+ if (isKerberosEnabled()) {
+ UserGroupInformation.loginUserFromKeytab(cwuser.getPrincipal(), cwuser.getKeytab().getAbsolutePath());
+ client = cwuserProxyClient.proxy();
+ }
+
+ client.closeConditionalWriter(cwid);
+ try {
+ client.updateRowsConditionally(cwid, updates);
+ fail("conditional writer not closed");
+ } catch (UnknownWriter uk) {}
+ } finally {
+ if (isKerberosEnabled()) {
+ // Close th
<TRUNCATED>
[35/43] accumulo git commit: ACCUMULO-3871 move ITs into distro jar,
stop building test jar
Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/BatchWriterFlushIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BatchWriterFlushIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BatchWriterFlushIT.java
new file mode 100644
index 0000000..7c05a0f
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BatchWriterFlushIT.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class BatchWriterFlushIT extends AccumuloClusterHarness {
+
+ private static final int NUM_TO_FLUSH = 100000;
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 90;
+ }
+
+ @Test
+ public void run() throws Exception {
+ Connector c = getConnector();
+ String[] tableNames = getUniqueNames(2);
+ String bwft = tableNames[0];
+ c.tableOperations().create(bwft);
+ String bwlt = tableNames[1];
+ c.tableOperations().create(bwlt);
+ runFlushTest(bwft);
+ runLatencyTest(bwlt);
+
+ }
+
+ private void runLatencyTest(String tableName) throws Exception {
+    // with a max latency of 1 second, the writer should automatically flush after about a second
+ BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig().setMaxLatency(1000, TimeUnit.MILLISECONDS));
+ Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
+
+ Mutation m = new Mutation(new Text(String.format("r_%10d", 1)));
+ m.put(new Text("cf"), new Text("cq"), new Value("1".getBytes(UTF_8)));
+ bw.addMutation(m);
+
+ UtilWaitThread.sleep(500);
+
+ int count = Iterators.size(scanner.iterator());
+
+ if (count != 0) {
+ throw new Exception("Flushed too soon");
+ }
+
+ UtilWaitThread.sleep(1500);
+
+ count = Iterators.size(scanner.iterator());
+
+ if (count != 1) {
+ throw new Exception("Did not flush");
+ }
+
+ bw.close();
+ }
+
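+  // Writes four batches of NUM_TO_FLUSH mutations, flushing after each, and verifies the flushed data is
+  // immediately visible via random lookups and a full-range scan; finally checks that a closed writer rejects adds.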
+ private void runFlushTest(String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException,
+ Exception {
+ BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
+ Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
+ Random r = new Random();
+
+ for (int i = 0; i < 4; i++) {
+ for (int j = 0; j < NUM_TO_FLUSH; j++) {
+ int row = i * NUM_TO_FLUSH + j;
+
+ Mutation m = new Mutation(new Text(String.format("r_%10d", row)));
+ m.put(new Text("cf"), new Text("cq"), new Value(("" + row).getBytes()));
+ bw.addMutation(m);
+ }
+
+ bw.flush();
+
+ // do a few random lookups into the data just flushed
+
+ for (int k = 0; k < 10; k++) {
+ int rowToLookup = r.nextInt(NUM_TO_FLUSH) + i * NUM_TO_FLUSH;
+
+ scanner.setRange(new Range(new Text(String.format("r_%10d", rowToLookup))));
+
+ Iterator<Entry<Key,Value>> iter = scanner.iterator();
+
+ if (!iter.hasNext())
+ throw new Exception(" row " + rowToLookup + " not found after flush");
+
+ Entry<Key,Value> entry = iter.next();
+
+ if (iter.hasNext())
+ throw new Exception("Scanner returned too much");
+
+ verifyEntry(rowToLookup, entry);
+ }
+
+ // scan all data just flushed
+ scanner.setRange(new Range(new Text(String.format("r_%10d", i * NUM_TO_FLUSH)), true, new Text(String.format("r_%10d", (i + 1) * NUM_TO_FLUSH)), false));
+ Iterator<Entry<Key,Value>> iter = scanner.iterator();
+
+ for (int j = 0; j < NUM_TO_FLUSH; j++) {
+ int row = i * NUM_TO_FLUSH + j;
+
+ if (!iter.hasNext())
+ throw new Exception("Scan stopped permaturely at " + row);
+
+ Entry<Key,Value> entry = iter.next();
+
+ verifyEntry(row, entry);
+ }
+
+ if (iter.hasNext())
+ throw new Exception("Scanner returned too much");
+
+ }
+
+ bw.close();
+
+ // test adding a mutation to a closed batch writer
+ boolean caught = false;
+ try {
+ bw.addMutation(new Mutation(new Text("foobar")));
+ } catch (IllegalStateException ise) {
+ caught = true;
+ }
+
+ if (!caught) {
+ throw new Exception("Adding to closed batch writer did not fail");
+ }
+ }
+
+ private void verifyEntry(int row, Entry<Key,Value> entry) throws Exception {
+ if (!entry.getKey().getRow().toString().equals(String.format("r_%10d", row))) {
+ throw new Exception("Unexpected key returned, expected " + row + " got " + entry.getKey());
+ }
+
+ if (!entry.getValue().toString().equals("" + row)) {
+ throw new Exception("Unexpected value, expected " + row + " got " + entry.getValue());
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/BigRootTabletIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BigRootTabletIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BigRootTabletIT.java
new file mode 100644
index 0000000..11dcb66
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BigRootTabletIT.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.Map;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+import com.google.common.collect.Iterators;
+
+public class BigRootTabletIT extends AccumuloClusterHarness {
+  // ACCUMULO-542: A large root tablet will fail to load if it doesn't fit in the tserver scan buffers
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ Map<String,String> siteConfig = cfg.getSiteConfig();
+ siteConfig.put(Property.TABLE_SCAN_MAXMEM.getKey(), "1024");
+ siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "60m");
+ cfg.setSiteConfig(siteConfig);
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 4 * 60;
+ }
+
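+  // Grow the root tablet by splitting the metadata table and repeatedly flushing it, then restart the
+  // cluster and verify the root tablet can still be scanned.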
+ @Test
+ public void test() throws Exception {
+ Connector c = getConnector();
+ c.tableOperations().addSplits(MetadataTable.NAME, FunctionalTestUtils.splits("0 1 2 3 4 5 6 7 8 9 a".split(" ")));
+ String[] names = getUniqueNames(10);
+ for (String name : names) {
+ c.tableOperations().create(name);
+ c.tableOperations().flush(MetadataTable.NAME, null, null, true);
+ c.tableOperations().flush(RootTable.NAME, null, null, true);
+ }
+ cluster.stop();
+ cluster.start();
+ assertTrue(Iterators.size(c.createScanner(RootTable.NAME, Authorizations.EMPTY).iterator()) > 0);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/BinaryIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BinaryIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BinaryIT.java
new file mode 100644
index 0000000..85716d5
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BinaryIT.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.test.TestBinaryRows;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class BinaryIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 90;
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ runTest(c, tableName);
+ }
+
+ @Test
+ public void testPreSplit() throws Exception {
+ String tableName = getUniqueNames(1)[0];
+ Connector c = getConnector();
+ c.tableOperations().create(tableName);
+ SortedSet<Text> splits = new TreeSet<Text>();
+ splits.add(new Text("8"));
+ splits.add(new Text("256"));
+ c.tableOperations().addSplits(tableName, splits);
+ runTest(c, tableName);
+ }
+
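+  // Ingests 100k rows with binary row IDs, verifies them, deletes the middle 50k, then verifies both the
+  // remaining ranges and that the deleted range is gone.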
+ public static void runTest(Connector c, String tableName) throws Exception {
+ BatchWriterOpts bwOpts = new BatchWriterOpts();
+ ScannerOpts scanOpts = new ScannerOpts();
+ TestBinaryRows.Opts opts = new TestBinaryRows.Opts();
+ opts.setTableName(tableName);
+ opts.start = 0;
+ opts.num = 100000;
+ opts.mode = "ingest";
+ TestBinaryRows.runTest(c, opts, bwOpts, scanOpts);
+ opts.mode = "verify";
+ TestBinaryRows.runTest(c, opts, bwOpts, scanOpts);
+ opts.start = 25000;
+ opts.num = 50000;
+ opts.mode = "delete";
+ TestBinaryRows.runTest(c, opts, bwOpts, scanOpts);
+ opts.start = 0;
+ opts.num = 25000;
+ opts.mode = "verify";
+ TestBinaryRows.runTest(c, opts, bwOpts, scanOpts);
+ opts.start = 75000;
+ opts.num = 25000;
+ opts.mode = "randomLookups";
+ TestBinaryRows.runTest(c, opts, bwOpts, scanOpts);
+ opts.start = 25000;
+ opts.num = 50000;
+ opts.mode = "verifyDeleted";
+ TestBinaryRows.runTest(c, opts, bwOpts, scanOpts);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/BinaryStressIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BinaryStressIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BinaryStressIT.java
new file mode 100644
index 0000000..440d2cf
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BinaryStressIT.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.InstanceOperations;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class BinaryStressIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 4 * 60;
+ }
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "3s");
+ cfg.setProperty(Property.TSERV_MAXMEM, "50K");
+ cfg.setProperty(Property.TSERV_MAJC_DELAY, "0");
+ }
+
+ private String majcDelay, maxMem;
+
+ @Before
+ public void alterConfig() throws Exception {
+ if (ClusterType.MINI == getClusterType()) {
+ return;
+ }
+
+ InstanceOperations iops = getConnector().instanceOperations();
+ Map<String,String> conf = iops.getSystemConfiguration();
+ majcDelay = conf.get(Property.TSERV_MAJC_DELAY.getKey());
+ maxMem = conf.get(Property.TSERV_MAXMEM.getKey());
+
+ iops.setProperty(Property.TSERV_MAJC_DELAY.getKey(), "0");
+ iops.setProperty(Property.TSERV_MAXMEM.getKey(), "50K");
+
+ getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
+ getClusterControl().startAllServers(ServerType.TABLET_SERVER);
+ }
+
+ @After
+ public void resetConfig() throws Exception {
+ if (null != majcDelay) {
+ InstanceOperations iops = getConnector().instanceOperations();
+ iops.setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
+ iops.setProperty(Property.TSERV_MAXMEM.getKey(), maxMem);
+
+ getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
+ getClusterControl().startAllServers(ServerType.TABLET_SERVER);
+ }
+ }
+
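+  // With a 10K split threshold, the binary ingest should force many splits; count the tablets recorded in
+  // the metadata table to confirm.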
+ @Test
+ public void binaryStressTest() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
+ BinaryIT.runTest(c, tableName);
+ String id = c.tableOperations().tableIdMap().get(tableName);
+ Set<Text> tablets = new HashSet<>();
+ Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(Range.prefix(id));
+ for (Entry<Key,Value> entry : s) {
+ tablets.add(entry.getKey().getRow());
+ }
+ assertTrue("Expected at least 8 tablets, saw " + tablets.size(), tablets.size() > 7);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/BloomFilterIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BloomFilterIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BloomFilterIT.java
new file mode 100644
index 0000000..fbbe542
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BloomFilterIT.java
@@ -0,0 +1,256 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.PartialKey;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.file.keyfunctor.ColumnFamilyFunctor;
+import org.apache.accumulo.core.file.keyfunctor.ColumnQualifierFunctor;
+import org.apache.accumulo.core.file.keyfunctor.RowFunctor;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.fate.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.MemoryUnit;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class BloomFilterIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(BloomFilterIT.class);
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setDefaultMemory(1, MemoryUnit.GIGABYTE);
+ cfg.setNumTservers(1);
+ Map<String,String> siteConfig = cfg.getSiteConfig();
+ siteConfig.put(Property.TSERV_TOTAL_MUTATION_QUEUE_MAX.getKey(), "10M");
+ cfg.setSiteConfig(siteConfig);
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 6 * 60;
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector c = getConnector();
+ final String readAhead = c.instanceOperations().getSystemConfiguration().get(Property.TSERV_READ_AHEAD_MAXCONCURRENT.getKey());
+ c.instanceOperations().setProperty(Property.TSERV_READ_AHEAD_MAXCONCURRENT.getKey(), "1");
+ try {
+ Thread.sleep(1000);
+ final String[] tables = getUniqueNames(4);
+ for (String table : tables) {
+ TableOperations tops = c.tableOperations();
+ tops.create(table);
+ tops.setProperty(table, Property.TABLE_INDEXCACHE_ENABLED.getKey(), "false");
+ tops.setProperty(table, Property.TABLE_BLOCKCACHE_ENABLED.getKey(), "false");
+ tops.setProperty(table, Property.TABLE_BLOOM_SIZE.getKey(), "2000000");
+ tops.setProperty(table, Property.TABLE_BLOOM_ERRORRATE.getKey(), "1%");
+ tops.setProperty(table, Property.TABLE_BLOOM_LOAD_THRESHOLD.getKey(), "0");
+ tops.setProperty(table, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "64K");
+ }
+ log.info("Writing");
+ write(c, tables[0], 1, 0, 2000000000, 500);
+ write(c, tables[1], 2, 0, 2000000000, 500);
+ write(c, tables[2], 3, 0, 2000000000, 500);
+ log.info("Writing complete");
+
+ // test inserting an empty key
+ BatchWriter bw = c.createBatchWriter(tables[3], new BatchWriterConfig());
+ Mutation m = new Mutation(new Text(""));
+ m.put(new Text(""), new Text(""), new Value("foo1".getBytes()));
+ bw.addMutation(m);
+ bw.close();
+ c.tableOperations().flush(tables[3], null, null, true);
+
+ for (String table : Arrays.asList(tables[0], tables[1], tables[2])) {
+ c.tableOperations().compact(table, null, null, true, true);
+ }
+
+ // ensure compactions are finished
+ for (String table : tables) {
+ FunctionalTestUtils.checkRFiles(c, table, 1, 1, 1, 1);
+ }
+
+      // these queries should only run quickly if bloom filters are working, so let's get a baseline first
+ log.info("Base query");
+ long t1 = query(c, tables[0], 1, 0, 2000000000, 5000, 500);
+ long t2 = query(c, tables[1], 2, 0, 2000000000, 5000, 500);
+ long t3 = query(c, tables[2], 3, 0, 2000000000, 5000, 500);
+ log.info("Base query complete");
+
+ log.info("Rewriting with bloom filters");
+ c.tableOperations().setProperty(tables[0], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
+ c.tableOperations().setProperty(tables[0], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), RowFunctor.class.getName());
+
+ c.tableOperations().setProperty(tables[1], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
+ c.tableOperations().setProperty(tables[1], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), ColumnFamilyFunctor.class.getName());
+
+ c.tableOperations().setProperty(tables[2], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
+ c.tableOperations().setProperty(tables[2], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), ColumnQualifierFunctor.class.getName());
+
+ c.tableOperations().setProperty(tables[3], Property.TABLE_BLOOM_ENABLED.getKey(), "true");
+ c.tableOperations().setProperty(tables[3], Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), RowFunctor.class.getName());
+
+ // ensure the updates to zookeeper propagate
+ UtilWaitThread.sleep(500);
+
+ c.tableOperations().compact(tables[3], null, null, false, true);
+ c.tableOperations().compact(tables[0], null, null, false, true);
+ c.tableOperations().compact(tables[1], null, null, false, true);
+ c.tableOperations().compact(tables[2], null, null, false, true);
+ log.info("Rewriting with bloom filters complete");
+
+ // these queries should only run quickly if bloom
+ // filters are working
+ log.info("Bloom query");
+ long tb1 = query(c, tables[0], 1, 0, 2000000000, 5000, 500);
+ long tb2 = query(c, tables[1], 2, 0, 2000000000, 5000, 500);
+ long tb3 = query(c, tables[2], 3, 0, 2000000000, 5000, 500);
+ log.info("Bloom query complete");
+ timeCheck(t1 + t2 + t3, tb1 + tb2 + tb3);
+
+ // test querying for empty key
+ Scanner scanner = c.createScanner(tables[3], Authorizations.EMPTY);
+ scanner.setRange(new Range(new Text("")));
+
+ if (!scanner.iterator().next().getValue().toString().equals("foo1")) {
+ throw new Exception("Did not see foo1");
+ }
+ } finally {
+ c.instanceOperations().setProperty(Property.TSERV_READ_AHEAD_MAXCONCURRENT.getKey(), readAhead);
+ }
+ }
+
+ private void timeCheck(long t1, long t2) throws Exception {
+ double improvement = (t1 - t2) * 1.0 / t1;
+ if (improvement < .1) {
+ throw new Exception("Queries had less than 10% improvement (old: " + t1 + " new: " + t2 + " improvement: " + (improvement * 100) + "%)");
+ }
+ log.info(String.format("Improvement: %.2f%% (%d vs %d)", (improvement * 100), t1, t2));
+ }
+
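+  // Issues 'num' random point lookups through a BatchScanner, verifies the returned values against the
+  // expected set, and returns the elapsed scan time in milliseconds.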
+ private long query(Connector c, String table, int depth, long start, long end, int num, int step) throws Exception {
+ Random r = new Random(42);
+
+ HashSet<Long> expected = new HashSet<Long>();
+ List<Range> ranges = new ArrayList<Range>(num);
+ Text key = new Text();
+ Text row = new Text("row"), cq = new Text("cq"), cf = new Text("cf");
+
+ for (int i = 0; i < num; ++i) {
+ Long k = ((r.nextLong() & 0x7fffffffffffffffl) % (end - start)) + start;
+ key.set(String.format("k_%010d", k));
+ Range range = null;
+ Key acuKey;
+
+ if (k % (start + step) == 0) {
+ expected.add(k);
+ }
+
+ switch (depth) {
+ case 1:
+ range = new Range(new Text(key));
+ break;
+ case 2:
+ acuKey = new Key(row, key, cq);
+ range = new Range(acuKey, true, acuKey.followingKey(PartialKey.ROW_COLFAM), false);
+ break;
+ case 3:
+ acuKey = new Key(row, cf, key);
+ range = new Range(acuKey, true, acuKey.followingKey(PartialKey.ROW_COLFAM_COLQUAL), false);
+ break;
+ }
+
+ ranges.add(range);
+ }
+
+ BatchScanner bs = c.createBatchScanner(table, Authorizations.EMPTY, 1);
+ bs.setRanges(ranges);
+
+ long t1 = System.currentTimeMillis();
+ for (Entry<Key,Value> entry : bs) {
+ long v = Long.parseLong(entry.getValue().toString());
+ if (!expected.remove(v)) {
+ throw new Exception("Got unexpected return " + entry.getKey() + " " + entry.getValue());
+ }
+ }
+ long t2 = System.currentTimeMillis();
+
+ if (expected.size() > 0) {
+ throw new Exception("Did not get all expected values " + expected.size());
+ }
+
+ bs.close();
+
+ return t2 - t1;
+ }
+
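+  // Writes keys from 'start' to 'end' in increments of 'step', placing the formatted key in the row,
+  // column family, or column qualifier depending on 'depth', then flushes the table.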
+ private void write(Connector c, String table, int depth, long start, long end, int step) throws Exception {
+
+ BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
+
+ for (long i = start; i < end; i += step) {
+ String key = String.format("k_%010d", i);
+
+ Mutation m = null;
+
+ switch (depth) {
+ case 1:
+ m = new Mutation(new Text(key));
+ m.put(new Text("cf"), new Text("cq"), new Value(("" + i).getBytes()));
+ break;
+ case 2:
+ m = new Mutation(new Text("row"));
+ m.put(new Text(key), new Text("cq"), new Value(("" + i).getBytes()));
+ break;
+ case 3:
+ m = new Mutation(new Text("row"));
+ m.put(new Text("cf"), new Text(key), new Value(("" + i).getBytes()));
+ break;
+ }
+
+ bw.addMutation(m);
+ }
+
+ bw.close();
+
+ c.tableOperations().flush(table, null, null, true);
+ }
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/BulkFileIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BulkFileIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BulkFileIT.java
new file mode 100644
index 0000000..1abafeb
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BulkFileIT.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.file.FileOperations;
+import org.apache.accumulo.core.file.FileSKVWriter;
+import org.apache.accumulo.core.file.rfile.RFile;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.MemoryUnit;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.conf.ServerConfigurationFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class BulkFileIT extends AccumuloClusterHarness {
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration conf) {
+ cfg.setMemory(ServerType.TABLET_SERVER, 128 * 4, MemoryUnit.MEGABYTE);
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 4 * 60;
+ }
+
+ @Test
+ public void testBulkFile() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ SortedSet<Text> splits = new TreeSet<Text>();
+ for (String split : "0333 0666 0999 1333 1666".split(" "))
+ splits.add(new Text(split));
+ c.tableOperations().addSplits(tableName, splits);
+ Configuration conf = new Configuration();
+ AccumuloConfiguration aconf = new ServerConfigurationFactory(c.getInstance()).getConfiguration();
+ FileSystem fs = getCluster().getFileSystem();
+
+ String rootPath = cluster.getTemporaryPath().toString();
+
+ String dir = rootPath + "/bulk_test_diff_files_89723987592_" + getUniqueNames(1)[0];
+
+ fs.delete(new Path(dir), true);
+
+ FileSKVWriter writer1 = FileOperations.getInstance().openWriter(dir + "/f1." + RFile.EXTENSION, fs, conf, aconf);
+ writer1.startDefaultLocalityGroup();
+ writeData(writer1, 0, 333);
+ writer1.close();
+
+ FileSKVWriter writer2 = FileOperations.getInstance().openWriter(dir + "/f2." + RFile.EXTENSION, fs, conf, aconf);
+ writer2.startDefaultLocalityGroup();
+ writeData(writer2, 334, 999);
+ writer2.close();
+
+ FileSKVWriter writer3 = FileOperations.getInstance().openWriter(dir + "/f3." + RFile.EXTENSION, fs, conf, aconf);
+ writer3.startDefaultLocalityGroup();
+ writeData(writer3, 1000, 1999);
+ writer3.close();
+
+ FunctionalTestUtils.bulkImport(c, fs, tableName, dir);
+
+ FunctionalTestUtils.checkRFiles(c, tableName, 6, 6, 1, 1);
+
+ verifyData(tableName, 0, 1999);
+
+ }
+
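+  // Scans the table and verifies rows s..e are present, in order, with matching values, and that
+  // nothing else was imported.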
+ private void verifyData(String table, int s, int e) throws Exception {
+ Scanner scanner = getConnector().createScanner(table, Authorizations.EMPTY);
+
+ Iterator<Entry<Key,Value>> iter = scanner.iterator();
+
+ for (int i = s; i <= e; i++) {
+ if (!iter.hasNext())
+ throw new Exception("row " + i + " not found");
+
+ Entry<Key,Value> entry = iter.next();
+
+ String row = String.format("%04d", i);
+
+ if (!entry.getKey().getRow().equals(new Text(row)))
+ throw new Exception("unexpected row " + entry.getKey() + " " + i);
+
+ if (Integer.parseInt(entry.getValue().toString()) != i)
+ throw new Exception("unexpected value " + entry + " " + i);
+ }
+
+ if (iter.hasNext())
+ throw new Exception("found more than expected " + iter.next());
+ }
+
+ private void writeData(FileSKVWriter w, int s, int e) throws Exception {
+ for (int i = s; i <= e; i++) {
+ w.append(new Key(new Text(String.format("%04d", i))), new Value(Integer.toString(i).getBytes(UTF_8)));
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/BulkIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BulkIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BulkIT.java
new file mode 100644
index 0000000..f60724e
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BulkIT.java
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.TestIngest.Opts;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class BulkIT extends AccumuloClusterHarness {
+
+ private static final int N = 100000;
+ private static final int COUNT = 5;
+ private static final BatchWriterOpts BWOPTS = new BatchWriterOpts();
+ private static final ScannerOpts SOPTS = new ScannerOpts();
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 4 * 60;
+ }
+
+ private Configuration origConf;
+
+ @Before
+ public void saveConf() {
+ origConf = CachedConfiguration.getInstance();
+ }
+
+ @After
+ public void restoreConf() {
+ if (null != origConf) {
+ CachedConfiguration.setInstance(origConf);
+ }
+ }
+
+ @Test
+ public void test() throws Exception {
+ runTest(getConnector(), getCluster().getFileSystem(), getCluster().getTemporaryPath(), getAdminPrincipal(), getUniqueNames(1)[0],
+ this.getClass().getName(), testName.getMethodName());
+ }
+
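+  // Writes COUNT rfiles of N rows each plus one single-entry rfile, bulk imports the directory, and
+  // verifies every ingested row is readable.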
+ static void runTest(Connector c, FileSystem fs, Path basePath, String principal, String tableName, String filePrefix, String dirSuffix) throws Exception {
+ c.tableOperations().create(tableName);
+ CachedConfiguration.setInstance(fs.getConf());
+
+ Path base = new Path(basePath, "testBulkFail_" + dirSuffix);
+ fs.delete(base, true);
+ fs.mkdirs(base);
+ Path bulkFailures = new Path(base, "failures");
+ Path files = new Path(base, "files");
+ fs.mkdirs(bulkFailures);
+ fs.mkdirs(files);
+
+ Opts opts = new Opts();
+ opts.timestamp = 1;
+ opts.random = 56;
+ opts.rows = N;
+ opts.instance = c.getInstance().getInstanceName();
+ opts.cols = 1;
+ opts.setTableName(tableName);
+ opts.conf = CachedConfiguration.getInstance();
+ opts.fs = fs;
+ String fileFormat = filePrefix + "rf%02d";
+ for (int i = 0; i < COUNT; i++) {
+ opts.outputFile = new Path(files, String.format(fileFormat, i)).toString();
+ opts.startRow = N * i;
+ TestIngest.ingest(c, opts, BWOPTS);
+ }
+ opts.outputFile = base + String.format(fileFormat, N);
+ opts.startRow = N;
+ opts.rows = 1;
+    // create an rfile with one entry; there was a bug with this case:
+ TestIngest.ingest(c, opts, BWOPTS);
+
+ // Make sure the server can modify the files
+ FsShell fsShell = new FsShell(fs.getConf());
+ Assert.assertEquals("Failed to chmod " + base.toString(), 0, fsShell.run(new String[] {"-chmod", "-R", "777", base.toString()}));
+
+ c.tableOperations().importDirectory(tableName, files.toString(), bulkFailures.toString(), false);
+ VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+ vopts.setTableName(tableName);
+ vopts.random = 56;
+ vopts.setPrincipal(principal);
+ for (int i = 0; i < COUNT; i++) {
+ vopts.startRow = i * N;
+ vopts.rows = N;
+ VerifyIngest.verifyIngest(c, vopts, SOPTS);
+ }
+ vopts.startRow = N;
+ vopts.rows = 1;
+ VerifyIngest.verifyIngest(c, vopts, SOPTS);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java b/test/src/main/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
new file mode 100644
index 0000000..74d3e96
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static com.google.common.base.Charsets.UTF_8;
+
+import org.apache.accumulo.core.cli.ClientOpts.Password;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
+import org.apache.accumulo.core.client.security.tokens.KerberosToken;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * This test verifies that when a lot of files are bulk imported into a table with one tablet, and that tablet then splits, not all map files go to the children tablets.
+ */
+
+public class BulkSplitOptimizationIT extends AccumuloClusterHarness {
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setProperty(Property.TSERV_MAJC_DELAY, "1s");
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ private String majcDelay;
+
+ @Before
+ public void alterConfig() throws Exception {
+ Connector conn = getConnector();
+ majcDelay = conn.instanceOperations().getSystemConfiguration().get(Property.TSERV_MAJC_DELAY.getKey());
+ if (!"1s".equals(majcDelay)) {
+ conn.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), "1s");
+ getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
+ getClusterControl().startAllServers(ServerType.TABLET_SERVER);
+ }
+ }
+
+ @After
+ public void resetConfig() throws Exception {
+ if (null != majcDelay) {
+ Connector conn = getConnector();
+ conn.instanceOperations().setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
+ getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
+ getClusterControl().startAllServers(ServerType.TABLET_SERVER);
+ }
+ }
+
+ static final int ROWS = 100000;
+ static final int SPLITS = 99;
+
+ @Test
+ public void testBulkSplitOptimization() throws Exception {
+ final Connector c = getConnector();
+ final String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1000");
+ c.tableOperations().setProperty(tableName, Property.TABLE_FILE_MAX.getKey(), "1000");
+ c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "1G");
+
+ FileSystem fs = getFileSystem();
+ Path testDir = new Path(getUsableDir(), "testmf");
+ FunctionalTestUtils.createRFiles(c, fs, testDir.toString(), ROWS, SPLITS, 8);
+ FileStatus[] stats = fs.listStatus(testDir);
+
+ System.out.println("Number of generated files: " + stats.length);
+ FunctionalTestUtils.bulkImport(c, fs, tableName, testDir.toString());
+ FunctionalTestUtils.checkSplits(c, tableName, 0, 0);
+ FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 100, 100);
+
+ // initiate splits
+ getConnector().tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "100K");
+
+ UtilWaitThread.sleep(2000);
+
+ // wait until we are past the split threshold; roughly 78 splits are expected, so require at least 75
+ while (getConnector().tableOperations().listSplits(tableName).size() < 75) {
+ UtilWaitThread.sleep(500);
+ }
+
+ FunctionalTestUtils.checkSplits(c, tableName, 50, 100);
+ VerifyIngest.Opts opts = new VerifyIngest.Opts();
+ opts.timestamp = 1;
+ opts.dataSize = 50;
+ opts.random = 56;
+ opts.rows = 100000;
+ opts.startRow = 0;
+ opts.cols = 1;
+ opts.setTableName(tableName);
+
+ AuthenticationToken adminToken = getAdminToken();
+ if (adminToken instanceof PasswordToken) {
+ PasswordToken token = (PasswordToken) getAdminToken();
+ opts.setPassword(new Password(new String(token.getPassword(), UTF_8)));
+ opts.setPrincipal(getAdminPrincipal());
+ } else if (adminToken instanceof KerberosToken) {
+ ClientConfiguration clientConf = cluster.getClientConfig();
+ opts.updateKerberosCredentials(clientConf);
+ } else {
+ Assert.fail("Unknown token type");
+ }
+
+ VerifyIngest.verifyIngest(c, opts, new ScannerOpts());
+
+ // ensure no tablet received all of the map files; on average each tablet should reference ~2.5 files
+ FunctionalTestUtils.checkRFiles(c, tableName, 50, 100, 1, 4);
+ }
+
+}
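
A note on the bulk-import step exercised above: FunctionalTestUtils.bulkImport (not shown in this diff) ultimately drives the public TableOperations.importDirectory call. A minimal standalone sketch, with the instance name, ZooKeeper quorum, credentials, table name, and paths all illustrative:

  import org.apache.accumulo.core.client.Connector;
  import org.apache.accumulo.core.client.ZooKeeperInstance;
  import org.apache.accumulo.core.client.security.tokens.PasswordToken;

  Connector conn = new ZooKeeperInstance("inst", "zkhost:2181")
      .getConnector("root", new PasswordToken("secret"));
  // rfiles under /bulk/files are assigned to tablets; any that cannot be loaded
  // are moved to the (pre-created, empty) failure directory
  conn.tableOperations().importDirectory("mytable", "/bulk/files", "/bulk/failures", false);

The final argument controls whether the tablet servers stamp a load-time timestamp on the imported entries; passing false, as the test effectively does, keeps the timestamps written by createRFiles.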
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/ChaoticBalancerIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ChaoticBalancerIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ChaoticBalancerIT.java
new file mode 100644
index 0000000..4055c3a
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ChaoticBalancerIT.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Map;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.master.balancer.ChaoticLoadBalancer;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class ChaoticBalancerIT extends AccumuloClusterHarness {
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ Map<String,String> siteConfig = cfg.getSiteConfig();
+ siteConfig.put(Property.TSERV_MAXMEM.getKey(), "10K");
+ siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "0");
+ cfg.setSiteConfig(siteConfig);
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 4 * 60;
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector c = getConnector();
+ String[] names = getUniqueNames(2);
+ String tableName = names[0], unused = names[1]; // 'unused' receives splits but no data, giving the balancer extra tablets to shuffle
+ c.tableOperations().create(tableName);
+ c.tableOperations().setProperty(tableName, Property.TABLE_LOAD_BALANCER.getKey(), ChaoticLoadBalancer.class.getName());
+ c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
+ SortedSet<Text> splits = new TreeSet<Text>();
+ for (int i = 0; i < 100; i++) {
+ splits.add(new Text(String.format("%03d", i)));
+ }
+ c.tableOperations().create(unused);
+ c.tableOperations().addSplits(unused, splits);
+ TestIngest.Opts opts = new TestIngest.Opts();
+ VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+ vopts.rows = opts.rows = 20000;
+ opts.setTableName(tableName);
+ vopts.setTableName(tableName);
+ ClientConfiguration clientConfig = getCluster().getClientConfig();
+ if (clientConfig.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ opts.updateKerberosCredentials(clientConfig);
+ vopts.updateKerberosCredentials(clientConfig);
+ } else {
+ opts.setPrincipal(getAdminPrincipal());
+ vopts.setPrincipal(getAdminPrincipal());
+ }
+ TestIngest.ingest(c, opts, new BatchWriterOpts());
+ c.tableOperations().flush(tableName, null, null, true);
+ VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/ClassLoaderIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ClassLoaderIT.java b/test/src/main/java/org/apache/accumulo/test/functional/ClassLoaderIT.java
new file mode 100644
index 0000000..c06feed
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ClassLoaderIT.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.Iterator;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.Combiner;
+import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.hamcrest.CoreMatchers;
+import org.junit.Assume;
+import org.junit.Before;
+import org.junit.Test;
+
+public class ClassLoaderIT extends AccumuloClusterHarness {
+
+ private static final long ZOOKEEPER_PROPAGATION_TIME = 10 * 1000;
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ private String rootPath;
+
+ @Before
+ public void checkCluster() {
+ Assume.assumeThat(getClusterType(), CoreMatchers.is(ClusterType.MINI));
+ MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) getCluster();
+ rootPath = mac.getConfig().getDir().getAbsolutePath();
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+ Mutation m = new Mutation("row1");
+ m.put("cf", "col1", "Test");
+ bw.addMutation(m);
+ bw.close();
+ scanCheck(c, tableName, "Test");
+ FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
+ Path jarPath = new Path(rootPath + "/lib/ext/Test.jar");
+ fs.copyFromLocalFile(new Path(System.getProperty("user.dir") + "/src/test/resources/TestCombinerX.jar"), jarPath);
+ UtilWaitThread.sleep(1000);
+ IteratorSetting is = new IteratorSetting(10, "TestCombiner", "org.apache.accumulo.test.functional.TestCombiner");
+ Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("cf")));
+ c.tableOperations().attachIterator(tableName, is, EnumSet.of(IteratorScope.scan));
+ UtilWaitThread.sleep(ZOOKEEPER_PROPAGATION_TIME);
+ scanCheck(c, tableName, "TestX");
+ fs.delete(jarPath, true);
+ fs.copyFromLocalFile(new Path(System.getProperty("user.dir") + "/src/test/resources/TestCombinerY.jar"), jarPath);
+ UtilWaitThread.sleep(5000);
+ scanCheck(c, tableName, "TestY");
+ fs.delete(jarPath, true);
+ }
+
+ private void scanCheck(Connector c, String tableName, String expected) throws Exception {
+ Scanner bs = c.createScanner(tableName, Authorizations.EMPTY);
+ Iterator<Entry<Key,Value>> iterator = bs.iterator();
+ assertTrue(iterator.hasNext());
+ Entry<Key,Value> next = iterator.next();
+ assertFalse(iterator.hasNext());
+ assertEquals(expected, next.getValue().toString());
+ }
+
+}
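
The TestCombinerX.jar and TestCombinerY.jar artifacts are prebuilt test resources, not part of this diff; each presumably contains a different build of org.apache.accumulo.test.functional.TestCombiner whose output ("TestX" vs "TestY") reveals which jar the classloader served. A hypothetical sketch of the shape such a combiner would take:

  import java.util.Iterator;

  import org.apache.accumulo.core.data.Key;
  import org.apache.accumulo.core.data.Value;
  import org.apache.accumulo.core.iterators.Combiner;

  public class TestCombiner extends Combiner {
    @Override
    public Value reduce(Key key, Iterator<Value> iter) {
      // drain all versions of the key and emit a constant marker so the
      // scan-side check can tell which jar's class was loaded
      while (iter.hasNext())
        iter.next();
      return new Value("TestX".getBytes());
    }
  }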
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/CleanTmpIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CleanTmpIT.java b/test/src/main/java/org/apache/accumulo/test/functional/CleanTmpIT.java
new file mode 100644
index 0000000..779b407
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CleanTmpIT.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Iterators;
+
+public class CleanTmpIT extends ConfigurableMacBase {
+ private static final Logger log = LoggerFactory.getLogger(CleanTmpIT.class);
+
+ @Override
+ public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "3s");
+ cfg.setNumTservers(1);
+ // use raw local file system so walogs sync and flush will work
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 4 * 60;
+ }
+
+ @Test
+ public void test() throws Exception {
+ Connector c = getConnector();
+ // make a table
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ // write to it
+ BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+ Mutation m = new Mutation("row");
+ m.put("cf", "cq", "value");
+ bw.addMutation(m);
+ bw.flush();
+
+ // Compact memory to make a file
+ c.tableOperations().compact(tableName, null, null, true, true);
+
+ // Make sure that we'll have a WAL
+ m = new Mutation("row2");
+ m.put("cf", "cq", "value");
+ bw.addMutation(m);
+ bw.close();
+
+ // create a fake _tmp file in its directory
+ String id = c.tableOperations().tableIdMap().get(tableName);
+ Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.setRange(Range.prefix(id));
+ s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+ Entry<Key,Value> entry = Iterables.getOnlyElement(s);
+ Path file = new Path(entry.getKey().getColumnQualifier().toString());
+
+ FileSystem fs = getCluster().getFileSystem();
+ assertTrue("Could not find file: " + file, fs.exists(file));
+ Path tabletDir = file.getParent();
+ assertNotNull("Tablet dir should not be null", tabletDir);
+ Path tmp = new Path(tabletDir, "junk.rf_tmp");
+ // Make the file
+ fs.create(tmp).close();
+ log.info("Created tmp file {}", tmp);
+ getCluster().stop();
+ getCluster().start();
+
+ Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
+ assertEquals(2, Iterators.size(scanner.iterator()));
+ // If we performed log recovery, we should have cleaned up any stray files
+ assertFalse("File still exists: " + tmp, fs.exists(tmp));
+ }
+}
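
The stray *_tmp file this test plants is the same kind of leftover a tablet server can leave behind if it dies mid-write; log recovery on restart is what sweeps them away. A sketch of how such leftovers could be located by hand (the path layout and glob pattern are illustrative, not the test's mechanism):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  FileSystem fs = FileSystem.get(new Configuration());
  // look for temporary rfiles under every tablet directory
  FileStatus[] strays = fs.globStatus(new Path("/accumulo/tables/*/*/*_tmp"));
  if (strays != null) {
    for (FileStatus stat : strays)
      System.out.println("stray temporary file: " + stat.getPath());
  }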
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/CleanUpIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CleanUpIT.java b/test/src/main/java/org/apache/accumulo/test/functional/CleanUpIT.java
new file mode 100644
index 0000000..1f6d1a0
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CleanUpIT.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.CleanUp;
+import org.apache.accumulo.harness.SharedMiniClusterBase;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Ensures that all threads spawned for ZooKeeper and Thrift connectivity are reaped after calling CleanUp.shutdownNow().
+ *
+ * Because the shutdown is destructive to every client in the current context classloader, the normal teardown methods would fail (they attempt to create a Connector). Until
+ * the ZooKeeperInstance and Connector are self-contained with respect to resource management, we can't leverage the AccumuloClusterBase.
+ */
+public class CleanUpIT extends SharedMiniClusterBase {
+ private static final Logger log = LoggerFactory.getLogger(CleanUpIT.class);
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 30;
+ }
+
+ @Test
+ public void run() throws Exception {
+
+ String tableName = getUniqueNames(1)[0];
+ getConnector().tableOperations().create(tableName);
+
+ BatchWriter bw = getConnector().createBatchWriter(tableName, new BatchWriterConfig());
+
+ Mutation m1 = new Mutation("r1");
+ m1.put("cf1", "cq1", 1, "5");
+
+ bw.addMutation(m1);
+
+ bw.flush();
+
+ Scanner scanner = getConnector().createScanner(tableName, Authorizations.EMPTY);
+
+ int count = 0;
+ for (Entry<Key,Value> entry : scanner) {
+ count++;
+ if (!entry.getValue().toString().equals("5")) {
+ Assert.fail("Unexpected value " + entry.getValue());
+ }
+ }
+
+ Assert.assertEquals("Unexpected count", 1, count);
+
+ int threadCount = countThreads();
+ if (threadCount < 2) {
+ printThreadNames();
+ Assert.fail("Not seeing expected threads. Saw " + threadCount);
+ }
+
+ CleanUp.shutdownNow();
+
+ Mutation m2 = new Mutation("r2");
+ m2.put("cf1", "cq1", 1, "6");
+
+ try {
+ bw.addMutation(m2);
+ bw.flush();
+ Assert.fail("batch writer did not fail");
+ } catch (Exception e) {
+ // expected: the batch writer's resources were reaped by shutdownNow()
+ }
+
+ try {
+ // expect this to fail also, want to clean up batch writer threads
+ bw.close();
+ Assert.fail("batch writer close not fail");
+ } catch (Exception e) {
+ // expected
+ }
+
+ try {
+ count = 0;
+ Iterator<Entry<Key,Value>> iter = scanner.iterator();
+ while (iter.hasNext()) {
+ iter.next();
+ count++;
+ }
+ Assert.fail("scanner did not fail");
+ } catch (Exception e) {
+ // expected: the scanner can no longer reach ZooKeeper or the tablet servers
+ }
+
+ threadCount = countThreads();
+ if (threadCount > 0) {
+ printThreadNames();
+ Assert.fail("Threads did not go away. Saw " + threadCount);
+ }
+ }
+
+ private void printThreadNames() {
+ Set<Thread> threads = Thread.getAllStackTraces().keySet();
+ Exception e = new Exception();
+ for (Thread thread : threads) {
+ e.setStackTrace(thread.getStackTrace());
+ log.info("thread name: " + thread.getName(), e);
+ }
+ }
+
+ /**
+ * Count the ZooKeeper and Thrift client threads that CleanUp.shutdownNow() is expected to reap.
+ *
+ */
+ private int countThreads() {
+ int count = 0;
+ Set<Thread> threads = Thread.getAllStackTraces().keySet();
+ for (Thread thread : threads) {
+
+ if (thread.getName().toLowerCase().contains("sendthread") || thread.getName().toLowerCase().contains("eventthread"))
+ count++;
+
+ if (thread.getName().toLowerCase().contains("thrift") && thread.getName().toLowerCase().contains("pool"))
+ count++;
+ }
+
+ return count;
+ }
+}
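
CleanUp.shutdownNow() exists for client applications (a webapp being the classic case) that would otherwise leak the static ZooKeeper session and Thrift pool threads when their classloader is discarded. A minimal usage sketch:

  import org.apache.accumulo.core.util.CleanUp;

  // call once, after the last Connector/Scanner/BatchWriter created in this
  // classloader is finished -- nothing created from them is usable afterwards
  CleanUp.shutdownNow();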
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java b/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java
new file mode 100644
index 0000000..b3d0ab5
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java
@@ -0,0 +1,295 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.accumulo.cluster.AccumuloCluster;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.admin.DiskUsage;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Test;
+
+/**
+ * Tests clone semantics: property overrides and exclusions, independence of a clone from its source table, and cloning a pre-split table.
+ */
+public class CloneTestIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 2 * 60;
+ }
+
+ @Test
+ public void testProps() throws Exception {
+ String[] tableNames = getUniqueNames(2);
+ String table1 = tableNames[0];
+ String table2 = tableNames[1];
+
+ Connector c = getConnector();
+
+ c.tableOperations().create(table1);
+
+ c.tableOperations().setProperty(table1, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1M");
+ c.tableOperations().setProperty(table1, Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX.getKey(), "2M");
+ c.tableOperations().setProperty(table1, Property.TABLE_FILE_MAX.getKey(), "23");
+
+ BatchWriter bw = writeData(table1, c);
+
+ Map<String,String> props = new HashMap<String,String>();
+ props.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "500K");
+
+ Set<String> exclude = new HashSet<String>();
+ exclude.add(Property.TABLE_FILE_MAX.getKey());
+
+ c.tableOperations().clone(table1, table2, true, props, exclude);
+
+ Mutation m3 = new Mutation("009");
+ m3.put("data", "x", "1");
+ m3.put("data", "y", "2");
+ bw.addMutation(m3);
+ bw.close();
+
+ checkData(table2, c);
+
+ checkMetadata(table2, c);
+
+ HashMap<String,String> tableProps = new HashMap<String,String>();
+ for (Entry<String,String> prop : c.tableOperations().getProperties(table2)) {
+ tableProps.put(prop.getKey(), prop.getValue());
+ }
+
+ Assert.assertEquals("500K", tableProps.get(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey()));
+ Assert.assertEquals(Property.TABLE_FILE_MAX.getDefaultValue(), tableProps.get(Property.TABLE_FILE_MAX.getKey()));
+ Assert.assertEquals("2M", tableProps.get(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX.getKey()));
+
+ c.tableOperations().delete(table1);
+ c.tableOperations().delete(table2);
+
+ }
+
+ private void checkData(String table2, Connector c) throws TableNotFoundException {
+ Scanner scanner = c.createScanner(table2, Authorizations.EMPTY);
+
+ HashMap<String,String> expected = new HashMap<String,String>();
+ expected.put("001:x", "9");
+ expected.put("001:y", "7");
+ expected.put("008:x", "3");
+ expected.put("008:y", "4");
+
+ HashMap<String,String> actual = new HashMap<String,String>();
+
+ for (Entry<Key,Value> entry : scanner)
+ actual.put(entry.getKey().getRowData().toString() + ":" + entry.getKey().getColumnQualifierData().toString(), entry.getValue().toString());
+
+ Assert.assertEquals(expected, actual);
+ }
+
+ private void checkMetadata(String table, Connector conn) throws Exception {
+ Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+
+ s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+ MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(s);
+ String tableId = conn.tableOperations().tableIdMap().get(table);
+
+ Assert.assertNotNull("Could not get table id for " + table, tableId);
+
+ s.setRange(Range.prefix(tableId));
+
+ Key k;
+ Text cf = new Text(), cq = new Text();
+ int itemsInspected = 0;
+ for (Entry<Key,Value> entry : s) {
+ itemsInspected++;
+ k = entry.getKey();
+ k.getColumnFamily(cf);
+ k.getColumnQualifier(cq);
+
+ if (cf.equals(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME)) {
+ Path p = new Path(cq.toString());
+ FileSystem fs = cluster.getFileSystem();
+ Assert.assertTrue("File does not exist: " + p, fs.exists(p));
+ } else if (cf.equals(MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily())) {
+ Assert.assertEquals("Saw unexpected cq", MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), cq);
+ Path tabletDir = new Path(entry.getValue().toString());
+ Path tableDir = tabletDir.getParent();
+ Path tablesDir = tableDir.getParent();
+
+ Assert.assertEquals(ServerConstants.TABLE_DIR, tablesDir.getName());
+ } else {
+ Assert.fail("Got unexpected key-value: " + entry);
+ // unreachable: Assert.fail always throws an AssertionError
+ }
+ }
+
+ Assert.assertTrue("Expected to find metadata entries", itemsInspected > 0);
+ }
+
+ private BatchWriter writeData(String table1, Connector c) throws TableNotFoundException, MutationsRejectedException {
+ BatchWriter bw = c.createBatchWriter(table1, new BatchWriterConfig());
+
+ Mutation m1 = new Mutation("001");
+ m1.put("data", "x", "9");
+ m1.put("data", "y", "7");
+
+ Mutation m2 = new Mutation("008");
+ m2.put("data", "x", "3");
+ m2.put("data", "y", "4");
+
+ bw.addMutation(m1);
+ bw.addMutation(m2);
+
+ bw.flush();
+ return bw;
+ }
+
+ @Test
+ public void testDeleteClone() throws Exception {
+ String[] tableNames = getUniqueNames(3);
+ String table1 = tableNames[0];
+ String table2 = tableNames[1];
+ String table3 = tableNames[2];
+
+ Connector c = getConnector();
+ AccumuloCluster cluster = getCluster();
+ Assume.assumeTrue(cluster instanceof MiniAccumuloClusterImpl);
+ MiniAccumuloClusterImpl mac = (MiniAccumuloClusterImpl) cluster;
+ String rootPath = mac.getConfig().getDir().getAbsolutePath();
+
+ // verify that deleting a new table removes the files
+ c.tableOperations().create(table3);
+ writeData(table3, c).close();
+ c.tableOperations().flush(table3, null, null, true);
+ // check for files
+ FileSystem fs = getCluster().getFileSystem();
+ String id = c.tableOperations().tableIdMap().get(table3);
+ FileStatus[] status = fs.listStatus(new Path(rootPath + "/accumulo/tables/" + id));
+ assertTrue(status.length > 0);
+ // verify disk usage
+ List<DiskUsage> diskUsage = c.tableOperations().getDiskUsage(Collections.singleton(table3));
+ assertEquals(1, diskUsage.size());
+ assertTrue(diskUsage.get(0).getUsage() > 100);
+ // delete the table
+ c.tableOperations().delete(table3);
+ // verify it's gone from the file system
+ Path tablePath = new Path(rootPath + "/accumulo/tables/" + id);
+ if (fs.exists(tablePath)) {
+ status = fs.listStatus(tablePath);
+ assertTrue(status == null || status.length == 0);
+ }
+
+ c.tableOperations().create(table1);
+
+ BatchWriter bw = writeData(table1, c);
+
+ Map<String,String> props = new HashMap<String,String>();
+ props.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "500K");
+
+ Set<String> exclude = new HashSet<String>();
+ exclude.add(Property.TABLE_FILE_MAX.getKey());
+
+ c.tableOperations().clone(table1, table2, true, props, exclude);
+
+ Mutation m3 = new Mutation("009");
+ m3.put("data", "x", "1");
+ m3.put("data", "y", "2");
+ bw.addMutation(m3);
+ bw.close();
+
+ // delete source table, should not affect clone
+ c.tableOperations().delete(table1);
+
+ checkData(table2, c);
+
+ c.tableOperations().compact(table2, null, null, true, true);
+
+ checkData(table2, c);
+
+ c.tableOperations().delete(table2);
+
+ }
+
+ @Test
+ public void testCloneWithSplits() throws Exception {
+ Connector conn = getConnector();
+
+ List<Mutation> mutations = new ArrayList<Mutation>();
+ TreeSet<Text> splits = new TreeSet<Text>();
+ for (int i = 0; i < 10; i++) {
+ splits.add(new Text(Integer.toString(i)));
+ Mutation m = new Mutation(Integer.toString(i));
+ m.put("", "", "");
+ mutations.add(m);
+ }
+
+ String[] tables = getUniqueNames(2);
+
+ conn.tableOperations().create(tables[0]);
+
+ conn.tableOperations().addSplits(tables[0], splits);
+
+ BatchWriter bw = conn.createBatchWriter(tables[0], new BatchWriterConfig());
+ bw.addMutations(mutations);
+ bw.close();
+
+ conn.tableOperations().clone(tables[0], tables[1], true, null, null);
+
+ conn.tableOperations().deleteRows(tables[1], new Text("4"), new Text("8"));
+
+ List<String> rows = Arrays.asList("0", "1", "2", "3", "4", "9");
+ List<String> actualRows = new ArrayList<String>();
+ for (Entry<Key,Value> entry : conn.createScanner(tables[1], Authorizations.EMPTY)) {
+ actualRows.add(entry.getKey().getRow().toString());
+ }
+
+ Assert.assertEquals(rows, actualRows);
+ }
+
+}
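
The clone calls above all use the five-argument TableOperations.clone: source table, destination table, whether to flush in-memory data first, properties to override on the clone, and properties to exclude so the clone falls back to defaults. A compact sketch mirroring what testProps sets up, given an existing Connector conn (table names illustrative):

  import java.util.Collections;
  import java.util.Map;
  import java.util.Set;

  import org.apache.accumulo.core.conf.Property;

  Map<String,String> setProps = Collections.singletonMap(
      Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "500K");
  Set<String> excludeProps = Collections.singleton(Property.TABLE_FILE_MAX.getKey());
  // flush=true pushes in-memory data into rfiles first; the clone then references
  // the source's rfiles in the metadata table rather than copying them
  conn.tableOperations().clone("source", "sourceCopy", true, setProps, excludeProps);

That shared-rfile behavior is why checkMetadata above verifies that every data file referenced by the clone actually exists, and why deleting the source table must not affect the clone.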
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/CombinerIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CombinerIT.java b/test/src/main/java/org/apache/accumulo/test/functional/CombinerIT.java
new file mode 100644
index 0000000..d4ef18e
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CombinerIT.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.LongCombiner.Type;
+import org.apache.accumulo.core.iterators.user.SummingCombiner;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.junit.Test;
+
+public class CombinerIT extends AccumuloClusterHarness {
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 60;
+ }
+
+ private void checkSum(String tableName, Connector c) throws Exception {
+ Scanner s = c.createScanner(tableName, Authorizations.EMPTY);
+ Iterator<Entry<Key,Value>> i = s.iterator();
+ assertTrue(i.hasNext());
+ Entry<Key,Value> entry = i.next();
+ assertEquals("45", entry.getValue().toString());
+ assertFalse(i.hasNext());
+ }
+
+ @Test
+ public void aggregationTest() throws Exception {
+ Connector c = getConnector();
+ String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ IteratorSetting setting = new IteratorSetting(10, SummingCombiner.class);
+ SummingCombiner.setEncodingType(setting, Type.STRING);
+ SummingCombiner.setColumns(setting, Collections.singletonList(new IteratorSetting.Column("cf")));
+ c.tableOperations().attachIterator(tableName, setting);
+ BatchWriter bw = c.createBatchWriter(tableName, new BatchWriterConfig());
+ for (int i = 0; i < 10; i++) {
+ Mutation m = new Mutation("row1");
+ m.put("cf".getBytes(), "col1".getBytes(), ("" + i).getBytes());
+ bw.addMutation(m);
+ }
+ bw.close();
+ checkSum(tableName, c);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/accumulo/blob/01ae5b85/test/src/main/java/org/apache/accumulo/test/functional/CompactionIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CompactionIT.java b/test/src/main/java/org/apache/accumulo/test/functional/CompactionIT.java
new file mode 100644
index 0000000..862365f
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CompactionIT.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.accumulo.core.cli.ClientOpts.Password;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.ClientConfiguration;
+import org.apache.accumulo.core.client.ClientConfiguration.ClientProperty;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.InstanceOperations;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.harness.AccumuloClusterHarness;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Iterators;
+
+public class CompactionIT extends AccumuloClusterHarness {
+ private static final Logger log = LoggerFactory.getLogger(CompactionIT.class);
+
+ @Override
+ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+ cfg.setProperty(Property.INSTANCE_ZK_TIMEOUT, "5s");
+ cfg.setProperty(Property.TSERV_MAJC_THREAD_MAXOPEN, "4");
+ cfg.setProperty(Property.TSERV_MAJC_DELAY, "1");
+ cfg.setProperty(Property.TSERV_MAJC_MAXCONCURRENT, "1");
+ // use raw local file system so walogs sync and flush will work
+ hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+ }
+
+ @Override
+ protected int defaultTimeoutSeconds() {
+ return 4 * 60;
+ }
+
+ private String majcThreadMaxOpen, majcDelay, majcMaxConcurrent;
+
+ @Before
+ public void alterConfig() throws Exception {
+ if (ClusterType.STANDALONE == getClusterType()) {
+ InstanceOperations iops = getConnector().instanceOperations();
+ Map<String,String> config = iops.getSystemConfiguration();
+ majcThreadMaxOpen = config.get(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey());
+ majcDelay = config.get(Property.TSERV_MAJC_DELAY.getKey());
+ majcMaxConcurrent = config.get(Property.TSERV_MAJC_MAXCONCURRENT.getKey());
+
+ iops.setProperty(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey(), "4");
+ iops.setProperty(Property.TSERV_MAJC_DELAY.getKey(), "1");
+ iops.setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "1");
+
+ getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
+ getClusterControl().startAllServers(ServerType.TABLET_SERVER);
+ }
+ }
+
+ @After
+ public void resetConfig() throws Exception {
+ // Restore the system properties we overrode in alterConfig
+ if (null != majcThreadMaxOpen) {
+ InstanceOperations iops = getConnector().instanceOperations();
+
+ iops.setProperty(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey(), majcThreadMaxOpen);
+ iops.setProperty(Property.TSERV_MAJC_DELAY.getKey(), majcDelay);
+ iops.setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), majcMaxConcurrent);
+
+ getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
+ getClusterControl().startAllServers(ServerType.TABLET_SERVER);
+ }
+ }
+
+ @Test
+ public void test() throws Exception {
+ final Connector c = getConnector();
+ final String tableName = getUniqueNames(1)[0];
+ c.tableOperations().create(tableName);
+ c.tableOperations().setProperty(tableName, Property.TABLE_MAJC_RATIO.getKey(), "1.0");
+ FileSystem fs = getFileSystem();
+ Path root = new Path(cluster.getTemporaryPath(), getClass().getName());
+ Path testrf = new Path(root, "testrf");
+ FunctionalTestUtils.createRFiles(c, fs, testrf.toString(), 500000, 59, 4);
+
+ FunctionalTestUtils.bulkImport(c, fs, tableName, testrf.toString());
+ int beforeCount = countFiles(c);
+
+ final AtomicBoolean fail = new AtomicBoolean(false);
+ final ClientConfiguration clientConf = cluster.getClientConfig();
+ for (int count = 0; count < 5; count++) {
+ List<Thread> threads = new ArrayList<Thread>();
+ final int span = 500000 / 59;
+ for (int i = 0; i < 500000; i += 500000 / 59) {
+ final int finalI = i;
+ Thread t = new Thread() {
+ @Override
+ public void run() {
+ try {
+ VerifyIngest.Opts opts = new VerifyIngest.Opts();
+ opts.startRow = finalI;
+ opts.rows = span;
+ opts.random = 56;
+ opts.dataSize = 50;
+ opts.cols = 1;
+ opts.setTableName(tableName);
+ if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
+ opts.updateKerberosCredentials(clientConf);
+ } else {
+ opts.setPrincipal(getAdminPrincipal());
+ PasswordToken passwordToken = (PasswordToken) getAdminToken();
+ opts.setPassword(new Password(new String(passwordToken.getPassword(), UTF_8)));
+ }
+ VerifyIngest.verifyIngest(c, opts, new ScannerOpts());
+ } catch (Exception ex) {
+ log.warn("Got exception verifying data", ex);
+ fail.set(true);
+ }
+ }
+ };
+ t.start();
+ threads.add(t);
+ }
+ for (Thread t : threads)
+ t.join();
+ assertFalse("Failed to successfully run all threads, Check the test output for error", fail.get());
+ }
+
+ int finalCount = countFiles(c);
+ assertTrue(finalCount < beforeCount);
+ try {
+ getClusterControl().adminStopAll();
+ } finally {
+ // Make sure the internal state in the cluster is reset (e.g. processes in MAC)
+ getCluster().stop();
+ if (ClusterType.STANDALONE == getClusterType()) {
+ // Then restart things for the next test if it's a standalone
+ getCluster().start();
+ }
+ }
+ }
+
+ private int countFiles(Connector c) throws Exception {
+ Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+ s.fetchColumnFamily(MetadataSchema.TabletsSection.TabletColumnFamily.NAME);
+ s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+ return Iterators.size(s.iterator());
+ }
+
+}