Posted to commits@accumulo.apache.org by ct...@apache.org on 2013/07/17 04:33:03 UTC

[01/50] [abbrv] ACCUMULO-1537 converted many more functional tests to integration tests

Updated Branches:
  refs/heads/ACCUMULO-1496 5750b9020 -> f5324a227


http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/mapreduce.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/mapreduce.py b/test/system/auto/simple/mapreduce.py
deleted file mode 100755
index a63df9a..0000000
--- a/test/system/auto/simple/mapreduce.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import logging
-import unittest
-import time
-import sys
-import subprocess
-import hashlib
-import base64
-import re
-import glob
-import TestUtils
-import socket
-from TestUtils import TestUtilsMixin
-
-log = logging.getLogger('test.auto')
-
-def globbase(root, name):
-    return glob.glob(os.path.join(root, name))[0]
-
-def globa(name):
-    return globbase(TestUtils.ACCUMULO_HOME, name)
-
-class MapReduceTest(TestUtilsMixin,unittest.TestCase):
-    """The test is used to test the functionality of a map reduce job on accumulo
-       Here are the steps of this test
-       1.Create a file called mapred_ftest_input with x number of lines with 1 value per line
-       2.Put file on Hadoop
-       3.Run Map Reduce Test that hashes the lines in the input (MD5) and puts each hash on its own row
-       4.Generate Hashes on the same input in test
-       5.Read table and compare hashes. Fail if they do not match
-       6.Delete mapred_ftset_input from hadoop 
-    """
-    order = 21
-
-    tablename = "mapredf"
-    input_cfcq = "cf-HASHTYPE:cq-NOTHASHED"
-    output_cfcq = "cf-HASHTYPE:cq-MD5BASE64"
-    example_class_to_run ="org.apache.accumulo.examples.simple.mapreduce.RowHash"
-    
-    def setUp(self):
-        if not os.getenv("ZOOKEEPER_HOME"):
-            self.fail("ZOOKEEPER_HOME environment variable is not set please set the location of ZOOKEEPER home in this environment variable")
-            return
-        TestUtilsMixin.setUp(self)
-        
-    def tearDown(self):
-        TestUtilsMixin.tearDown(self)
-    
-    def runTest(self):
-        #These environment variables are needed to run this test; it will fail if they are not set
-        thriftjar = globa(os.path.join('lib','libthrift.jar'))
-        examples = globa(os.path.join('lib','accumulo-examples-simple.jar'))
-        core = globa(os.path.join('lib','accumulo-core.jar'))
-        fate = globa(os.path.join('lib','accumulo-fate.jar'))
-        start = globa(os.path.join('lib','accumulo-start.jar'))
-        jcommander = globa(os.path.join('lib','jcommander.jar'))
-        trace = globa(os.path.join('lib','accumulo-trace.jar'))
-        zkjar = globbase(os.getenv("ZOOKEEPER_HOME"),"zookeeper*[!javadoc|src|bin].jar")
-        self.createInputTableInAccumulo()
-        #Arguments for the Example Class
-        arg_list = ['-i', TestUtils.INSTANCE_NAME,
-                    '-z', TestUtils.ZOOKEEPERS,
-                    '-u', TestUtils.ROOT,
-                    '-p', TestUtils.ROOT_PASSWORD,
-                    '-t', self.tablename,
-                    '--column', self.input_cfcq]
-        #MapReduce class to run
-        mapred_class= [self.accumulo_sh(),self.example_class_to_run]
-        #classes needed to run the mapreduce
-        libjars = ["-libjars",",".join([zkjar,thriftjar,examples,core,fate,trace,jcommander])]
-        cmd = mapred_class+libjars+arg_list
-        if(self.isAccumuloRunning()):
-            log.debug("COMMAND:"+str(cmd))
-            handle = self.runOn(self.masterHost(), cmd)
-            out, err = handle.communicate()
-
-            log.debug(out)
-            log.debug(err)
-            log.debug("Return code: "+str(handle.returncode))
-
-            log.debug("\n\n!!!FINISHED!!!\n\n")
-            if(handle.returncode==0):
-                self.checkResults()
-            else:
-                self.fail("Test did not finish")
-
-    def isAccumuloRunning(self):
-        output = subprocess.Popen(["jps","-m"],stderr=subprocess.PIPE, stdout=subprocess.PIPE).communicate()[0]
-        if(output.find("tserver")!=-1 and output.find("master")!=-1):
-            return True
-        return False
-    
-    def retrieveValues(self,tablename,cfcq):
-        input = "table %s\nscan\n" % tablename
-        out,err,code = self.rootShell(self.masterHost(),input)
-        #print out
-        restr = "[0-9] %s \[\]    (.*)" % (cfcq)
-        val_list = re.findall(restr, out)
-        return val_list
-    
-    def checkResults(self):
-        control_values = [base64.b64encode(hashlib.md5("row%s"%(i)).digest()) for i in range(10)]
-        experiment_values = self.retrieveValues(self.tablename, self.output_cfcq)
-        self.failIf(len(control_values) != len(experiment_values), "Lists aren't the same length")
-        diff=[ev for ev in experiment_values if ev not in control_values]
-        self.failIf(len(diff)>0, "Original and MapReduced values aren't the same")
-    
-    def fakeMRResults(self):
-        vals = self.retrieveValues(self.tablename, self.input_cfcq)
-        values = ["insert %s %s %s\n" % (i,self.output_cfcq.replace(":"," "),base64.b64encode(hashlib.md5("row%s" % i).digest())) for i in range(10,20)]
-        input = "table %s\n" % (self.tablename,)+"".join(values)
-        out,err,code = self.rootShell(self.masterHost(),input)
-        #print "FAKE",out
-    
-    def createInputTableInAccumulo(self):
-        #my leet python list comprehensions skills in action
-        values = ["insert %s %s row%s\n" % (i,self.input_cfcq.replace(":"," "),i) for i in range(10)]
-        input = "createtable %s\ntable %s\n" % (self.tablename,self.tablename) + \
-                "".join(values)
-        out,err,code = self.rootShell(self.masterHost(),input)
-        #print "CREATE",out
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(MapReduceTest())
-    return result

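For reference, the control values that checkResults() above compares against are simply Base64-encoded MD5 digests of the strings row0 through row9. A minimal standalone sketch of the same computation (assuming Java 8's java.util.Base64; the class name is illustrative):

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.util.Base64;

    // Hypothetical standalone equivalent of the deleted test's control values:
    // base64.b64encode(hashlib.md5("row%s" % i).digest()) for i in range(10)
    public class RowHashControlValues {
      public static void main(String[] args) throws Exception {
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        for (int i = 0; i < 10; i++) {
          byte[] digest = md5.digest(("row" + i).getBytes(StandardCharsets.UTF_8));
          // digest() resets the MessageDigest, so it can be reused each iteration
          System.out.println(Base64.getEncoder().encodeToString(digest));
        }
      }
    }
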
http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/maxOpen.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/maxOpen.py b/test/system/auto/simple/maxOpen.py
deleted file mode 100755
index 6341d76..0000000
--- a/test/system/auto/simple/maxOpen.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from JavaTest import JavaTest
-
-import unittest
-
-class MaxOpenTest(JavaTest):
-    "Test doing lookups that exceed max files open"
-
-    order = 21
-    testClass="org.apache.accumulo.test.functional.MaxOpenTest"
-
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(MaxOpenTest())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/merge.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/merge.py b/test/system/auto/simple/merge.py
deleted file mode 100755
index a69e11a..0000000
--- a/test/system/auto/simple/merge.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-
-import unittest
-
-from TestUtils import TestUtilsMixin
-from JavaTest import JavaTest
-
-log = logging.getLogger('test.auto')
-
-class Merge(unittest.TestCase, TestUtilsMixin):
-    "Start a clean accumulo, split a table and merge part of it"
-    order = 80
-
-    def setUp(self):
-        TestUtilsMixin.setUp(self)
-
-    def runTest(self):
-        out, err, code = self.shell(self.masterHost(), '''
-createtable test
-addsplits a b c d e f g h i j k
-insert a cf cq value
-insert b cf cq value
-insert c cf cq value
-insert d cf cq value
-insert e cf cq value
-insert f cf cq value
-insert g cf cq value
-insert h cf cq value
-insert i cf cq value
-insert j cf cq value
-insert k cf cq value
-flush -w
-merge -b c1 -e f1
-getsplits
-quit
-''')
-        self.assert_(code == 0)
-        out = out[out.find('getsplits'):out.find('quit')]
-        self.assert_(len(out.split('\n')) == 10)
-        out, err, code = self.shell(self.masterHost(), '''
-scan -t test
-quit
-''')
-        out = out[out.find('test\n'):out.find('quit')]
-        self.assert_(len(out.split('\n')) == 13)
-
-
-class MergeSize(unittest.TestCase, TestUtilsMixin):
-    "Start a clean accumulo, split a table and merge based on size"
-    order = 80
-
-    def setUp(self):
-        TestUtilsMixin.setUp(self)
-
-    def runTest(self):
-        out, err, code = self.shell(self.masterHost(), '''
-createtable merge
-addsplits a b c d e f g h i j k l m n o p q r s t u v w x y z
-insert c cf cq mersydotesanddozeydotesanlittolamsiedives
-insert e cf cq mersydotesanddozeydotesanlittolamsiedives
-insert f cf cq mersydotesanddozeydotesanlittolamsiedives
-insert y cf cq mersydotesanddozeydotesanlittolamsiedives
-flush -w
-merge -s 100 -v
-getsplits
-merge -s 100 -f -v
-getsplits
-quit
-''')
-        self.assert_(code == 0)
-        out = out.split("getsplits")
-        firstMerge = out[-2]
-        firstMerge = firstMerge.strip().split('\n')[:-5]
-        self.assert_(firstMerge == ['b','c','d','e','f','x','y'])
-        secondMerge = out[-1]
-        secondMerge = secondMerge.strip().split('\n')[:-1]
-        self.assert_(secondMerge == ['c','e','f','y'])
-
-class MergeTest(JavaTest):
-    "Test Merge"
-
-    order = 92
-    testClass="org.apache.accumulo.test.functional.MergeTest"
-    maxRuntime = 200
-
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(Merge())
-    result.addTest(MergeSize())
-    result.addTest(MergeTest())
-    return result

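The Merge and MergeSize tests above drive merging through the shell; the same operation is available programmatically via TableOperations. A minimal sketch (the table name and range match the shell session above; conn is assumed to be an already-authenticated Connector to a running instance):

    import org.apache.accumulo.core.client.Connector;
    import org.apache.hadoop.io.Text;

    // Hypothetical helper showing the API equivalent of "merge -b c1 -e f1"
    // from the shell session above.
    public class MergeExample {
      public static void mergeRange(Connector conn) throws Exception {
        // Merge the tablets of table "test" whose rows fall between c1 and f1
        conn.tableOperations().merge("test", new Text("c1"), new Text("f1"));
      }
    }
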
http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/mergeMetadata.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/mergeMetadata.py b/test/system/auto/simple/mergeMetadata.py
deleted file mode 100755
index cc34fb5..0000000
--- a/test/system/auto/simple/mergeMetadata.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import logging
-import unittest
-
-from TestUtils import TestUtilsMixin
-
-log = logging.getLogger('test.auto')
-
-class MergeMeta(unittest.TestCase, TestUtilsMixin):
-    """Split and merge the !METADATA table"""
-
-    order = 30
-
-    settings = TestUtilsMixin.settings.copy()
-
-    def setUp(self):
-        TestUtilsMixin.setUp(self)
-    
-    def runTest(self):
-        out, err, code = self.shell(self.masterHost(), '''
-addsplits -t !METADATA 1 2 3 4 5
-createtable a1
-createtable a2
-createtable a3
-createtable a4
-createtable a5
-merge -t !METADATA
-yes
-sleep 2
-scan -np -t !METADATA
-''')
-        assert code == 0
-        # look for delete entries for the abandoned directories
-        assert out.find('~del') >= 0
-
-class MergeMetaFail(unittest.TestCase, TestUtilsMixin):
-    """test a failed merge of the !METADATA table"""
-
-    order = 30
-
-    settings = TestUtilsMixin.settings.copy()
-
-    def setUp(self):
-        TestUtilsMixin.setUp(self)
-
-    def runTest(self):
-        out, err, code = self.shell(self.masterHost(), '''
-merge -t !METADATA -b ! -e !!
-''')
-        assert code != 0
-
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(MergeMeta())
-    result.addTest(MergeMetaFail())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/range.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/range.py b/test/system/auto/simple/range.py
deleted file mode 100755
index 130940a..0000000
--- a/test/system/auto/simple/range.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from JavaTest import JavaTest
-
-import unittest
-
-class RangeTest(JavaTest):
-    "Test scanning different ranges in accumulo"
-
-    order = 21
-    testClass = "org.apache.accumulo.test.functional.ScanRangeTest"
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(RangeTest())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/rowDelete.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/rowDelete.py b/test/system/auto/simple/rowDelete.py
deleted file mode 100755
index 5de97bd..0000000
--- a/test/system/auto/simple/rowDelete.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from JavaTest import JavaTest
-
-import unittest
-
-class RowDeleteTest(JavaTest):
-    "Row Deletion Test"
-
-    order = 22
-    testClass="org.apache.accumulo.test.functional.RowDeleteTest"
-
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(RowDeleteTest())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/scanIter.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/scanIter.py b/test/system/auto/simple/scanIter.py
deleted file mode 100755
index 5fd048a..0000000
--- a/test/system/auto/simple/scanIter.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from JavaTest import JavaTest
-
-import unittest
-
-class ScanIteratorTest(JavaTest):
-    "Test setting iterators at scan time"
-
-    order = 21
-    testClass="org.apache.accumulo.test.functional.ScanIteratorTest"
-
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(ScanIteratorTest())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/scanSessionTimeout.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/scanSessionTimeout.py b/test/system/auto/simple/scanSessionTimeout.py
deleted file mode 100755
index a379c48..0000000
--- a/test/system/auto/simple/scanSessionTimeout.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from JavaTest import JavaTest
-
-import unittest
-
-class ScanSessionTimeOutTest(JavaTest):
-    "Test a scan session that times out midway through"
-
-    order = 20
-    testClass="org.apache.accumulo.test.functional.ScanSessionTimeOutTest"
-
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(ScanSessionTimeOutTest())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/security.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/security.py b/test/system/auto/simple/security.py
deleted file mode 100755
index 971dc11..0000000
--- a/test/system/auto/simple/security.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from JavaTest import JavaTest
-
-import unittest
-
-class SystemPermissionsTest(JavaTest):
-    "Tests accumulo system permissions"
-
-    order = 21
-    testClass="org.apache.accumulo.test.functional.PermissionsTest$SystemPermissionsTest"
-
-
-class TablePermissionsTest(JavaTest):
-    "Test accumulo table permissions"
-
-    order = 21
-    testClass="org.apache.accumulo.test.functional.PermissionsTest$TablePermissionsTest"
-
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(SystemPermissionsTest())
-    result.addTest(TablePermissionsTest())
-    return result
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/serverSideError.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/serverSideError.py b/test/system/auto/simple/serverSideError.py
deleted file mode 100755
index 8b5545c..0000000
--- a/test/system/auto/simple/serverSideError.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from JavaTest import JavaTest
-
-import unittest
-
-class ServerSideErrorTest(JavaTest):
-    "Verify clients throw exception when there is unexpected exception on server side"
-
-    order = 30
-    testClass="org.apache.accumulo.test.functional.ServerSideErrorTest"
-
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(ServerSideErrorTest())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/shutdown.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/shutdown.py b/test/system/auto/simple/shutdown.py
deleted file mode 100755
index 43807fb..0000000
--- a/test/system/auto/simple/shutdown.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import logging
-import unittest
-import sleep
-import signal
-import socket
-from subprocess import PIPE
-
-from TestUtils import TestUtilsMixin, ROOT, ROOT_PASSWORD, FUZZ
-from simple.readwrite import SunnyDayTest
-
-log = logging.getLogger('test.auto')
-
-class ShutdownDuringIngest(SunnyDayTest):
-
-    order = SunnyDayTest.order + 1
-
-    def runTest(self):
-        self.shutdown_accumulo()
-        
-
-class ShutdownDuringQuery(SunnyDayTest):
-
-    order = SunnyDayTest.order + 1
-
-    def runTest(self):
-        self.waitForStop(self.ingester, self.waitTime())
-
-        log.info("Verifying Ingestion")
-        for i in range(10):
-            h = self.verify(self.masterHost(),
-                            self.options.rows,
-                            size=self.options.size)
-        self.shutdown_accumulo()
-
-class ShutdownDuringDelete(SunnyDayTest):
-    
-    order = SunnyDayTest.order + 1
-
-    def runTest(self):
-        self.waitForStop(self.ingester, self.waitTime())
-        h = self.runClassOn(self.masterHost(), "org.apache.accumulo.test.TestRandomDeletes", [])
-        self.shutdown_accumulo()
-
-
-class ShutdownDuringDeleteTable(TestUtilsMixin, unittest.TestCase):
-    
-    order = SunnyDayTest.order + 1
-
-    def runTest(self):
-        ct = ''
-        dt = ''
-        for i in range(10):
-            ct += 'createtable test%02d\n' % i
-            dt += 'deletetable test%02d\n' % i
-        out, err, code = self.shell(self.masterHost(), ct)
-        handle = self.runOn(self.masterHost(),
-                            [self.accumulo_sh(),
-                             'shell', '-u', ROOT, '-p', ROOT_PASSWORD],
-                            stdin=PIPE)
-        handle.stdin.write(dt)
-        self.shutdown_accumulo()
-
-class AdminStopDuringStart(TestUtilsMixin, unittest.TestCase):
-
-    order = SunnyDayTest.order + 1
-    
-    def runTest(self):
-        self.clean_accumulo(self.masterHost())
-        self.start_accumulo()
-        handle = self.runOn(self.masterHost(),
-                            [self.accumulo_sh(),'admin','stop', socket.getfqdn() + ":%d" % (39000 + FUZZ)])
-
-class AdminStop(SunnyDayTest):
-
-    order = SunnyDayTest.order + 1
-    
-    def runTest(self):
-        self.waitForStop(self.ingester, self.waitTime())
-        handle = self.runOn(self.masterHost(),
-                            [self.accumulo_sh(),'admin','stop', socket.getfqdn() + ":%d" % (39000 + FUZZ)])
-        self.shutdown_accumulo()
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(ShutdownDuringIngest())
-    result.addTest(ShutdownDuringQuery())
-    result.addTest(ShutdownDuringDelete())
-    result.addTest(ShutdownDuringDeleteTable())
-    result.addTest(AdminStopDuringStart())
-    result.addTest(AdminStop())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/simpleBalancer.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/simpleBalancer.py b/test/system/auto/simple/simpleBalancer.py
deleted file mode 100755
index 7b57d2a..0000000
--- a/test/system/auto/simple/simpleBalancer.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import logging
-import unittest
-import time
-import sleep
-
-from TestUtils import TestUtilsMixin, ACCUMULO_DIR
-
-log = logging.getLogger('test.auto')
-
-from readwrite import SunnyDayTest, Interleaved
-from delete import DeleteTest
-
-class SimpleBalancerFairness(SunnyDayTest):
-    """Start a new table and make sure that active splits
-    are moved onto other servers"""
-
-    order = 80
-
-    settings = TestUtilsMixin.settings.copy()
-    settings.update({
-        'tserver.memory.maps.max':'10K',
-        'tserver.compaction.major.delay': 0,
-        })
-    tableSettings = SunnyDayTest.tableSettings.copy()
-    tableSettings['test_ingest'] = {
-        'table.split.threshold': '10K',
-        }
-    def setUp(self):
-        # ensure we have two servers
-        if len(self.options.hosts) == 1:
-            self.options.hosts.append('localhost')
-        self.options.hosts = self.options.hosts[:2]
-        
-        TestUtilsMixin.setUp(self)
-
-        # create a table with 200 splits
-        import tempfile
-        fileno, filename = tempfile.mkstemp()
-        fp = os.fdopen(fileno, "wb")
-        try:
-            for i in range(200):
-                fp.write("%08x\n" % (i * 1000))
-        finally:
-            fp.close()
-        self.createTable('unused', filename)
-
-        # create an empty table
-        self.createTable('test_ingest')
-
-    def runTest(self):
-
-        # start test ingestion
-        log.info("Starting Test Ingester")
-        self.ingester = self.ingest(self.masterHost(),
-                                    200000,
-                                    size=self.options.size)
-        self.waitForStop(self.ingester, 120)
-        self.shell(self.masterHost(), 'flush -t test_ingest')
-        self.waitForStop(self.verify(self.masterHost(), self.options.rows), 60)
-
-        # let the server split tablets and move them around
-        self.sleep(15)
-
-        # fetch the list of tablets from each server 
-        h = self.runOn(self.masterHost(),
-                       [self.accumulo_sh(),
-                        'org.apache.accumulo.test.GetMasterStats'])
-        out, err = h.communicate()
-        servers = {}
-        server = None
-        # if balanced based on ingest, the table that split due to ingest
-        # will be split evenly on both servers, not just one
-        table = ''
-        for line in out.split('\n'):
-            if line.find(' Name: ') == 0:
-                server = line[7:]
-                servers.setdefault(server, 0)
-            if line.find('Table: ') >= 0:
-                table = line.split(' ')[-1]
-            if line.find('    Tablets: ') == 0:
-                if table == '1':
-                    servers[server] += int(line.split()[-1])
-        log.info("Tablet counts " + repr(servers))
-
-        # we have two servers
-        self.assert_(len(servers.values()) == 2)
-        servers = servers.values()
-
-        # a server has more than 10 splits
-        self.assert_(servers[0] > 10)
-
-        # the ratio is roughly even
-        ratio = min(servers) / float(max(servers))
-        self.assert_(ratio > 0.5)
-        
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(SimpleBalancerFairness())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/sparseColumnFamily.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/sparseColumnFamily.py b/test/system/auto/simple/sparseColumnFamily.py
deleted file mode 100755
index e5675b1..0000000
--- a/test/system/auto/simple/sparseColumnFamily.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from JavaTest import JavaTest
-
-import unittest
-
-class SparseColumnFamilyTest(JavaTest):
-    "Test sparse column familes"
-
-    order = 21
-    testClass="org.apache.accumulo.test.functional.SparseColumnFamilyTest"
-
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(SparseColumnFamilyTest())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/split.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/split.py b/test/system/auto/simple/split.py
deleted file mode 100755
index 9d38a2a..0000000
--- a/test/system/auto/simple/split.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import logging
-import unittest
-import time
-import sleep
-
-from TestUtils import TestUtilsMixin, ACCUMULO_DIR
-
-log = logging.getLogger('test.auto')
-
-from readwrite import SunnyDayTest, Interleaved
-from delete import DeleteTest
-
-class TabletShouldSplit(SunnyDayTest):
-
-    order = 80
-
-    settings = TestUtilsMixin.settings.copy()
-    settings.update({
-        'tserver.memory.maps.max':'5K',
-        'tserver.compaction.major.delay': 1,
-        })
-    tableSettings = SunnyDayTest.tableSettings.copy()
-    tableSettings['test_ingest'] = {
-        'table.split.threshold': '256K',
-        'table.file.compress.blocksize': '1K',
-        }
-    def runTest(self):
-
-        self.waitForStop(self.ingester, 60)
-        self.waitForStop(self.verify(self.masterHost(), self.options.rows), 60)
-
-        # verify that we can read all the data: give it a minute to load
-        # tablets
-        self.waitForStop(self.verify(self.masterHost(), self.options.rows),
-                         120)
-
-        # let the server split tablets and move them around
-        self.sleep(10)
-        
-        # get the metadata
-        out, err, code = self.shell(self.masterHost(), 'table !METADATA\nscan -np\n')
-        self.assert_(code == 0)
-        lines = []
-        tableID = self.getTableId('test_ingest')
-        for line in out.split('\n'):
-            if line.find(tableID+';') >= 0:
-                line = line[line.find(';') + 1 : line.find(' ')].strip()
-                if line:
-                    lines.append(line)
-        # check that the row values aren't always whole rows, but something shorter
-        for line in lines:
-            if len(line) != 14:
-                # found a shortened split point; skip the else clause below
-                break
-        else:
-            # the loop never broke, so every split point was a full 14-char row
-            self.fail("The split points are not being shortened")
-
-        self.assert_(len(lines) > 10)
-
-        h = self.runOn(self.masterHost(), [self.accumulo_sh(),
-                                           'org.apache.accumulo.server.util.CheckForMetadataProblems',
-                                           '-u', 'root',
-                                           '-p', 'secret'])
-        out, err = h.communicate()
-        self.assert_(h.returncode == 0)
-        
-
-
-class InterleaveSplit(Interleaved):
-    order = 80
-
-    settings = TestUtilsMixin.settings.copy()
-    settings.update({
-        'tserver.memory.maps.max':'5K',
-        'tserver.compaction.major.delay': 1,
-        })
-    tableSettings = SunnyDayTest.tableSettings.copy()
-    tableSettings['test_ingest'] = {
-        'table.split.threshold': '10K',
-        }
-
-    def waitTime(self):
-        return Interleaved.waitTime(self) * 10
-
-    def runTest(self):
-        Interleaved.runTest(self)
-        handle = self.runOn(self.masterHost(), [
-            'hadoop', 'fs', '-ls', '%s/tables/%s' % (ACCUMULO_DIR,self.getTableId('test_ingest'))
-            ])
-        out, err = handle.communicate()
-        self.assert_(len(out.split('\n')) > 30)
-
-class DeleteSplit(DeleteTest):
-    order = 80
-        
-    settings = TestUtilsMixin.settings.copy()
-    settings.update({
-        'tserver.memory.maps.max': '50K',
-        'tserver.compaction.major.delay': 1,
-        })
-    tableSettings = SunnyDayTest.tableSettings.copy()
-    tableSettings['test_ingest'] = {
-        'table.split.threshold': '80K',
-        }
-
-    def runTest(self):
-        DeleteTest.runTest(self)
-        handle = self.runOn(self.masterHost(), [
-            'hadoop', 'fs', '-ls', '%s/tables/%s' % (ACCUMULO_DIR,self.getTableId('test_ingest'))
-            ])
-        out, err = handle.communicate()
-        self.assert_(len(out.split('\n')) > 20)
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(DeleteSplit())
-    result.addTest(TabletShouldSplit())
-    result.addTest(InterleaveSplit())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/start.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/start.py b/test/system/auto/simple/start.py
deleted file mode 100755
index bbf3fdf..0000000
--- a/test/system/auto/simple/start.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-
-import os
-from TestUtils import TestUtilsMixin, ROOT, ROOT_PASSWORD, ACCUMULO_DIR
-from subprocess import PIPE
-
-class Start(TestUtilsMixin, unittest.TestCase):
-
-    order = 21
-
-    def start(self, *args):
-        handle = self.runOn(self.masterHost(),
-                            [self.accumulo_sh(), 'org.apache.accumulo.start.TestMain'] + list(args), stdin=PIPE)
-        out, err = handle.communicate('')
-        return handle.returncode
-
-    def runTest(self):
-        assert self.start() != 0
-        assert self.start('success') == 0
-        assert self.start('exception') != 0
-        
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(Start())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/table.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/table.py b/test/system/auto/simple/table.py
deleted file mode 100755
index 89f346f..0000000
--- a/test/system/auto/simple/table.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import logging
-import unittest
-import time
-import re
-
-from TestUtils import TestUtilsMixin, ROOT, ROOT_PASSWORD, ACCUMULO_DIR
-
-log = logging.getLogger('test.auto')
-
-import readwrite
-
-class TableTest(readwrite.SunnyDayTest):
-    "Make a table, use it, delete it, make it again"
-
-    order = 25
-
-    def createTable(self, table):
-        
-        import tempfile
-        fileno, filename = tempfile.mkstemp()
-        fp = os.fdopen(fileno, "wb") 
-        try:
-            for i in range(0, 999999, 1000):
-                fp.write("%08x\n" % i)
-            fp.flush()
-            fp.close()
-            readwrite.SunnyDayTest.createTable(self, table, filename)
-        finally:
-            os.unlink(filename)
-
-    def sshell(self, msg):
-        return self.rootShell(self.masterHost(), msg)
-
-    def runTest(self):
-        waitTime = 120 * self.options.rows / 1e6 + 60
-
-        self.waitForStop(self.ingester, 90)
-        self.waitForStop(self.verify(self.masterHost(), self.options.rows),
-                         waitTime)
-        
-        #grab the table id before the table is deleted
-        firstTID = self.getTableId('test_ingest')
-
-        #verify things exist as expected before deleting, since after
-        #deleting we check for their absence... this will help detect
-        #changes in system behavior that might require test changes
-
-        #ensure entries in !METADATA
-        out, err, code = self.sshell("table !METADATA\nscan -np\n")
-        self.assert_(code == 0)
-        self.assert_(re.search('^'+firstTID, out, re.MULTILINE))
-
-        #ensure dir in hdfs
-        handle = self.runOn(self.masterHost(),
-                            ['hadoop', 'fs', '-ls', '%s/tables' % ACCUMULO_DIR])
-        out, err = handle.communicate()
-        self.assert_(out.find('%s/tables/%s' % (ACCUMULO_DIR,firstTID)) >= 0)
-
-        #delete the table
-        out, err, code = self.sshell("deletetable test_ingest\nyes\n")
-        self.assert_(code == 0)
-        self.shutdown_accumulo()
-        self.start_accumulo()
-   
-        #ensure no entries in !METADATA 
-        out, err, code = self.sshell("table !METADATA\nscan\n")
-        self.assert_(code == 0)
-        self.assert_(re.search('^'+firstTID, out, re.MULTILINE) == None)
-
-        #ensure no dir in HDFS
-        handle = self.runOn(self.masterHost(),
-                            ['hadoop', 'fs', '-ls', '%s/tables' % ACCUMULO_DIR])
-        out, err = handle.communicate()
-        self.assert_(out.find('%s/tables/%s' % (ACCUMULO_DIR,firstTID)) < 0)
-
-        out, err, code = self.sshell("table test_ingest\nscan -np\n")
-        self.assert_(code != 0)
-        self.assert_(out.find("Not in a table context.") >= 0)
-
-        self.createTable('test_ingest')
-        self.waitForStop(self.ingest(self.masterHost(), self.options.rows), 90)
-        self.waitForStop(self.verify(self.masterHost(), self.options.rows),
-                         waitTime)
-        self.shutdown_accumulo()
-        
-class CreateTableSplitFile(TableTest):
-    def createTable(self, table):
-        import tempfile
-        fileno, filename = tempfile.mkstemp()
-        fp = os.fdopen(fileno, "wb")
-        try:
-            fp.write("a\nb\nc\nd\na\nb\nc\n")
-            fp.close()
-            readwrite.SunnyDayTest.createTable(self, table, filename)
-        finally:
-            os.unlink(filename)
-
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(TableTest())
-    result.addTest(CreateTableSplitFile())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/tablets.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/tablets.py b/test/system/auto/simple/tablets.py
deleted file mode 100755
index e3e952d..0000000
--- a/test/system/auto/simple/tablets.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import logging
-import unittest
-import time
-from subprocess import PIPE
-
-from TestUtils import TestUtilsMixin
-
-log = logging.getLogger('test.auto')
-
-N = 10000    # rows to insert, 100 rows per tablet
-WAIT = (N / 1000. + 1) * 60
-
-class LotsOfTablets(TestUtilsMixin, unittest.TestCase):
-
-    order = 80
-
-    settings = TestUtilsMixin.settings.copy()
-    settings.update({
-        'table.split.threshold': 200,
-        'tserver.memory.maps.max':'128M'
-        })
-
-    def runTest(self):
-
-        # initialize the database
-        handle = self.runClassOn(self.masterHost(),
-                                 'org.apache.accumulo.test.CreateTestTable',
-                                 [str(N)])
-        self.waitForStop(handle, WAIT)
-        handle = self.runClassOn(self.masterHost(),
-                                 'org.apache.accumulo.test.CreateTestTable',
-                                 ['-readonly', str(N)])
-        self.waitForStop(handle, WAIT)
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(LotsOfTablets())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/timeout.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/timeout.py b/test/system/auto/simple/timeout.py
deleted file mode 100755
index 6d7765c..0000000
--- a/test/system/auto/simple/timeout.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from JavaTest import JavaTest
-
-import unittest
-
-class TimeoutTest(JavaTest):
-    "Test time out"
-
-    order = 91
-    testClass="org.apache.accumulo.test.functional.TimeoutTest"
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(TimeoutTest())
-    return result


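Taken together, the deletions above remove the Python harness; the replacement integration tests follow the MiniAccumuloCluster pattern visible in the Java diffs later in this message (MacTest and getConnector() appear in BigRootTabletIT and MetadataMaxFiles below). A hypothetical sketch of that shape, not one of the actual converted tests:

    import org.apache.accumulo.core.client.Connector;
    import org.junit.Test;

    // Illustrative shape of a converted test: extend the MacTest base class
    // (seen in the diffs below), get a Connector to the mini cluster, and
    // assert against the live instance instead of driving external scripts.
    public class ExampleConversionIT extends MacTest {
      @Test(timeout = 60 * 1000)
      public void test() throws Exception {
        Connector c = getConnector();          // connector to the MiniAccumuloCluster
        c.tableOperations().create("example"); // exercise the behavior under test here
      }
    }
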
[17/50] [abbrv] git commit: ACCUMULO-1537 fix compile error

Posted by ct...@apache.org.
ACCUMULO-1537 fix compile error

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1500063 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/98791b93
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/98791b93
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/98791b93

Branch: refs/heads/ACCUMULO-1496
Commit: 98791b932db375c661fe6113d1311671600343ae
Parents: 390ca3f
Author: Christopher Tubbs <ct...@apache.org>
Authored: Fri Jul 5 16:33:07 2013 +0000
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Fri Jul 5 16:33:07 2013 +0000

----------------------------------------------------------------------
 .../test/functional/BigRootTabletIT.java        | 17 ++++++++-------
 .../test/functional/MetadataMaxFiles.java       | 22 ++++++++++----------
 2 files changed, 20 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/98791b93/test/src/test/java/org/apache/accumulo/test/functional/BigRootTabletIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BigRootTabletIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BigRootTabletIT.java
index 308560f..6084aab 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/BigRootTabletIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/BigRootTabletIT.java
@@ -16,7 +16,7 @@
  */
 package org.apache.accumulo.test.functional;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertTrue;
 
 import java.util.HashMap;
 import java.util.Map;
@@ -26,9 +26,9 @@ import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.minicluster.MiniAccumuloConfig;
 import org.junit.Test;
 
@@ -37,13 +37,13 @@ public class BigRootTabletIT extends MacTest {
   
   @Override
   public void configure(MiniAccumuloConfig cfg) {
-    Map<String,String> siteConfig = new HashMap<String, String>();
+    Map<String,String> siteConfig = new HashMap<String,String>();
     siteConfig.put(Property.TABLE_SCAN_MAXMEM.getKey(), "1024");
     siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "60m");
-    cfg.setSiteConfig(siteConfig );
+    cfg.setSiteConfig(siteConfig);
   }
-
-  @Test(timeout=60*1000)
+  
+  @Test(timeout = 60 * 1000)
   public void test() throws Exception {
     Connector c = getConnector();
     c.tableOperations().addSplits(MetadataTable.NAME, FunctionalTestUtils.splits("0 1 2 3 4 5 6 7 8 9 a".split(" ")));
@@ -55,7 +55,8 @@ public class BigRootTabletIT extends MacTest {
     cluster.stop();
     cluster.start();
     int count = 0;
-    for (@SuppressWarnings("unused") Entry<Key,Value> entry : c.createScanner(RootTable.NAME, Authorizations.EMPTY))
+    for (@SuppressWarnings("unused")
+    Entry<Key,Value> entry : c.createScanner(RootTable.NAME, Authorizations.EMPTY))
       count++;
     assertTrue(count > 0);
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/98791b93/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFiles.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFiles.java b/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFiles.java
index ae503de..77e6b22 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFiles.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFiles.java
@@ -16,7 +16,7 @@
  */
 package org.apache.accumulo.test.functional;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
 
 import java.util.HashMap;
 import java.util.Map;
@@ -32,10 +32,10 @@ import org.apache.accumulo.core.master.thrift.MasterClientService.Client;
 import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
 import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.minicluster.MiniAccumuloConfig;
 import org.apache.accumulo.server.util.Admin;
@@ -47,13 +47,13 @@ public class MetadataMaxFiles extends MacTest {
   
   @Override
   public void configure(MiniAccumuloConfig cfg) {
-    Map<String,String> siteConfig = new HashMap<String, String>();
+    Map<String,String> siteConfig = new HashMap<String,String>();
     siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1");
     siteConfig.put(Property.TSERV_SCAN_MAX_OPENFILES.getKey(), "10");
-    cfg.setSiteConfig(siteConfig );
+    cfg.setSiteConfig(siteConfig);
   }
-
-  @Test(timeout=240*1000)
+  
+  @Test(timeout = 240 * 1000)
   public void test() throws Exception {
     Connector c = getConnector();
     SortedSet<Text> splits = new TreeSet<Text>();
@@ -71,15 +71,15 @@ public class MetadataMaxFiles extends MacTest {
       c.tableOperations().flush(MetadataTable.NAME, null, null, true);
       c.tableOperations().flush(RootTable.NAME, null, null, true);
     }
-    UtilWaitThread.sleep(20*1000);
+    UtilWaitThread.sleep(20 * 1000);
     log.info("shutting down");
     assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
     cluster.stop();
     log.info("starting up");
     cluster.start();
-
-    UtilWaitThread.sleep(30*1000);
-
+    
+    UtilWaitThread.sleep(30 * 1000);
+    
     while (true) {
       MasterMonitorInfo stats = null;
       TCredentials creds = CredentialHelper.create("root", new PasswordToken(MacTest.PASSWORD), c.getInstance().getInstanceName());


[15/50] [abbrv] git commit: ACCUMULO-1481 : Add tests for splitting/merging root table; refactor to consolidate metadata constants and structures in an organized way; begin consolidating metadata ops into a servicer interface to abstract the code that ac

Posted by ct...@apache.org.
ACCUMULO-1481 : Add tests for splitting/merging root table; refactor to consolidate metadata constants and structures in an organized way; begin consolidating metadata ops into a servicer interface to abstract the code that actually does the servicing of metadata for all tables; remove some special cases for checking for metadata tables/root tablet that no longer apply; make mock work with root table; fix SimpleGarbageCollector to confirm candidates for deletion by checking the root table as well as the regular metadata table


git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1499510 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/446a37a9
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/446a37a9
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/446a37a9

Branch: refs/heads/ACCUMULO-1496
Commit: 446a37a9795f2df7adc841154ca05add79cf286e
Parents: a5872e6
Author: Christopher Tubbs <ct...@apache.org>
Authored: Wed Jul 3 18:32:51 2013 +0000
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Wed Jul 3 18:32:51 2013 +0000

----------------------------------------------------------------------
 .../accumulo/core/client/ZooKeeperInstance.java |    2 +-
 .../core/client/admin/TableOperationsImpl.java  |   33 +-
 .../client/impl/MetadataLocationObtainer.java   |  206 ---
 .../core/client/impl/OfflineScanner.java        |   14 +-
 .../core/client/impl/RootTabletLocator.java     |    2 +-
 .../core/client/impl/ScannerOptions.java        |    8 +-
 .../core/client/impl/TabletLocator.java         |   11 +-
 .../core/client/impl/TabletLocatorImpl.java     |    2 +-
 .../impl/TabletServerBatchReaderIterator.java   |    4 +-
 .../client/impl/TabletServerBatchWriter.java    |   10 +-
 .../core/client/impl/ThriftScanner.java         |   18 +-
 .../accumulo/core/client/impl/Writer.java       |    6 +-
 .../core/client/mapred/InputFormatBase.java     |   19 +-
 .../core/client/mapreduce/InputFormatBase.java  |   19 +-
 .../mapreduce/lib/util/InputConfigurator.java   |    2 +-
 .../accumulo/core/client/mock/MockAccumulo.java |    4 +-
 .../core/client/mock/MockTableOperations.java   |   19 +-
 .../apache/accumulo/core/data/KeyExtent.java    |   22 +-
 .../core/metadata/MetadataLocationObtainer.java |  269 ++++
 .../core/metadata/MetadataServicer.java         |   67 +
 .../accumulo/core/metadata/MetadataTable.java   |   25 +
 .../accumulo/core/metadata/RootTable.java       |   46 +
 .../core/metadata/ServicerForMetadataTable.java |   32 +
 .../core/metadata/ServicerForRootTable.java     |   49 +
 .../core/metadata/ServicerForUserTables.java    |   32 +
 .../core/metadata/TableMetadataServicer.java    |  143 ++
 .../core/metadata/schema/DataFileValue.java     |   96 ++
 .../core/metadata/schema/MetadataSchema.java    |  236 ++++
 .../org/apache/accumulo/core/util/ColumnFQ.java |   24 -
 .../org/apache/accumulo/core/util/Merge.java    |   11 +-
 .../accumulo/core/util/MetadataTable.java       |  400 ------
 .../apache/accumulo/core/util/RootTable.java    |   42 -
 .../core/util/shell/commands/FlushCommand.java  |    2 +-
 .../util/shell/commands/GetSplitsCommand.java   |    9 +-
 .../util/shell/commands/OfflineCommand.java     |    2 +-
 .../core/util/shell/commands/OnlineCommand.java |    2 +-
 .../core/client/impl/TabletLocatorImplTest.java |   25 +-
 .../core/metadata/MetadataServicerTest.java     |   86 ++
 .../accumulo/core/util/MetadataTableTest.java   |  109 --
 .../org/apache/accumulo/proxy/SimpleTest.java   |   11 +-
 .../apache/accumulo/server/ServerConstants.java |   14 +-
 .../accumulo/server/client/BulkImporter.java    |    4 +-
 .../accumulo/server/client/HdfsZooInstance.java |    2 +-
 .../server/constraints/MetadataConstraints.java |   69 +-
 .../server/gc/GarbageCollectWriteAheadLogs.java |    6 +-
 .../server/gc/SimpleGarbageCollector.java       |   87 +-
 .../iterators/MetadataBulkLoadFilter.java       |   10 +-
 .../apache/accumulo/server/master/Master.java   |   35 +-
 .../server/master/TabletGroupWatcher.java       |   68 +-
 .../master/balancer/ChaoticLoadBalancer.java    |    2 +-
 .../server/master/state/MergeStats.java         |    6 +-
 .../server/master/state/MetaDataStateStore.java |   17 +-
 .../master/state/MetaDataTableScanner.java      |   27 +-
 .../master/state/RootTabletStateStore.java      |    5 +-
 .../server/master/state/TServerInstance.java    |   10 +-
 .../master/state/ZooTabletStateStore.java       |    6 +-
 .../server/master/tableOps/BulkImport.java      |   17 +-
 .../server/master/tableOps/CloneTable.java      |    8 +-
 .../server/master/tableOps/CompactRange.java    |   15 +-
 .../server/master/tableOps/CreateTable.java     |    6 +-
 .../server/master/tableOps/DeleteTable.java     |   11 +-
 .../server/master/tableOps/ExportTable.java     |   23 +-
 .../server/master/tableOps/ImportTable.java     |   17 +-
 .../server/master/tableOps/TableRangeOp.java    |    5 +
 .../accumulo/server/metanalysis/FilterMeta.java |    2 +-
 .../accumulo/server/metanalysis/IndexMeta.java  |    5 +-
 .../server/metanalysis/PrintEvents.java         |   16 +-
 .../server/monitor/servlets/TablesServlet.java  |    4 +-
 .../accumulo/server/problems/ProblemReport.java |    6 +-
 .../server/problems/ProblemReports.java         |    7 +-
 .../security/AuditedSecurityOperation.java      |    2 +-
 .../server/security/SecurityOperation.java      |    4 +-
 .../server/security/handler/ZKAuthorizor.java   |    2 +-
 .../server/security/handler/ZKPermHandler.java  |    4 +-
 .../accumulo/server/tabletserver/Compactor.java |    2 +-
 .../server/tabletserver/FileManager.java        |    2 +-
 .../server/tabletserver/MinorCompactor.java     |    2 +-
 .../accumulo/server/tabletserver/Tablet.java    |   83 +-
 .../tabletserver/TabletIteratorEnvironment.java |    2 +-
 .../server/tabletserver/TabletServer.java       |   53 +-
 .../TabletServerResourceManager.java            |    4 +-
 .../server/util/AddFilesWithMissingEntries.java |   22 +-
 .../org/apache/accumulo/server/util/Admin.java  |    2 +-
 .../server/util/CheckForMetadataProblems.java   |   17 +-
 .../server/util/FindOfflineTablets.java         |   13 +-
 .../apache/accumulo/server/util/Initialize.java |   43 +-
 .../accumulo/server/util/LocalityCheck.java     |   15 +-
 .../accumulo/server/util/MetadataTable.java     | 1275 -----------------
 .../accumulo/server/util/MetadataTableUtil.java | 1313 ++++++++++++++++++
 .../server/util/OfflineMetadataScanner.java     |   17 +-
 .../util/RemoveEntriesForMissingFiles.java      |   12 +-
 .../accumulo/server/util/TableDiskUsage.java    |   15 +-
 .../accumulo/server/util/TabletIterator.java    |   15 +-
 .../server/util/VerifyTabletAssignments.java    |   42 +-
 .../constraints/MetadataConstraintsTest.java    |   88 +-
 .../accumulo/server/gc/TestConfirmDeletes.java  |    2 +-
 .../iterators/MetadataBulkLoadFilterTest.java   |   41 +-
 .../accumulo/server/master/TestMergeState.java  |   20 +-
 .../master/state/RootTabletStateStoreTest.java  |    2 +-
 .../tabletserver/CheckTabletMetadataTest.java   |   45 +-
 .../apache/accumulo/server/util/CloneTest.java  |   79 +-
 .../server/util/TabletIteratorTest.java         |   11 +-
 .../accumulo/test/QueryMetadataTable.java       |    5 +-
 .../accumulo/test/TestMultiTableIngest.java     |    9 +-
 .../continuous/ContinuousStatsCollector.java    |    5 +-
 .../test/functional/FunctionalTest.java         |   10 +-
 .../test/functional/SplitRecoveryTest.java      |   49 +-
 .../metadata/MetadataBatchScanTest.java         |   12 +-
 .../performance/scan/CollectTabletStats.java    |   68 +-
 .../test/randomwalk/concurrent/AddSplits.java   |    2 +-
 .../randomwalk/concurrent/CheckBalance.java     |    5 +-
 .../test/randomwalk/concurrent/Merge.java       |    2 +-
 .../org/apache/accumulo/test/MetaSplitTest.java |   23 +-
 .../test/TestAccumuloSplitRecovery.java         |   18 +-
 .../org/apache/accumulo/test/VolumeTest.java    |    4 +-
 .../accumulo/test/functional/CompactionIT.java  |   20 +-
 .../test/functional/DynamicThreadPoolsIT.java   |    4 +-
 .../test/functional/FunctionalTestUtils.java    |   12 +-
 .../test/functional/GarbageCollectorIT.java     |   24 +-
 .../accumulo/test/functional/MergeMetaIT.java   |   17 +-
 .../accumulo/test/functional/PermissionsIT.java |   33 +-
 .../accumulo/test/functional/ReadWriteIT.java   |   68 +-
 .../accumulo/test/functional/SplitIT.java       |   31 +-
 .../accumulo/test/functional/TableIT.java       |   16 +-
 .../accumulo/test/functional/TimeoutIT.java     |    5 +-
 125 files changed, 3395 insertions(+), 2925 deletions(-)
----------------------------------------------------------------------
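Before the individual diffs, a sketch of the headline API change: MetadataTable and RootTable move from core.util to the new core.metadata package, and reads of tablet metadata now go through MetadataServicer, which hands back the servicer appropriate to the table (ServicerForRootTable, ServicerForMetadataTable, or ServicerForUserTables, per the file list above). A minimal usage sketch based on the TableOperationsImpl change below; the instance name, zookeepers, password, and table id "2" are illustrative assumptions:

import java.util.TreeMap;

import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.data.KeyExtent;
import org.apache.accumulo.core.metadata.MetadataServicer;
import org.apache.accumulo.core.security.CredentialHelper;
import org.apache.accumulo.core.security.thrift.TCredentials;

public class MetadataServicerSketch {
  public static void main(String[] args) throws Exception {
    // instance name, zookeepers, password, and table id are placeholders
    Instance instance = new ZooKeeperInstance("testInstance", "localhost:2181");
    TCredentials creds = CredentialHelper.create("root", new PasswordToken("secret"), instance.getInstanceName());

    // forTableId() picks the right servicer for the given table id
    TreeMap<KeyExtent,String> locations = new TreeMap<KeyExtent,String>();
    MetadataServicer.forTableId(instance, creds, "2").getTabletLocations(locations);

    for (KeyExtent extent : locations.keySet())
      System.out.println(extent + " served by " + locations.get(extent));
  }
}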


http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/client/ZooKeeperInstance.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/ZooKeeperInstance.java b/core/src/main/java/org/apache/accumulo/core/client/ZooKeeperInstance.java
index 087fc88..5b56adb 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/ZooKeeperInstance.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/ZooKeeperInstance.java
@@ -30,13 +30,13 @@ import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.file.FileUtil;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.util.ArgumentChecker;
 import org.apache.accumulo.core.util.ByteBufferUtil;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.OpTimer;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.ZooCache;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsImpl.java b/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsImpl.java
index ece5eff..3ad4217 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/admin/TableOperationsImpl.java
@@ -81,6 +81,9 @@ import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.master.state.tables.TableState;
 import org.apache.accumulo.core.master.thrift.MasterClientService;
 import org.apache.accumulo.core.master.thrift.TableOperation;
+import org.apache.accumulo.core.metadata.MetadataServicer;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
@@ -90,11 +93,9 @@ import org.apache.accumulo.core.util.ArgumentChecker;
 import org.apache.accumulo.core.util.ByteBufferUtil;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.LocalityGroupUtil;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.NamingThreadFactory;
 import org.apache.accumulo.core.util.OpTimer;
 import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.StringUtil;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.core.util.ThriftUtil;
@@ -436,7 +437,7 @@ public class TableOperationsImpl extends TableOperationsHelper {
   
   private void addSplits(String tableName, SortedSet<Text> partitionKeys, String tableId) throws AccumuloException, AccumuloSecurityException,
       TableNotFoundException, AccumuloServerException {
-    TabletLocator tabLocator = TabletLocator.getInstance(instance, new Text(tableId));
+    TabletLocator tabLocator = TabletLocator.getLocator(instance, new Text(tableId));
     
     for (Text split : partitionKeys) {
       boolean successful = false;
@@ -543,38 +544,34 @@ public class TableOperationsImpl extends TableOperationsHelper {
     ArgumentChecker.notNull(tableName);
     
     String tableId = Tables.getTableId(instance, tableName);
-    if (RootTable.ID.equals(tableId))
-      return Collections.emptyList();
     
-    SortedSet<KeyExtent> tablets = new TreeSet<KeyExtent>();
-    Map<KeyExtent,String> locations = new TreeMap<KeyExtent,String>();
+    TreeMap<KeyExtent,String> tabletLocations = new TreeMap<KeyExtent,String>();
     
     while (true) {
       try {
-        tablets.clear();
-        locations.clear();
+        tabletLocations.clear();
         // the following method throws AccumuloException for some conditions that should be retried
-        MetadataTable.getEntries(instance, credentials, tableId, true, locations, tablets);
+        MetadataServicer.forTableId(instance, credentials, tableId).getTabletLocations(tabletLocations);
         break;
       } catch (AccumuloSecurityException ase) {
         throw ase;
-      } catch (Throwable t) {
+      } catch (Exception e) {
         if (!Tables.exists(instance, tableId)) {
           throw new TableNotFoundException(tableId, tableName, null);
         }
         
-        if (t instanceof RuntimeException && t.getCause() instanceof AccumuloSecurityException) {
-          throw (AccumuloSecurityException) t.getCause();
+        if (e instanceof RuntimeException && e.getCause() instanceof AccumuloSecurityException) {
+          throw (AccumuloSecurityException) e.getCause();
         }
         
-        log.info(t.getMessage() + " ... retrying ...");
+        log.info(e.getMessage() + " ... retrying ...");
         UtilWaitThread.sleep(3000);
       }
     }
     
-    ArrayList<Text> endRows = new ArrayList<Text>(tablets.size());
+    ArrayList<Text> endRows = new ArrayList<Text>(tabletLocations.size());
     
-    for (KeyExtent ke : tablets)
+    for (KeyExtent ke : tabletLocations.keySet())
       if (ke.getEndRow() != null)
         endRows.add(ke.getEndRow());
     
@@ -1039,7 +1036,7 @@ public class TableOperationsImpl extends TableOperationsHelper {
     
     Map<String,Map<KeyExtent,List<Range>>> binnedRanges = new HashMap<String,Map<KeyExtent,List<Range>>>();
     String tableId = Tables.getTableId(instance, tableName);
-    TabletLocator tl = TabletLocator.getInstance(instance, new Text(tableId));
+    TabletLocator tl = TabletLocator.getLocator(instance, new Text(tableId));
     // it's possible that the cache could contain complete, but old, information about a table's tablets... so clear it
     tl.invalidateCache();
     while (!tl.binRanges(Collections.singletonList(range), binnedRanges, credentials).isEmpty()) {
@@ -1181,7 +1178,7 @@ public class TableOperationsImpl extends TableOperationsHelper {
   @Override
   public void clearLocatorCache(String tableName) throws TableNotFoundException {
     ArgumentChecker.notNull(tableName);
-    TabletLocator tabLocator = TabletLocator.getInstance(instance, new Text(Tables.getTableId(instance, tableName)));
+    TabletLocator tabLocator = TabletLocator.getLocator(instance, new Text(Tables.getTableId(instance, tableName)));
     tabLocator.invalidateCache();
   }
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/client/impl/MetadataLocationObtainer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/MetadataLocationObtainer.java b/core/src/main/java/org/apache/accumulo/core/client/impl/MetadataLocationObtainer.java
deleted file mode 100644
index 58ec913..0000000
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/MetadataLocationObtainer.java
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.core.client.impl;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.SortedMap;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.impl.TabletLocator.TabletLocation;
-import org.apache.accumulo.core.client.impl.TabletLocator.TabletLocations;
-import org.apache.accumulo.core.client.impl.TabletLocatorImpl.TabletLocationObtainer;
-import org.apache.accumulo.core.client.impl.TabletServerBatchReaderIterator.ResultReceiver;
-import org.apache.accumulo.core.data.Column;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.PartialKey;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.data.thrift.IterInfo;
-import org.apache.accumulo.core.iterators.user.WholeRowIterator;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.tabletserver.thrift.NotServingTabletException;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.OpTimer;
-import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.core.util.TextUtil;
-import org.apache.hadoop.io.Text;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-
-public class MetadataLocationObtainer implements TabletLocationObtainer {
-  private static final Logger log = Logger.getLogger(MetadataLocationObtainer.class);
-  private SortedSet<Column> locCols;
-  private ArrayList<Column> columns;
-  private Instance instance;
-  
-  MetadataLocationObtainer(Instance instance) {
-    
-    this.instance = instance;
-    
-    locCols = new TreeSet<Column>();
-    locCols.add(new Column(TextUtil.getBytes(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY), null, null));
-    locCols.add(MetadataTable.PREV_ROW_COLUMN.toColumn());
-    columns = new ArrayList<Column>(locCols);
-  }
-  
-  @Override
-  public TabletLocations lookupTablet(TabletLocation src, Text row, Text stopRow, TabletLocator parent, TCredentials credentials)
-      throws AccumuloSecurityException, AccumuloException {
-    
-    try {
-      ArrayList<TabletLocation> list = new ArrayList<TabletLocation>();
-      
-      OpTimer opTimer = null;
-      if (log.isTraceEnabled())
-        opTimer = new OpTimer(log, Level.TRACE).start("Looking up in " + src.tablet_extent.getTableId() + " row=" + TextUtil.truncate(row) + "  extent="
-            + src.tablet_extent + " tserver=" + src.tablet_location);
-      
-      Range range = new Range(row, true, stopRow, true);
-      
-      TreeMap<Key,Value> encodedResults = new TreeMap<Key,Value>();
-      TreeMap<Key,Value> results = new TreeMap<Key,Value>();
-      
-      // Use the whole row iterator so that a partial mutations is not read. The code that extracts locations for tablets does a sanity check to ensure there is
-      // only one location. Reading a partial mutation could make it appear there are multiple locations when there are not.
-      List<IterInfo> serverSideIteratorList = new ArrayList<IterInfo>();
-      serverSideIteratorList.add(new IterInfo(10000, WholeRowIterator.class.getName(), "WRI"));
-      Map<String,Map<String,String>> serverSideIteratorOptions = Collections.emptyMap();
-      
-      boolean more = ThriftScanner.getBatchFromServer(credentials, range, src.tablet_extent, src.tablet_location, encodedResults, locCols,
-          serverSideIteratorList, serverSideIteratorOptions, Constants.SCAN_BATCH_SIZE, Authorizations.EMPTY, false, instance.getConfiguration());
-      
-      decodeRows(encodedResults, results);
-      
-      if (more && results.size() == 1) {
-        range = new Range(results.lastKey().followingKey(PartialKey.ROW_COLFAM_COLQUAL_COLVIS_TIME), true, new Key(stopRow).followingKey(PartialKey.ROW), false);
-        encodedResults.clear();
-        more = ThriftScanner.getBatchFromServer(credentials, range, src.tablet_extent, src.tablet_location, encodedResults, locCols, serverSideIteratorList,
-            serverSideIteratorOptions, Constants.SCAN_BATCH_SIZE, Authorizations.EMPTY, false, instance.getConfiguration());
-        
-        decodeRows(encodedResults, results);
-      }
-      
-      if (opTimer != null)
-        opTimer.stop("Got " + results.size() + " results  from " + src.tablet_extent + " in %DURATION%");
-      
-      // System.out.println("results "+results.keySet());
-      
-      Pair<SortedMap<KeyExtent,Text>,List<KeyExtent>> metadata = MetadataTable.getMetadataLocationEntries(results);
-      
-      for (Entry<KeyExtent,Text> entry : metadata.getFirst().entrySet()) {
-        list.add(new TabletLocation(entry.getKey(), entry.getValue().toString()));
-      }
-      
-      return new TabletLocations(list, metadata.getSecond());
-      
-    } catch (AccumuloServerException ase) {
-      if (log.isTraceEnabled())
-        log.trace(src.tablet_extent.getTableId() + " lookup failed, " + src.tablet_location + " server side exception");
-      throw ase;
-    } catch (NotServingTabletException e) {
-      if (log.isTraceEnabled())
-        log.trace(src.tablet_extent.getTableId() + " lookup failed, " + src.tablet_location + " not serving " + src.tablet_extent);
-      parent.invalidateCache(src.tablet_extent);
-    } catch (AccumuloException e) {
-      if (log.isTraceEnabled())
-        log.trace(src.tablet_extent.getTableId() + " lookup failed", e);
-      parent.invalidateCache(src.tablet_location);
-    }
-    
-    return null;
-  }
-  
-  private void decodeRows(TreeMap<Key,Value> encodedResults, TreeMap<Key,Value> results) throws AccumuloException {
-    for (Entry<Key,Value> entry : encodedResults.entrySet()) {
-      try {
-        results.putAll(WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()));
-      } catch (IOException e) {
-        throw new AccumuloException(e);
-      }
-    }
-  }
-  
-  @Override
-  public List<TabletLocation> lookupTablets(String tserver, Map<KeyExtent,List<Range>> tabletsRanges, TabletLocator parent, TCredentials credentials)
-      throws AccumuloSecurityException, AccumuloException {
-    
-    final TreeMap<Key,Value> results = new TreeMap<Key,Value>();
-    
-    ArrayList<TabletLocation> list = new ArrayList<TabletLocation>();
-    
-    ResultReceiver rr = new ResultReceiver() {
-      
-      @Override
-      public void receive(List<Entry<Key,Value>> entries) {
-        for (Entry<Key,Value> entry : entries) {
-          try {
-            results.putAll(WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()));
-          } catch (IOException e) {
-            throw new RuntimeException(e);
-          }
-        }
-      }
-    };
-    
-    ScannerOptions opts = new ScannerOptions();
-    opts.fetchedColumns = locCols;
-    opts.serverSideIteratorList = new ArrayList<IterInfo>();
-    opts.serverSideIteratorList.add(new IterInfo(10000, WholeRowIterator.class.getName(), "WRI")); // see comment in lookupTablet about why iterator is
-                                                                                                   // used
-    
-    Map<KeyExtent,List<Range>> unscanned = new HashMap<KeyExtent,List<Range>>();
-    Map<KeyExtent,List<Range>> failures = new HashMap<KeyExtent,List<Range>>();
-    try {
-      TabletServerBatchReaderIterator.doLookup(tserver, tabletsRanges, failures, unscanned, rr, columns, credentials, opts, Authorizations.EMPTY,
-          instance.getConfiguration());
-      if (failures.size() > 0) {
-        // invalidate extents in parents cache
-        if (log.isTraceEnabled())
-          log.trace("lookupTablets failed for " + failures.size() + " extents");
-        parent.invalidateCache(failures.keySet());
-      }
-    } catch (IOException e) {
-      log.trace("lookupTablets failed server=" + tserver, e);
-      parent.invalidateCache(tserver);
-    } catch (AccumuloServerException e) {
-      log.trace("lookupTablets failed server=" + tserver, e);
-      throw e;
-    }
-    
-    SortedMap<KeyExtent,Text> metadata = MetadataTable.getMetadataLocationEntries(results).getFirst();
-    
-    for (Entry<KeyExtent,Text> entry : metadata.entrySet()) {
-      list.add(new TabletLocation(entry.getKey(), entry.getValue().toString()));
-    }
-    
-    return list;
-  }
-}
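
(Deleted here, but not gone: per the summary above, the class is recreated as org.apache.accumulo.core.metadata.MetadataLocationObtainer, growing from 206 to 269 lines, presumably to cover the root table as well.)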

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineScanner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineScanner.java b/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineScanner.java
index 9c53c1d..3dc19cd 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineScanner.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/OfflineScanner.java
@@ -53,6 +53,9 @@ import org.apache.accumulo.core.iterators.system.DeletingIterator;
 import org.apache.accumulo.core.iterators.system.MultiIterator;
 import org.apache.accumulo.core.iterators.system.VisibilityFilter;
 import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.accumulo.core.security.CredentialHelper;
@@ -60,7 +63,6 @@ import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.util.ArgumentChecker;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.LocalityGroupUtil;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.commons.lang.NotImplementedException;
@@ -230,7 +232,7 @@ class OfflineIterator implements Iterator<Entry<Key,Value>> {
     
     if (currentExtent != null && !extent.isPreviousExtent(currentExtent))
       throw new AccumuloException(" " + currentExtent + " is not previous extent " + extent);
-
+    
     String tablesDir = instance.getConfiguration().get(Property.INSTANCE_DFS_DIR) + "/tables";
     String[] volumes = instance.getConfiguration().get(Property.INSTANCE_VOLUMES).split(",");
     if (volumes.length > 1) {
@@ -271,16 +273,16 @@ class OfflineIterator implements Iterator<Entry<Key,Value>> {
       Entry<Key,Value> entry = row.next();
       Key key = entry.getKey();
       
-      if (key.getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
+      if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
         relFiles.add(key.getColumnQualifier().toString());
       }
       
-      if (key.getColumnFamily().equals(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY)
-          || key.getColumnFamily().equals(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY)) {
+      if (key.getColumnFamily().equals(TabletsSection.CurrentLocationColumnFamily.NAME)
+          || key.getColumnFamily().equals(TabletsSection.FutureLocationColumnFamily.NAME)) {
         location = entry.getValue().toString();
       }
       
-      if (MetadataTable.PREV_ROW_COLUMN.hasColumns(key)) {
+      if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
         extent = new KeyExtent(key.getRow(), entry.getValue());
       }
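
The pattern above recurs throughout this commit: column constants that used to hang off core.util.MetadataTable now live under the nested classes of MetadataSchema.TabletsSection. A short sketch of reading tablet locations with the new names (the Connector is assumed to be already built; error handling is elided):

import java.util.Map.Entry;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
import org.apache.accumulo.core.security.Authorizations;

public class TabletsSectionSketch {
  // prints the prev-row and current-location entries for every tablet
  static void dump(Connector conn) throws Exception {
    Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
    for (Entry<Key,Value> entry : scanner) {
      if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(entry.getKey()))
        System.out.println(entry.getKey().getRow() + " prev row: " + entry.getValue());
      else
        System.out.println(entry.getKey().getRow() + " location: " + entry.getValue());
    }
  }
}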
       

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/client/impl/RootTabletLocator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/RootTabletLocator.java b/core/src/main/java/org/apache/accumulo/core/client/impl/RootTabletLocator.java
index 72101cc..18b2a27 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/RootTabletLocator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/RootTabletLocator.java
@@ -28,8 +28,8 @@ import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.hadoop.io.Text;
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/client/impl/ScannerOptions.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ScannerOptions.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ScannerOptions.java
index 179ed08..7641755 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ScannerOptions.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ScannerOptions.java
@@ -46,7 +46,7 @@ public class ScannerOptions implements ScannerBase {
   protected SortedSet<Column> fetchedColumns = new TreeSet<Column>();
   
   protected long timeOut = Long.MAX_VALUE;
-
+  
   private String regexIterName = null;
   
   protected ScannerOptions() {}
@@ -102,7 +102,7 @@ public class ScannerOptions implements ScannerBase {
     
     serverSideIteratorOptions.remove(iteratorName);
   }
-    
+  
   /**
    * Override any existing options on the given named iterator
    */
@@ -190,7 +190,7 @@ public class ScannerOptions implements ScannerBase {
     if (timeOut < 0) {
       throw new IllegalArgumentException("TimeOut must be positive : " + timeOut);
     }
-
+    
     if (timeout == 0)
       this.timeOut = Long.MAX_VALUE;
     else
@@ -201,7 +201,7 @@ public class ScannerOptions implements ScannerBase {
   public long getTimeout(TimeUnit timeunit) {
     return timeunit.convert(timeOut, TimeUnit.MILLISECONDS);
   }
-
+  
   @Override
   public void close() {
     // Nothing needs to be closed

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocator.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocator.java
index 0cc012e..de8e053 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocator.java
@@ -31,10 +31,11 @@ import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.metadata.MetadataLocationObtainer;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.util.ArgumentChecker;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.hadoop.io.Text;
 
 public abstract class TabletLocator {
@@ -91,7 +92,7 @@ public abstract class TabletLocator {
   
   private static HashMap<LocatorKey,TabletLocator> locators = new HashMap<LocatorKey,TabletLocator>();
   
-  public static synchronized TabletLocator getInstance(Instance instance, Text tableId) {
+  public static synchronized TabletLocator getLocator(Instance instance, Text tableId) {
     
     LocatorKey key = new LocatorKey(instance.getInstanceID(), tableId);
     TabletLocator tl = locators.get(key);
@@ -101,9 +102,9 @@ public abstract class TabletLocator {
       if (tableId.toString().equals(RootTable.ID)) {
         tl = new RootTabletLocator(instance);
       } else if (tableId.toString().equals(MetadataTable.ID)) {
-        tl = new TabletLocatorImpl(new Text(MetadataTable.ID), getInstance(instance, new Text(RootTable.ID)), mlo);
+        tl = new TabletLocatorImpl(new Text(MetadataTable.ID), getLocator(instance, new Text(RootTable.ID)), mlo);
       } else {
-        tl = new TabletLocatorImpl(tableId, getInstance(instance, new Text(MetadataTable.ID)), mlo);
+        tl = new TabletLocatorImpl(tableId, getLocator(instance, new Text(MetadataTable.ID)), mlo);
       }
       locators.put(key, tl);
     }
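
The getInstance-to-getLocator rename above ripples through the rest of this commit; the new name avoids reading like a singleton accessor for TabletLocator itself. A hedged sketch of the call-site pattern (the instance and table id are placeholders):

import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.impl.TabletLocator;
import org.apache.hadoop.io.Text;

public class LocatorSketch {
  // Locators chain upward, as the factory above shows: a user table's locator
  // consults the metadata table's locator, which consults the root table's.
  static TabletLocator freshLocatorFor(Instance instance, String tableId) {
    TabletLocator locator = TabletLocator.getLocator(instance, new Text(tableId));
    locator.invalidateCache(); // drop any stale cached locations first
    return locator;
  }
}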

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocatorImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocatorImpl.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocatorImpl.java
index a1e2e8a..df5d66b 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocatorImpl.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletLocatorImpl.java
@@ -399,7 +399,7 @@ public class TabletLocatorImpl extends TabletLocator {
     
     if (ptl != null) {
       TabletLocations locations = locationObtainer.lookupTablet(ptl, metadataRow, lastTabletRow, parent, credentials);
-      while (locations != null && locations.getLocations().isEmpty() && locations.getLocationless().isEmpty() && !ptl.tablet_extent.isRootTablet()) {
+      while (locations != null && locations.getLocations().isEmpty() && locations.getLocationless().isEmpty()) {
         // try the next tablet; the current tablet does not have any tablets that overlap the row
         Text er = ptl.tablet_extent.getEndRow();
         if (er != null && er.compareTo(lastTabletRow) < 0) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java
index 883d1a3..a8ce55e 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchReaderIterator.java
@@ -144,7 +144,7 @@ public class TabletServerBatchReaderIterator implements Iterator<Entry<Key,Value
     this.options = new ScannerOptions(scannerOptions);
     resultsQueue = new ArrayBlockingQueue<List<Entry<Key,Value>>>(numThreads);
     
-    this.locator = new TimeoutTabletLocator(TabletLocator.getInstance(instance, new Text(table)), timeout);
+    this.locator = new TimeoutTabletLocator(TabletLocator.getLocator(instance, new Text(table)), timeout);
     
     timeoutTrackers = Collections.synchronizedMap(new HashMap<String,TabletServerBatchReaderIterator.TimeoutTracker>());
     timedoutServers = Collections.synchronizedSet(new HashSet<String>());
@@ -604,7 +604,7 @@ public class TabletServerBatchReaderIterator implements Iterator<Entry<Key,Value
     }
   }
   
-  static void doLookup(String server, Map<KeyExtent,List<Range>> requested, Map<KeyExtent,List<Range>> failures, Map<KeyExtent,List<Range>> unscanned,
+  public static void doLookup(String server, Map<KeyExtent,List<Range>> requested, Map<KeyExtent,List<Range>> failures, Map<KeyExtent,List<Range>> unscanned,
       ResultReceiver receiver, List<Column> columns, TCredentials credentials, ScannerOptions options, Authorizations authorizations, AccumuloConfiguration conf)
       throws IOException, AccumuloSecurityException, AccumuloServerException {
     doLookup(server, requested, failures, unscanned, receiver, columns, credentials, options, authorizations, conf, new TimeoutTracker(Long.MAX_VALUE));
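
The widened visibility here, like the matching change to ThriftScanner.getBatchFromServer below, follows from the package move: MetadataLocationObtainer now lives in org.apache.accumulo.core.metadata rather than org.apache.accumulo.core.client.impl, so it can no longer reach these formerly package-private methods.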

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchWriter.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchWriter.java b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchWriter.java
index 772e0aa..766cea9 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchWriter.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/TabletServerBatchWriter.java
@@ -55,12 +55,12 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.thrift.TMutation;
 import org.apache.accumulo.core.data.thrift.UpdateErrors;
 import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.tabletserver.thrift.ConstraintViolationException;
 import org.apache.accumulo.core.tabletserver.thrift.NoSuchScanIDException;
 import org.apache.accumulo.core.tabletserver.thrift.NotServingTabletException;
 import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.SimpleThreadPool;
 import org.apache.accumulo.core.util.ThriftUtil;
 import org.apache.accumulo.trace.instrument.Span;
@@ -631,7 +631,7 @@ public class TabletServerBatchWriter {
     private TabletLocator getLocator(String tableId) {
       TabletLocator ret = locators.get(tableId);
       if (ret == null) {
-        ret = TabletLocator.getInstance(instance, new Text(tableId));
+        ret = TabletLocator.getLocator(instance, new Text(tableId));
         ret = new TimeoutTabletLocator(ret, timeout);
         locators.put(tableId, ret);
       }
@@ -831,7 +831,7 @@ public class TabletServerBatchWriter {
             tables.add(ke.getTableId().toString());
           
           for (String table : tables)
-            TabletLocator.getInstance(instance, new Text(table)).invalidateCache(location);
+            TabletLocator.getLocator(instance, new Text(table)).invalidateCache(location);
           
           failedMutations.add(location, tsm);
         } finally {
@@ -868,7 +868,7 @@ public class TabletServerBatchWriter {
               client.update(tinfo, credentials, entry.getKey().toThrift(), entry.getValue().get(0).toThrift());
             } catch (NotServingTabletException e) {
               allFailures.addAll(entry.getKey().getTableId().toString(), entry.getValue());
-              TabletLocator.getInstance(instance, new Text(entry.getKey().getTableId())).invalidateCache(entry.getKey());
+              TabletLocator.getLocator(instance, new Text(entry.getKey().getTableId())).invalidateCache(entry.getKey());
             } catch (ConstraintViolationException e) {
               updatedConstraintViolations(Translator.translate(e.violationSummaries, Translator.TCVST));
             }
@@ -909,7 +909,7 @@ public class TabletServerBatchWriter {
               
               String table = failedExtent.getTableId().toString();
               
-              TabletLocator.getInstance(instance, new Text(table)).invalidateCache(failedExtent);
+              TabletLocator.getLocator(instance, new Text(table)).invalidateCache(failedExtent);
               
               ArrayList<Mutation> mutations = (ArrayList<Mutation>) tabMuts.get(failedExtent);
               allFailures.addAll(table, mutations.subList(numCommitted, mutations.size()));

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftScanner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftScanner.java b/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftScanner.java
index 37e4579..584b56c 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftScanner.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/ThriftScanner.java
@@ -66,8 +66,6 @@ import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.apache.thrift.TApplicationException;
 import org.apache.thrift.TException;
-import org.apache.thrift.TServiceClient;
-
 
 public class ThriftScanner {
   private static final Logger log = Logger.getLogger(ThriftScanner.class);
@@ -80,7 +78,7 @@ public class ThriftScanner {
     }
   }
   
-  static boolean getBatchFromServer(TCredentials credentials, Range range, KeyExtent extent, String server, SortedMap<Key,Value> results,
+  public static boolean getBatchFromServer(TCredentials credentials, Range range, KeyExtent extent, String server, SortedMap<Key,Value> results,
       SortedSet<Column> fetchedColumns, List<IterInfo> serverSideIteratorList, Map<String,Map<String,String>> serverSideIteratorOptions, int size,
       Authorizations authorizations, boolean retry, AccumuloConfiguration conf) throws AccumuloException, AccumuloSecurityException, NotServingTabletException {
     if (server == null)
@@ -111,7 +109,7 @@ public class ThriftScanner {
         
         return isr.result.more;
       } finally {
-        ThriftUtil.returnClient((TServiceClient) client);
+        ThriftUtil.returnClient(client);
       }
     } catch (TApplicationException tae) {
       throw new AccumuloServerException(server, tae);
@@ -122,7 +120,7 @@ public class ThriftScanner {
       throw new AccumuloSecurityException(e.user, e.code, e);
     } catch (TException e) {
       log.debug("Error getting transport to " + server + " : " + e);
-    } 
+    }
     
     throw new AccumuloException("getBatchFromServer: failed");
   }
@@ -213,8 +211,8 @@ public class ThriftScanner {
           
           Span locateSpan = Trace.start("scan:locateTablet");
           try {
-            loc = TabletLocator.getInstance(instance, scanState.tableId).locateTablet(scanState.startRow, scanState.skipStartRow, false, credentials);
-
+            loc = TabletLocator.getLocator(instance, scanState.tableId).locateTablet(scanState.startRow, scanState.skipStartRow, false, credentials);
+            
             if (loc == null) {
               if (!Tables.exists(instance, scanState.tableId.toString()))
                 throw new TableDeletedException(scanState.tableId.toString());
@@ -281,7 +279,7 @@ public class ThriftScanner {
             log.trace(error);
           lastError = error;
           
-          TabletLocator.getInstance(instance, scanState.tableId).invalidateCache(loc.tablet_extent);
+          TabletLocator.getLocator(instance, scanState.tableId).invalidateCache(loc.tablet_extent);
           loc = null;
           
           // no need to try the current scan id somewhere else
@@ -327,7 +325,7 @@ public class ThriftScanner {
           
           UtilWaitThread.sleep(100);
         } catch (TException e) {
-          TabletLocator.getInstance(instance, scanState.tableId).invalidateCache(loc.tablet_location);
+          TabletLocator.getLocator(instance, scanState.tableId).invalidateCache(loc.tablet_location);
           error = "Scan failed, thrift error " + e.getClass().getName() + "  " + e.getMessage() + " " + loc;
           if (!error.equals(lastError))
             log.debug(error);
@@ -443,7 +441,7 @@ public class ThriftScanner {
     } catch (ThriftSecurityException e) {
       throw new AccumuloSecurityException(e.user, e.code, e);
     } finally {
-      ThriftUtil.returnClient((TServiceClient) client);
+      ThriftUtil.returnClient(client);
       Thread.currentThread().setName(old);
     }
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/client/impl/Writer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/impl/Writer.java b/core/src/main/java/org/apache/accumulo/core/client/impl/Writer.java
index cec700d..85b1869 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/impl/Writer.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/impl/Writer.java
@@ -84,7 +84,7 @@ public class Writer {
       throw new IllegalArgumentException("Can not add empty mutations");
     
     while (true) {
-      TabletLocation tabLoc = TabletLocator.getInstance(instance, table).locateTablet(new Text(m.getRow()), false, true, credentials);
+      TabletLocation tabLoc = TabletLocator.getLocator(instance, table).locateTablet(new Text(m.getRow()), false, true, credentials);
       
       if (tabLoc == null) {
         log.trace("No tablet location found for row " + new String(m.getRow()));
@@ -97,10 +97,10 @@ public class Writer {
         return;
       } catch (NotServingTabletException e) {
         log.trace("Not serving tablet, server = " + tabLoc.tablet_location);
-        TabletLocator.getInstance(instance, table).invalidateCache(tabLoc.tablet_extent);
+        TabletLocator.getLocator(instance, table).invalidateCache(tabLoc.tablet_extent);
       } catch (TException e) {
         log.error("error sending update to " + tabLoc.tablet_location + ": " + e);
-        TabletLocator.getInstance(instance, table).invalidateCache(tabLoc.tablet_extent);
+        TabletLocator.getLocator(instance, table).invalidateCache(tabLoc.tablet_extent);
       } 
       
       UtilWaitThread.sleep(500);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java b/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java
index f6fc744..e5829af 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapred/InputFormatBase.java
@@ -53,10 +53,11 @@ import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.hadoop.io.Text;
@@ -687,10 +688,10 @@ public abstract class InputFormatBase<K,V> implements InputFormat<K,V> {
       
       Range metadataRange = new Range(new KeyExtent(new Text(tableId), startRow, null).getMetadataEntry(), true, null, false);
       Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-      MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
-      scanner.fetchColumnFamily(MetadataTable.LAST_LOCATION_COLUMN_FAMILY);
-      scanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
-      scanner.fetchColumnFamily(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY);
+      TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
+      scanner.fetchColumnFamily(TabletsSection.LastLocationColumnFamily.NAME);
+      scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
+      scanner.fetchColumnFamily(TabletsSection.FutureLocationColumnFamily.NAME);
       scanner.setRange(metadataRange);
       
       RowIterator rowIter = new RowIterator(scanner);
@@ -707,16 +708,16 @@ public abstract class InputFormatBase<K,V> implements InputFormat<K,V> {
           Entry<Key,Value> entry = row.next();
           Key key = entry.getKey();
           
-          if (key.getColumnFamily().equals(MetadataTable.LAST_LOCATION_COLUMN_FAMILY)) {
+          if (key.getColumnFamily().equals(TabletsSection.LastLocationColumnFamily.NAME)) {
             last = entry.getValue().toString();
           }
           
-          if (key.getColumnFamily().equals(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY)
-              || key.getColumnFamily().equals(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY)) {
+          if (key.getColumnFamily().equals(TabletsSection.CurrentLocationColumnFamily.NAME)
+              || key.getColumnFamily().equals(TabletsSection.FutureLocationColumnFamily.NAME)) {
             location = entry.getValue().toString();
           }
           
-          if (MetadataTable.PREV_ROW_COLUMN.hasColumns(key)) {
+          if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
             extent = new KeyExtent(key.getRow(), entry.getValue());
           }
           

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
index c280b97..ea40e02 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/InputFormatBase.java
@@ -64,10 +64,11 @@ import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.VersioningIterator;
 import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.hadoop.conf.Configuration;
@@ -711,10 +712,10 @@ public abstract class InputFormatBase<K,V> extends InputFormat<K,V> {
       
       Range metadataRange = new Range(new KeyExtent(new Text(tableId), startRow, null).getMetadataEntry(), true, null, false);
       Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-      MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
-      scanner.fetchColumnFamily(MetadataTable.LAST_LOCATION_COLUMN_FAMILY);
-      scanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
-      scanner.fetchColumnFamily(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY);
+      TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
+      scanner.fetchColumnFamily(TabletsSection.LastLocationColumnFamily.NAME);
+      scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
+      scanner.fetchColumnFamily(TabletsSection.FutureLocationColumnFamily.NAME);
       scanner.setRange(metadataRange);
       
       RowIterator rowIter = new RowIterator(scanner);
@@ -731,16 +732,16 @@ public abstract class InputFormatBase<K,V> extends InputFormat<K,V> {
           Entry<Key,Value> entry = row.next();
           Key key = entry.getKey();
           
-          if (key.getColumnFamily().equals(MetadataTable.LAST_LOCATION_COLUMN_FAMILY)) {
+          if (key.getColumnFamily().equals(TabletsSection.LastLocationColumnFamily.NAME)) {
             last = entry.getValue().toString();
           }
           
-          if (key.getColumnFamily().equals(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY)
-              || key.getColumnFamily().equals(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY)) {
+          if (key.getColumnFamily().equals(TabletsSection.CurrentLocationColumnFamily.NAME)
+              || key.getColumnFamily().equals(TabletsSection.FutureLocationColumnFamily.NAME)) {
             location = entry.getValue().toString();
           }
           
-          if (MetadataTable.PREV_ROW_COLUMN.hasColumns(key)) {
+          if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
             extent = new KeyExtent(key.getRow(), entry.getValue());
           }
           

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java
index c582a72..15e045b 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/util/InputConfigurator.java
@@ -480,7 +480,7 @@ public class InputConfigurator extends ConfiguratorBase {
       return new MockTabletLocator();
     Instance instance = getInstance(implementingClass, conf);
     String tableName = getInputTableName(implementingClass, conf);
-    return TabletLocator.getInstance(instance, new Text(Tables.getTableId(instance, tableName)));
+    return TabletLocator.getLocator(instance, new Text(Tables.getTableId(instance, tableName)));
   }
   
   // InputFormat doesn't have the equivalent of OutputFormat's checkOutputSpecs(JobContext job)

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/client/mock/MockAccumulo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockAccumulo.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockAccumulo.java
index 346e6ee..5ee144d 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockAccumulo.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockAccumulo.java
@@ -26,11 +26,11 @@ import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.admin.TimeType;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.SystemPermission;
 import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.Text;
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
index bf7e7f9..2c416c6 100644
--- a/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/client/mock/MockTableOperations.java
@@ -46,6 +46,8 @@ import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.file.FileSKVIterator;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;
@@ -122,7 +124,7 @@ public class MockTableOperations extends TableOperationsHelper {
       throw new TableNotFoundException(tableName, tableName, "");
     return acu.getSplits(tableName);
   }
-
+  
   @Override
   public Collection<Text> listSplits(String tableName, int maxSplits) throws TableNotFoundException {
     return listSplits(tableName);
@@ -297,20 +299,25 @@ public class MockTableOperations extends TableOperationsHelper {
   public Map<String,String> tableIdMap() {
     Map<String,String> result = new HashMap<String,String>();
     for (String table : acu.tables.keySet()) {
-      result.put(table, table);
+      if (RootTable.NAME.equals(table))
+        result.put(table, RootTable.ID);
+      else if (MetadataTable.NAME.equals(table))
+        result.put(table, MetadataTable.ID);
+      else
+        result.put(table, table);
     }
     return result;
   }
-
+  
   @Override
   public List<DiskUsage> getDiskUsage(Set<String> tables) throws AccumuloException, AccumuloSecurityException {
-
+    
     List<DiskUsage> diskUsages = new ArrayList<DiskUsage>();
     diskUsages.add(new DiskUsage(new TreeSet<String>(tables), 0l));
-
+    
     return diskUsages;
   }
-
+  
   @Override
   public void merge(String tableName, Text start, Text end) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
     if (!exists(tableName))

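With this change, the mock's tableIdMap() reports the reserved system-table ids instead of echoing the system-table names, matching the real implementation. For a hypothetical mock connector holding one user table "trades", the mapping would come back as follows (a sketch, not output from the patch itself):

    import java.util.Map;

    Map<String,String> ids = connector.tableOperations().tableIdMap();
    // yields { "!!ROOT" -> "!!R", "!METADATA" -> "!0", "trades" -> "trades" }
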
http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/data/KeyExtent.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/data/KeyExtent.java b/core/src/main/java/org/apache/accumulo/core/data/KeyExtent.java
index dc45e91..fc766fe 100644
--- a/core/src/main/java/org/apache/accumulo/core/data/KeyExtent.java
+++ b/core/src/main/java/org/apache/accumulo/core/data/KeyExtent.java
@@ -40,9 +40,11 @@ import java.util.UUID;
 import java.util.WeakHashMap;
 
 import org.apache.accumulo.core.data.thrift.TKeyExtent;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.util.ByteBufferUtil;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.hadoop.io.BinaryComparable;
 import org.apache.hadoop.io.Text;
@@ -128,18 +130,8 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
     return getMetadataEntry(getTableId(), getEndRow());
   }
   
-  public static Text getMetadataEntry(Text table, Text row) {
-    Text entry = new Text(table);
-    
-    if (row == null) {
-      entry.append(new byte[] {'<'}, 0, 1);
-    } else {
-      entry.append(new byte[] {';'}, 0, 1);
-      entry.append(row.getBytes(), 0, row.getLength());
-    }
-    
-    return entry;
-    
+  public static Text getMetadataEntry(Text tableId, Text endRow) {
+    return MetadataSchema.TabletsSection.getRow(tableId, endRow);
   }
   
   // constructor for loading extents from metadata rows
@@ -398,7 +390,7 @@ public class KeyExtent implements WritableComparable<KeyExtent> {
   
   public static Mutation getPrevRowUpdateMutation(KeyExtent ke) {
     Mutation m = new Mutation(ke.getMetadataEntry());
-    MetadataTable.PREV_ROW_COLUMN.put(m, encodePrevEndRow(ke.getPrevEndRow()));
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, encodePrevEndRow(ke.getPrevEndRow()));
     return m;
   }
   

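The rewritten getMetadataEntry delegates to MetadataSchema.TabletsSection.getRow, which produces the same row encoding the removed body built by hand: the table id, then ';' plus the end row for a bounded tablet, or a trailing '<' for the default tablet. A minimal sketch of that encoding, using a hypothetical table id "2":

    import org.apache.accumulo.core.data.KeyExtent;
    import org.apache.hadoop.io.Text;

    // Hypothetical table id "2"; the results match the hand-built encoding above.
    Text bounded = KeyExtent.getMetadataEntry(new Text("2"), new Text("m")); // row "2;m"
    Text defaultTablet = KeyExtent.getMetadataEntry(new Text("2"), null);    // row "2<"
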
http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/metadata/MetadataLocationObtainer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/MetadataLocationObtainer.java b/core/src/main/java/org/apache/accumulo/core/metadata/MetadataLocationObtainer.java
new file mode 100644
index 0000000..237cacc
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/MetadataLocationObtainer.java
@@ -0,0 +1,269 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.metadata;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.SortedMap;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.impl.AccumuloServerException;
+import org.apache.accumulo.core.client.impl.ScannerOptions;
+import org.apache.accumulo.core.client.impl.TabletLocator;
+import org.apache.accumulo.core.client.impl.TabletLocator.TabletLocation;
+import org.apache.accumulo.core.client.impl.TabletLocator.TabletLocations;
+import org.apache.accumulo.core.client.impl.TabletLocatorImpl.TabletLocationObtainer;
+import org.apache.accumulo.core.client.impl.TabletServerBatchReaderIterator;
+import org.apache.accumulo.core.client.impl.TabletServerBatchReaderIterator.ResultReceiver;
+import org.apache.accumulo.core.client.impl.ThriftScanner;
+import org.apache.accumulo.core.data.Column;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.data.PartialKey;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.data.thrift.IterInfo;
+import org.apache.accumulo.core.iterators.user.WholeRowIterator;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.tabletserver.thrift.NotServingTabletException;
+import org.apache.accumulo.core.util.OpTimer;
+import org.apache.accumulo.core.util.Pair;
+import org.apache.accumulo.core.util.TextUtil;
+import org.apache.hadoop.io.Text;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+
+public class MetadataLocationObtainer implements TabletLocationObtainer {
+  private static final Logger log = Logger.getLogger(MetadataLocationObtainer.class);
+  private SortedSet<Column> locCols;
+  private ArrayList<Column> columns;
+  private Instance instance;
+  
+  public MetadataLocationObtainer(Instance instance) {
+    
+    this.instance = instance;
+    
+    locCols = new TreeSet<Column>();
+    locCols.add(new Column(TextUtil.getBytes(TabletsSection.CurrentLocationColumnFamily.NAME), null, null));
+    locCols.add(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.toColumn());
+    columns = new ArrayList<Column>(locCols);
+  }
+  
+  @Override
+  public TabletLocations lookupTablet(TabletLocation src, Text row, Text stopRow, TabletLocator parent, TCredentials credentials)
+      throws AccumuloSecurityException, AccumuloException {
+    
+    try {
+      ArrayList<TabletLocation> list = new ArrayList<TabletLocation>();
+      
+      OpTimer opTimer = null;
+      if (log.isTraceEnabled())
+        opTimer = new OpTimer(log, Level.TRACE).start("Looking up in " + src.tablet_extent.getTableId() + " row=" + TextUtil.truncate(row) + "  extent="
+            + src.tablet_extent + " tserver=" + src.tablet_location);
+      
+      Range range = new Range(row, true, stopRow, true);
+      
+      TreeMap<Key,Value> encodedResults = new TreeMap<Key,Value>();
+      TreeMap<Key,Value> results = new TreeMap<Key,Value>();
+      
+      // Use the whole row iterator so that a partial mutation is not read. The code that extracts locations for tablets does a sanity check to ensure there is
+      // only one location. Reading a partial mutation could make it appear there are multiple locations when there are not.
+      List<IterInfo> serverSideIteratorList = new ArrayList<IterInfo>();
+      serverSideIteratorList.add(new IterInfo(10000, WholeRowIterator.class.getName(), "WRI"));
+      Map<String,Map<String,String>> serverSideIteratorOptions = Collections.emptyMap();
+      
+      boolean more = ThriftScanner.getBatchFromServer(credentials, range, src.tablet_extent, src.tablet_location, encodedResults, locCols,
+          serverSideIteratorList, serverSideIteratorOptions, Constants.SCAN_BATCH_SIZE, Authorizations.EMPTY, false, instance.getConfiguration());
+      
+      decodeRows(encodedResults, results);
+      
+      if (more && results.size() == 1) {
+        range = new Range(results.lastKey().followingKey(PartialKey.ROW_COLFAM_COLQUAL_COLVIS_TIME), true, new Key(stopRow).followingKey(PartialKey.ROW), false);
+        encodedResults.clear();
+        more = ThriftScanner.getBatchFromServer(credentials, range, src.tablet_extent, src.tablet_location, encodedResults, locCols, serverSideIteratorList,
+            serverSideIteratorOptions, Constants.SCAN_BATCH_SIZE, Authorizations.EMPTY, false, instance.getConfiguration());
+        
+        decodeRows(encodedResults, results);
+      }
+      
+      if (opTimer != null)
+        opTimer.stop("Got " + results.size() + " results  from " + src.tablet_extent + " in %DURATION%");
+      
+      // System.out.println("results "+results.keySet());
+      
+      Pair<SortedMap<KeyExtent,Text>,List<KeyExtent>> metadata = MetadataLocationObtainer.getMetadataLocationEntries(results);
+      
+      for (Entry<KeyExtent,Text> entry : metadata.getFirst().entrySet()) {
+        list.add(new TabletLocation(entry.getKey(), entry.getValue().toString()));
+      }
+      
+      return new TabletLocations(list, metadata.getSecond());
+      
+    } catch (AccumuloServerException ase) {
+      if (log.isTraceEnabled())
+        log.trace(src.tablet_extent.getTableId() + " lookup failed, " + src.tablet_location + " server side exception");
+      throw ase;
+    } catch (NotServingTabletException e) {
+      if (log.isTraceEnabled())
+        log.trace(src.tablet_extent.getTableId() + " lookup failed, " + src.tablet_location + " not serving " + src.tablet_extent);
+      parent.invalidateCache(src.tablet_extent);
+    } catch (AccumuloException e) {
+      if (log.isTraceEnabled())
+        log.trace(src.tablet_extent.getTableId() + " lookup failed", e);
+      parent.invalidateCache(src.tablet_location);
+    }
+    
+    return null;
+  }
+  
+  private void decodeRows(TreeMap<Key,Value> encodedResults, TreeMap<Key,Value> results) throws AccumuloException {
+    for (Entry<Key,Value> entry : encodedResults.entrySet()) {
+      try {
+        results.putAll(WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()));
+      } catch (IOException e) {
+        throw new AccumuloException(e);
+      }
+    }
+  }
+  
+  @Override
+  public List<TabletLocation> lookupTablets(String tserver, Map<KeyExtent,List<Range>> tabletsRanges, TabletLocator parent, TCredentials credentials)
+      throws AccumuloSecurityException, AccumuloException {
+    
+    final TreeMap<Key,Value> results = new TreeMap<Key,Value>();
+    
+    ArrayList<TabletLocation> list = new ArrayList<TabletLocation>();
+    
+    ResultReceiver rr = new ResultReceiver() {
+      
+      @Override
+      public void receive(List<Entry<Key,Value>> entries) {
+        for (Entry<Key,Value> entry : entries) {
+          try {
+            results.putAll(WholeRowIterator.decodeRow(entry.getKey(), entry.getValue()));
+          } catch (IOException e) {
+            throw new RuntimeException(e);
+          }
+        }
+      }
+    };
+    
+    ScannerOptions opts = new ScannerOptions() {
+      ScannerOptions setOpts() {
+        this.fetchedColumns = locCols;
+        this.serverSideIteratorList = new ArrayList<IterInfo>();
+        // see comment in lookupTablet about why iterator is used
+        this.serverSideIteratorList.add(new IterInfo(10000, WholeRowIterator.class.getName(), "WRI"));
+        return this;
+      }
+    }.setOpts();
+    
+    Map<KeyExtent,List<Range>> unscanned = new HashMap<KeyExtent,List<Range>>();
+    Map<KeyExtent,List<Range>> failures = new HashMap<KeyExtent,List<Range>>();
+    try {
+      TabletServerBatchReaderIterator.doLookup(tserver, tabletsRanges, failures, unscanned, rr, columns, credentials, opts, Authorizations.EMPTY,
+          instance.getConfiguration());
+      if (failures.size() > 0) {
+        // invalidate extents in parents cache
+        if (log.isTraceEnabled())
+          log.trace("lookupTablets failed for " + failures.size() + " extents");
+        parent.invalidateCache(failures.keySet());
+      }
+    } catch (IOException e) {
+      log.trace("lookupTablets failed server=" + tserver, e);
+      parent.invalidateCache(tserver);
+    } catch (AccumuloServerException e) {
+      log.trace("lookupTablets failed server=" + tserver, e);
+      throw e;
+    }
+    
+    SortedMap<KeyExtent,Text> metadata = MetadataLocationObtainer.getMetadataLocationEntries(results).getFirst();
+    
+    for (Entry<KeyExtent,Text> entry : metadata.entrySet()) {
+      list.add(new TabletLocation(entry.getKey(), entry.getValue().toString()));
+    }
+    
+    return list;
+  }
+  
+  public static Pair<SortedMap<KeyExtent,Text>,List<KeyExtent>> getMetadataLocationEntries(SortedMap<Key,Value> entries) {
+    Key key;
+    Value val;
+    Text location = null;
+    Value prevRow = null;
+    KeyExtent ke;
+    
+    SortedMap<KeyExtent,Text> results = new TreeMap<KeyExtent,Text>();
+    ArrayList<KeyExtent> locationless = new ArrayList<KeyExtent>();
+    
+    Text lastRowFromKey = new Text();
+    
+    // Text objects below are reused in the loop for efficiency
+    Text colf = new Text();
+    Text colq = new Text();
+    
+    for (Entry<Key,Value> entry : entries.entrySet()) {
+      key = entry.getKey();
+      val = entry.getValue();
+      
+      if (key.compareRow(lastRowFromKey) != 0) {
+        prevRow = null;
+        location = null;
+        key.getRow(lastRowFromKey);
+      }
+      
+      colf = key.getColumnFamily(colf);
+      colq = key.getColumnQualifier(colq);
+      
+      // interpret the row id as a key extent
+      if (colf.equals(TabletsSection.CurrentLocationColumnFamily.NAME) || colf.equals(TabletsSection.FutureLocationColumnFamily.NAME)) {
+        if (location != null) {
+          throw new IllegalStateException("Tablet has multiple locations : " + lastRowFromKey);
+        }
+        location = new Text(val.toString());
+      } else if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(colf, colq)) {
+        prevRow = new Value(val);
+      }
+      
+      if (prevRow != null) {
+        ke = new KeyExtent(key.getRow(), prevRow);
+        if (location != null)
+          results.put(ke, location);
+        else
+          locationless.add(ke);
+        
+        location = null;
+        prevRow = null;
+      }
+    }
+    
+    return new Pair<SortedMap<KeyExtent,Text>,List<KeyExtent>>(results, locationless);
+  }
+}

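Both lookup paths above fetch metadata rows through WholeRowIterator so that a tablet's location and prev-row columns arrive atomically, then unpack them with decodeRow. A minimal sketch of that round trip, inside a method that may throw IOException and with hypothetical row and column values:

    import java.util.Arrays;
    import java.util.SortedMap;

    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.iterators.user.WholeRowIterator;

    // Pack two columns of one metadata row into a single entry, then recover them.
    Key loc = new Key("2;m", "loc", "12345");  // hypothetical current-location column
    Key prev = new Key("2;m", "~tab", "~pr");  // hypothetical prev-row column
    Value packed = WholeRowIterator.encodeRow(Arrays.asList(loc, prev),
        Arrays.asList(new Value("host:9997".getBytes()), new Value(new byte[] {0})));
    SortedMap<Key,Value> unpacked = WholeRowIterator.decodeRow(loc, packed); // both columns restored
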
http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/metadata/MetadataServicer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/MetadataServicer.java b/core/src/main/java/org/apache/accumulo/core/metadata/MetadataServicer.java
new file mode 100644
index 0000000..7473fcf
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/MetadataServicer.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.metadata;
+
+import java.util.SortedMap;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.security.CredentialHelper;
+import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.util.ArgumentChecker;
+
+/**
+ * Provides a consolidated API for handling table metadata
+ */
+public abstract class MetadataServicer {
+  
+  public static MetadataServicer forTableName(Instance instance, TCredentials credentials, String tableName) throws AccumuloException,
+      AccumuloSecurityException {
+    ArgumentChecker.notNull(tableName);
+    Connector conn = instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials));
+    return forTableId(instance, credentials, conn.tableOperations().tableIdMap().get(tableName));
+  }
+  
+  public static MetadataServicer forTableId(Instance instance, TCredentials credentials, String tableId) {
+    ArgumentChecker.notNull(tableId);
+    if (RootTable.ID.equals(tableId))
+      return new ServicerForRootTable(instance, credentials);
+    else if (MetadataTable.ID.equals(tableId))
+      return new ServicerForMetadataTable(instance, credentials);
+    else
+      return new ServicerForUserTables(instance, credentials, tableId);
+  }
+  
+  /**
+   * 
+   * @return the table id of the table currently being serviced
+   */
+  public abstract String getServicedTableId();
+  
+  /**
+   * Populate the provided data structure with the known tablets for the table being serviced
+   * 
+   * @param tablets
+   *          A mapping of all known tablets to their location (if available, null otherwise)
+   */
+  public abstract void getTabletLocations(SortedMap<KeyExtent,String> tablets) throws AccumuloException, AccumuloSecurityException, TableNotFoundException;
+  
+}

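The factory above routes each table to the store that holds its metadata: zookeeper for the root table, the root table for the metadata table, and the metadata table for user tables. A sketch of the intended call pattern, assuming an Instance and TCredentials are already in scope and using a hypothetical user-table id "2":

    import java.util.SortedMap;
    import java.util.TreeMap;

    import org.apache.accumulo.core.data.KeyExtent;
    import org.apache.accumulo.core.metadata.MetadataServicer;

    MetadataServicer servicer = MetadataServicer.forTableId(instance, credentials, "2");
    SortedMap<KeyExtent,String> tablets = new TreeMap<KeyExtent,String>();
    servicer.getTabletLocations(tablets); // null values mark tablets with no known location
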
http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/metadata/MetadataTable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/MetadataTable.java b/core/src/main/java/org/apache/accumulo/core/metadata/MetadataTable.java
new file mode 100644
index 0000000..7276f6c
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/MetadataTable.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.metadata;
+
+
+public class MetadataTable {
+  
+  public static final String ID = "!0";
+  public static final String NAME = "!METADATA";
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/metadata/RootTable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/RootTable.java b/core/src/main/java/org/apache/accumulo/core/metadata/RootTable.java
new file mode 100644
index 0000000..402c1cc
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/RootTable.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.metadata;
+
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.hadoop.io.Text;
+
+/**
+ * 
+ */
+public class RootTable {
+  
+  public static final String ID = "!!R";
+  public static final String NAME = "!!ROOT";
+  
+  /**
+   * DFS location relative to the Accumulo directory
+   */
+  public static final String ROOT_TABLET_LOCATION = "/root_tablet";
+  
+  /**
+   * ZK path relative to the instance directory for information about the root tablet
+   */
+  public static final String ZROOT_TABLET = ROOT_TABLET_LOCATION;
+  public static final String ZROOT_TABLET_LOCATION = ZROOT_TABLET + "/location";
+  public static final String ZROOT_TABLET_FUTURE_LOCATION = ZROOT_TABLET + "/future_location";
+  public static final String ZROOT_TABLET_LAST_LOCATION = ZROOT_TABLET + "/lastlocation";
+  public static final String ZROOT_TABLET_WALOGS = ZROOT_TABLET + "/walogs";
+  
+  public static final KeyExtent EXTENT = new KeyExtent(new Text(ID), null, null);
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForMetadataTable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForMetadataTable.java b/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForMetadataTable.java
new file mode 100644
index 0000000..64e49ef
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForMetadataTable.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.metadata;
+
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.security.thrift.TCredentials;
+
+/**
+ * A metadata servicer for the metadata table (which holds metadata for user tables).<br />
+ * The metadata table's metadata is serviced in the root table.
+ */
+class ServicerForMetadataTable extends TableMetadataServicer {
+  
+  public ServicerForMetadataTable(Instance instance, TCredentials credentials) {
+    super(instance, credentials, RootTable.NAME, MetadataTable.ID);
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForRootTable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForRootTable.java b/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForRootTable.java
new file mode 100644
index 0000000..4366991
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForRootTable.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.metadata;
+
+import java.util.SortedMap;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.security.thrift.TCredentials;
+
+/**
+ * A metadata servicer for the root table.<br />
+ * The root table's metadata is serviced in zookeeper.
+ */
+class ServicerForRootTable extends MetadataServicer {
+  
+  private Instance instance;
+  
+  public ServicerForRootTable(Instance instance, TCredentials credentials) {
+    this.instance = instance;
+  }
+  
+  @Override
+  public String getServicedTableId() {
+    return RootTable.ID;
+  }
+  
+  @Override
+  public void getTabletLocations(SortedMap<KeyExtent,String> tablets) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+    tablets.put(RootTable.EXTENT, instance.getRootTabletLocation());
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForUserTables.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForUserTables.java b/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForUserTables.java
new file mode 100644
index 0000000..7e15bc0
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/ServicerForUserTables.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.metadata;
+
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.security.thrift.TCredentials;
+
+/**
+ * A metadata servicer for user tables.<br />
+ * Metadata for user tables is serviced in the metadata table.
+ */
+class ServicerForUserTables extends TableMetadataServicer {
+  
+  public ServicerForUserTables(Instance instance, TCredentials credentials, String tableId) {
+    super(instance, credentials, MetadataTable.NAME, tableId);
+  }
+  
+}


[42/50] [abbrv] git commit: ACCUMULO-1030 First attempt at a working accumulo-maven-plugin

Posted by ct...@apache.org.
ACCUMULO-1030 First attempt at a working accumulo-maven-plugin

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1502702 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/db39c354
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/db39c354
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/db39c354

Branch: refs/heads/ACCUMULO-1496
Commit: db39c354eb0a393d504c3454b65bf6f2969d54bf
Parents: 6c4bfc7
Author: Christopher Tubbs <ct...@apache.org>
Authored: Fri Jul 12 21:50:41 2013 +0000
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Fri Jul 12 21:50:41 2013 +0000

----------------------------------------------------------------------
 maven-plugin/pom.xml                            | 163 +++++++++++++++++++
 maven-plugin/src/it/plugin-test/pom.xml         | 117 +++++++++++++
 .../src/it/plugin-test/postbuild.groovy         |  24 +++
 .../org/apache/accumulo/plugin/PluginIT.java    |  98 +++++++++++
 maven-plugin/src/it/settings.xml                |  55 +++++++
 .../maven/plugin/AbstractAccumuloMojo.java      |  42 +++++
 .../org/accumulo/maven/plugin/StartMojo.java    |  67 ++++++++
 .../org/accumulo/maven/plugin/StopMojo.java     |  45 +++++
 .../minicluster/MiniAccumuloCluster.java        | 132 +++++++--------
 .../minicluster/MiniAccumuloInstance.java       |  51 ++++++
 pom.xml                                         |  43 +++--
 11 files changed, 753 insertions(+), 84 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/db39c354/maven-plugin/pom.xml
----------------------------------------------------------------------
diff --git a/maven-plugin/pom.xml b/maven-plugin/pom.xml
new file mode 100644
index 0000000..9060498
--- /dev/null
+++ b/maven-plugin/pom.xml
@@ -0,0 +1,163 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.accumulo</groupId>
+    <artifactId>accumulo-project</artifactId>
+    <version>1.6.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>accumulo-maven-plugin</artifactId>
+  <packaging>maven-plugin</packaging>
+  <name>Accumulo Maven Plugin</name>
+  <dependencies>
+    <dependency>
+      <groupId>commons-cli</groupId>
+      <artifactId>commons-cli</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-codec</groupId>
+      <artifactId>commons-codec</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-collections</groupId>
+      <artifactId>commons-collections</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-configuration</groupId>
+      <artifactId>commons-configuration</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-httpclient</groupId>
+      <artifactId>commons-httpclient</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-lang</groupId>
+      <artifactId>commons-lang</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.accumulo</groupId>
+      <artifactId>accumulo-minicluster</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.maven</groupId>
+      <artifactId>maven-plugin-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.maven.plugin-tools</groupId>
+      <artifactId>maven-plugin-annotations</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.zookeeper</groupId>
+      <artifactId>zookeeper</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.codehaus.plexus</groupId>
+      <artifactId>plexus-utils</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+    </dependency>
+  </dependencies>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-plugin-plugin</artifactId>
+        <configuration>
+          <skipErrorNoDescriptorsFound>true</skipErrorNoDescriptorsFound>
+        </configuration>
+        <executions>
+          <execution>
+            <id>mojo-descriptor</id>
+            <goals>
+              <goal>descriptor</goal>
+            </goals>
+          </execution>
+          <execution>
+            <id>help-goal</id>
+            <goals>
+              <goal>helpmojo</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+  <profiles>
+    <profile>
+      <id>test-accumulo-maven-plugin</id>
+      <activation>
+        <property>
+          <name>!skipITs</name>
+        </property>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-invoker-plugin</artifactId>
+            <configuration>
+              <debug>true</debug>
+              <cloneProjectsTo>${project.build.directory}/it</cloneProjectsTo>
+              <pomIncludes>
+                <pomInclude>*/pom.xml</pomInclude>
+              </pomIncludes>
+              <localRepositoryPath>${project.build.directory}/local-repo</localRepositoryPath>
+              <settingsFile>src/it/settings.xml</settingsFile>
+              <goals>
+                <goal>clean</goal>
+                <goal>post-integration-test</goal>
+              </goals>
+            </configuration>
+            <executions>
+              <execution>
+                <id>integration-test</id>
+                <goals>
+                  <goal>install</goal>
+                  <goal>run</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+</project>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/db39c354/maven-plugin/src/it/plugin-test/pom.xml
----------------------------------------------------------------------
diff --git a/maven-plugin/src/it/plugin-test/pom.xml b/maven-plugin/src/it/plugin-test/pom.xml
new file mode 100644
index 0000000..7c430ca
--- /dev/null
+++ b/maven-plugin/src/it/plugin-test/pom.xml
@@ -0,0 +1,117 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+
+  <parent>
+    <groupId>@project.groupId@</groupId>
+    <artifactId>accumulo-project</artifactId>
+    <version>@project.version@</version>
+  </parent>
+
+  <groupId>@project.groupId@</groupId>
+  <artifactId>@project.artifactId@-test</artifactId>
+  <version>@project.version@</version>
+
+  <description>A simple IT verifying the basic use case.</description>
+
+  <properties>
+    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>commons-cli</groupId>
+      <artifactId>commons-cli</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-codec</groupId>
+      <artifactId>commons-codec</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-collections</groupId>
+      <artifactId>commons-collections</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-configuration</groupId>
+      <artifactId>commons-configuration</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-httpclient</groupId>
+      <artifactId>commons-httpclient</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-lang</groupId>
+      <artifactId>commons-lang</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.accumulo</groupId>
+      <artifactId>accumulo-minicluster</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.zookeeper</groupId>
+      <artifactId>zookeeper</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+    </dependency>
+  </dependencies>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>@project.groupId@</groupId>
+        <artifactId>@project.artifactId@</artifactId>
+        <version>@project.version@</version>
+        <configuration>
+          <instanceName>plugin-it-instance</instanceName>
+          <rootPassword>ITSecret</rootPassword>
+        </configuration>
+        <executions>
+          <execution>
+            <id>run-plugin</id>
+            <goals>
+              <goal>start</goal>
+              <goal>stop</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/db39c354/maven-plugin/src/it/plugin-test/postbuild.groovy
----------------------------------------------------------------------
diff --git a/maven-plugin/src/it/plugin-test/postbuild.groovy b/maven-plugin/src/it/plugin-test/postbuild.groovy
new file mode 100644
index 0000000..404961a
--- /dev/null
+++ b/maven-plugin/src/it/plugin-test/postbuild.groovy
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+File outputDirectory = new File(basedir, "target/accumulo-maven-plugin/plugin-it-instance");
+assert outputDirectory.isDirectory()
+
+File testCreateTable = new File(basedir, "target/accumulo-maven-plugin/plugin-it-instance/testCreateTablePassed");
+assert testCreateTable.isFile()
+
+File testWriteToTable = new File(basedir, "target/accumulo-maven-plugin/plugin-it-instance/testWriteToTablePassed");
+assert testWriteToTable.isFile()

http://git-wip-us.apache.org/repos/asf/accumulo/blob/db39c354/maven-plugin/src/it/plugin-test/src/test/java/org/apache/accumulo/plugin/PluginIT.java
----------------------------------------------------------------------
diff --git a/maven-plugin/src/it/plugin-test/src/test/java/org/apache/accumulo/plugin/PluginIT.java b/maven-plugin/src/it/plugin-test/src/test/java/org/apache/accumulo/plugin/PluginIT.java
new file mode 100644
index 0000000..e4dee28
--- /dev/null
+++ b/maven-plugin/src/it/plugin-test/src/test/java/org/apache/accumulo/plugin/PluginIT.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.plugin;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.minicluster.MiniAccumuloInstance;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class PluginIT {
+  
+  private static Instance instance;
+  private static Connector connector;
+  
+  @BeforeClass
+  public static void setUp() throws Exception {
+    String instanceName = "plugin-it-instance";
+    instance = new MiniAccumuloInstance(instanceName, new File("target/accumulo-maven-plugin/" + instanceName));
+    connector = instance.getConnector("root", new PasswordToken("ITSecret"));
+  }
+  
+  @Test
+  public void testInstanceConnection() {
+    assertTrue(instance != null);
+    assertTrue(instance instanceof MiniAccumuloInstance);
+    assertTrue(connector != null);
+    assertTrue(connector instanceof Connector);
+  }
+  
+  @Test
+  public void testCreateTable() throws AccumuloException, AccumuloSecurityException, TableExistsException, IOException {
+    String tableName = "testCreateTable";
+    connector.tableOperations().create(tableName);
+    assertTrue(connector.tableOperations().exists(tableName));
+    assertTrue(new File("target/accumulo-maven-plugin/" + instance.getInstanceName() + "/testCreateTablePassed").createNewFile());
+  }
+  
+  @Test
+  public void writeToTable() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, IOException {
+    String tableName = "writeToTable";
+    connector.tableOperations().create(tableName);
+    BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
+    Mutation m = new Mutation("ROW");
+    m.put("CF", "CQ", "V");
+    bw.addMutation(m);
+    bw.close();
+    Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
+    int count = 0;
+    for (Entry<Key,Value> entry : scanner) {
+      count++;
+      assertEquals("ROW", entry.getKey().getRow().toString());
+      assertEquals("CF", entry.getKey().getColumnFamily().toString());
+      assertEquals("CQ", entry.getKey().getColumnQualifier().toString());
+      assertEquals("V", entry.getValue().toString());
+    }
+    assertEquals(1, count);
+    assertTrue(new File("target/accumulo-maven-plugin/" + instance.getInstanceName() + "/testWriteToTablePassed").createNewFile());
+  }
+  
+  @AfterClass
+  public static void tearDown() throws Exception {}
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/db39c354/maven-plugin/src/it/settings.xml
----------------------------------------------------------------------
diff --git a/maven-plugin/src/it/settings.xml b/maven-plugin/src/it/settings.xml
new file mode 100644
index 0000000..c8f77f0
--- /dev/null
+++ b/maven-plugin/src/it/settings.xml
@@ -0,0 +1,55 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+<settings>
+  <profiles>
+    <profile>
+      <id>it-repo</id>
+      <activation>
+        <activeByDefault>true</activeByDefault>
+      </activation>
+      <repositories>
+        <repository>
+          <id>local.central</id>
+          <url>@localRepositoryUrl@</url>
+          <releases>
+            <enabled>true</enabled>
+          </releases>
+          <snapshots>
+            <enabled>true</enabled>
+          </snapshots>
+        </repository>
+      </repositories>
+      <pluginRepositories>
+        <pluginRepository>
+          <id>local.central</id>
+          <url>@localRepositoryUrl@</url>
+          <releases>
+            <enabled>true</enabled>
+          </releases>
+          <snapshots>
+            <enabled>true</enabled>
+          </snapshots>
+        </pluginRepository>
+      </pluginRepositories>
+    </profile>
+  </profiles>
+</settings>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/db39c354/maven-plugin/src/main/java/org/accumulo/maven/plugin/AbstractAccumuloMojo.java
----------------------------------------------------------------------
diff --git a/maven-plugin/src/main/java/org/accumulo/maven/plugin/AbstractAccumuloMojo.java b/maven-plugin/src/main/java/org/accumulo/maven/plugin/AbstractAccumuloMojo.java
new file mode 100644
index 0000000..2ffcd29
--- /dev/null
+++ b/maven-plugin/src/main/java/org/accumulo/maven/plugin/AbstractAccumuloMojo.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.accumulo.maven.plugin;
+
+import java.io.File;
+import java.net.MalformedURLException;
+import java.util.List;
+
+import org.apache.maven.artifact.Artifact;
+import org.apache.maven.plugin.AbstractMojo;
+import org.apache.maven.plugins.annotations.Parameter;
+
+public abstract class AbstractAccumuloMojo extends AbstractMojo {
+  
+  @Parameter(defaultValue = "${plugin.artifacts}", readonly = true, required = true)
+  private List<Artifact> pluginArtifacts;
+  
+  void configureMiniClasspath() {
+    for (Artifact artifact : pluginArtifacts) {
+      try {
+        System.setProperty("java.class.path", System.getProperty("java.class.path", "") + File.pathSeparator + artifact.getFile().toURI().toURL());
+      } catch (MalformedURLException e) {
+        e.printStackTrace();
+      }
+    }
+    
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/db39c354/maven-plugin/src/main/java/org/accumulo/maven/plugin/StartMojo.java
----------------------------------------------------------------------
diff --git a/maven-plugin/src/main/java/org/accumulo/maven/plugin/StartMojo.java b/maven-plugin/src/main/java/org/accumulo/maven/plugin/StartMojo.java
new file mode 100644
index 0000000..fc0d716
--- /dev/null
+++ b/maven-plugin/src/main/java/org/accumulo/maven/plugin/StartMojo.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.accumulo.maven.plugin;
+
+import java.io.File;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.accumulo.minicluster.MiniAccumuloCluster;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.maven.plugin.MojoExecutionException;
+import org.apache.maven.plugins.annotations.LifecyclePhase;
+import org.apache.maven.plugins.annotations.Mojo;
+import org.apache.maven.plugins.annotations.Parameter;
+import org.apache.maven.plugins.annotations.ResolutionScope;
+
+/**
+ * Goal which starts an instance of {@link MiniAccumuloCluster}.
+ */
+@Mojo(name = "start", defaultPhase = LifecyclePhase.PRE_INTEGRATION_TEST, requiresDependencyResolution = ResolutionScope.TEST)
+public class StartMojo extends AbstractAccumuloMojo {
+  
+  @Parameter(defaultValue = "${project.build.directory}", property = "outputDir", required = true)
+  private File outputDirectory;
+  
+  @Parameter(defaultValue = "testInstance", property = "instanceName", required = true)
+  private String instanceName;
+  
+  @Parameter(defaultValue = "secret", property = "rootPassword", required = true)
+  private String rootPassword = "secret";
+  
+  static Set<MiniAccumuloCluster> runningClusters = Collections.synchronizedSet(new HashSet<MiniAccumuloCluster>());
+  
+  @Override
+  public void execute() throws MojoExecutionException {
+    File subdir = new File(new File(outputDirectory, "accumulo-maven-plugin"), instanceName);
+    subdir.mkdirs();
+    
+    try {
+      configureMiniClasspath();
+      MiniAccumuloConfig cfg = new MiniAccumuloConfig(subdir, rootPassword);
+      cfg.setInstanceName(instanceName);
+      MiniAccumuloCluster mac = new MiniAccumuloCluster(cfg);
+      System.out.println("Starting MiniAccumuloCluster: " + mac.getInstanceName());
+      mac.start();
+      runningClusters.add(mac);
+    } catch (Exception e) {
+      throw new MojoExecutionException("Unable to start " + MiniAccumuloCluster.class.getSimpleName(), e);
+    }
+    
+  }
+}

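The start mojo is a thin wrapper around the minicluster API; the lifecycle it drives can be sketched directly as below (directory, instance name, and password are placeholder values, and start()/stop() can throw checked exceptions):

    import java.io.File;

    import org.apache.accumulo.minicluster.MiniAccumuloCluster;
    import org.apache.accumulo.minicluster.MiniAccumuloConfig;

    MiniAccumuloConfig cfg = new MiniAccumuloConfig(new File("/tmp/mac-dir"), "ITSecret");
    cfg.setInstanceName("plugin-it-instance");
    MiniAccumuloCluster mac = new MiniAccumuloCluster(cfg);
    mac.start();  // what the start goal does for its configured instance
    // ... exercise the cluster ...
    mac.stop();   // what the stop goal later does for each running cluster
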
http://git-wip-us.apache.org/repos/asf/accumulo/blob/db39c354/maven-plugin/src/main/java/org/accumulo/maven/plugin/StopMojo.java
----------------------------------------------------------------------
diff --git a/maven-plugin/src/main/java/org/accumulo/maven/plugin/StopMojo.java b/maven-plugin/src/main/java/org/accumulo/maven/plugin/StopMojo.java
new file mode 100644
index 0000000..84c2a6c
--- /dev/null
+++ b/maven-plugin/src/main/java/org/accumulo/maven/plugin/StopMojo.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.accumulo.maven.plugin;
+
+import org.apache.accumulo.minicluster.MiniAccumuloCluster;
+import org.apache.accumulo.minicluster.MiniAccumuloCluster.LogWriter;
+import org.apache.maven.plugin.MojoExecutionException;
+import org.apache.maven.plugins.annotations.LifecyclePhase;
+import org.apache.maven.plugins.annotations.Mojo;
+import org.apache.maven.plugins.annotations.ResolutionScope;
+
+/**
+ * Goal which stops all instances of {@link MiniAccumuloCluster} started with the start mojo.
+ */
+@Mojo(name = "stop", defaultPhase = LifecyclePhase.POST_INTEGRATION_TEST, requiresDependencyResolution = ResolutionScope.TEST)
+public class StopMojo extends AbstractAccumuloMojo {
+  
+  @Override
+  public void execute() throws MojoExecutionException {
+    for (MiniAccumuloCluster mac : StartMojo.runningClusters) {
+      System.out.println("Stopping MiniAccumuloCluster: " + mac.getInstanceName());
+      try {
+        mac.stop();
+        for (LogWriter log : mac.getLogWriters())
+          log.flush();
+      } catch (Exception e) {
+        throw new MojoExecutionException("Unable to start " + MiniAccumuloCluster.class.getSimpleName(), e);
+      }
+    }
+  }
+}

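Taken together, the two mojos bracket Maven's integration-test phase: "start" launches a MiniAccumuloCluster during pre-integration-test and records it in StartMojo.runningClusters, and "stop" walks that set during post-integration-test, stopping each cluster and flushing its log writers. A minimal sketch of wiring the plugin into a consuming build, assuming it is published as org.accumulo:accumulo-maven-plugin (the Maven coordinates and version are not shown in this diff, so treat them as placeholders):

  <plugin>
    <!-- hypothetical coordinates; only the package org.accumulo.maven.plugin appears above -->
    <groupId>org.accumulo</groupId>
    <artifactId>accumulo-maven-plugin</artifactId>
    <executions>
      <execution>
        <goals>
          <!-- each goal binds to its default phase:
               start -> pre-integration-test, stop -> post-integration-test -->
          <goal>start</goal>
          <goal>stop</goal>
        </goals>
        <configuration>
          <!-- all parameters have defaults (target/, testInstance, secret);
               shown here only for illustration -->
          <instanceName>testInstance</instanceName>
          <rootPassword>secret</rootPassword>
        </configuration>
      </execution>
    </executions>
  </plugin>
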
http://git-wip-us.apache.org/repos/asf/accumulo/blob/db39c354/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
index 8da22e0..0418396 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
@@ -58,7 +58,7 @@ import org.apache.zookeeper.server.ZooKeeperServerMain;
 /**
  * A utility class that will create Zookeeper and Accumulo processes that write all of their data to a single local directory. This class makes it easy to test
 * code against a real Accumulo instance. It's much more accurate for testing than {@link org.apache.accumulo.core.client.mock.MockAccumulo}, but much slower.
- *
+ * 
  * @since 1.5.0
  */
 public class MiniAccumuloCluster {
@@ -66,14 +66,11 @@ public class MiniAccumuloCluster {
   public static class LogWriter extends Daemon {
     private BufferedReader in;
     private BufferedWriter out;
-
-    /**
-     * @throws IOException
-     */
+    
     public LogWriter(InputStream stream, File logFile) throws IOException {
       this.in = new BufferedReader(new InputStreamReader(stream));
       out = new BufferedWriter(new FileWriter(logFile));
-
+      
       SimpleTimer.getInstance().schedule(new Runnable() {
         @Override
         public void run() {
@@ -85,75 +82,74 @@ public class MiniAccumuloCluster {
         }
       }, 1000, 1000);
     }
-
+    
     public synchronized void flush() throws IOException {
       if (out != null)
         out.flush();
     }
-
+    
     @Override
     public void run() {
       String line;
-
+      
       try {
         while ((line = in.readLine()) != null) {
           out.append(line);
           out.append("\n");
         }
-
+        
         synchronized (this) {
           out.close();
           out = null;
           in.close();
         }
-
+        
       } catch (IOException e) {}
     }
   }
-
+  
   private boolean initialized = false;
   private Process zooKeeperProcess = null;
   private Process masterProcess = null;
   private List<Process> tabletServerProcesses = new ArrayList<Process>();
-
+  
   private Set<Pair<ServerType,Integer>> debugPorts = new HashSet<Pair<ServerType,Integer>>();
-
+  
   private File zooCfgFile;
-
+  
   public List<LogWriter> getLogWriters() {
     return logWriters;
   }
-
-
+  
   private List<LogWriter> logWriters = new ArrayList<MiniAccumuloCluster.LogWriter>();
-
+  
   private MiniAccumuloConfig config;
-
+  
   public Process exec(Class<? extends Object> clazz, String... args) throws IOException {
     return exec(clazz, Collections.singletonList("-Xmx" + config.getDefaultMemory()), args);
   }
-
+  
   private Process exec(Class<? extends Object> clazz, List<String> extraJvmOpts, String... args) throws IOException {
     String javaHome = System.getProperty("java.home");
     String javaBin = javaHome + File.separator + "bin" + File.separator + "java";
     String classpath = System.getProperty("java.class.path");
-
+    
     classpath = config.getConfDir().getAbsolutePath() + File.pathSeparator + classpath;
-
+    
     String className = clazz.getCanonicalName();
-
+    
     ArrayList<String> argList = new ArrayList<String>();
     argList.addAll(Arrays.asList(javaBin, "-Dproc=" + clazz.getSimpleName(), "-cp", classpath));
     argList.add("-Djava.library.path=" + config.getLibDir());
     argList.addAll(extraJvmOpts);
     argList.addAll(Arrays.asList("-XX:+UseConcMarkSweepGC", "-XX:CMSInitiatingOccupancyFraction=75", Main.class.getName(), className));
     argList.addAll(Arrays.asList(args));
-
+    
     ProcessBuilder builder = new ProcessBuilder(argList);
-
+    
     builder.environment().put("ACCUMULO_HOME", config.getDir().getAbsolutePath());
     builder.environment().put("ACCUMULO_LOG_DIR", config.getLogDir().getAbsolutePath());
-
+    
     // if we're running under accumulo.start, we forward these env vars
     String env = System.getenv("HADOOP_PREFIX");
     if (env != null)
@@ -161,9 +157,9 @@ public class MiniAccumuloCluster {
     env = System.getenv("ZOOKEEPER_HOME");
     if (env != null)
       builder.environment().put("ZOOKEEPER_HOME", env);
-
+    
     Process process = builder.start();
-
+    
     LogWriter lw;
     lw = new LogWriter(process.getErrorStream(), new File(config.getLogDir(), clazz.getSimpleName() + "_" + process.hashCode() + ".err"));
     logWriters.add(lw);
@@ -171,12 +167,12 @@ public class MiniAccumuloCluster {
     lw = new LogWriter(process.getInputStream(), new File(config.getLogDir(), clazz.getSimpleName() + "_" + process.hashCode() + ".out"));
     logWriters.add(lw);
     lw.start();
-
+    
     return process;
   }
-
+  
   private Process exec(Class<? extends Object> clazz, ServerType serverType, String... args) throws IOException {
-
+    
     List<String> jvmOpts = new ArrayList<String>();
     jvmOpts.add("-Xmx" + config.getMemory(serverType));
 
@@ -187,49 +183,47 @@ public class MiniAccumuloCluster {
     }
     return exec(clazz, jvmOpts, args);
   }
-
+  
   /**
-   *
+   * 
    * @param dir
   *          An empty or nonexistent temp directory that Accumulo and Zookeeper can store data in. Creating the directory is left to the user. Java 7, Guava,
   *          and JUnit provide methods for creating temporary directories.
    * @param rootPassword
    *          Initial root password for instance.
-   * @throws IOException
    */
   public MiniAccumuloCluster(File dir, String rootPassword) throws IOException {
     this(new MiniAccumuloConfig(dir, rootPassword));
   }
-
+  
   /**
    * @param config
    *          initial configuration
-   * @throws IOException
    */
   public MiniAccumuloCluster(MiniAccumuloConfig config) throws IOException {
-
+    
     this.config = config.initialize();
-
+    
     config.getConfDir().mkdirs();
     config.getAccumuloDir().mkdirs();
     config.getZooKeeperDir().mkdirs();
     config.getLogDir().mkdirs();
     config.getWalogDir().mkdirs();
     config.getLibDir().mkdirs();
-
+    
     File siteFile = new File(config.getConfDir(), "accumulo-site.xml");
-
+    
     FileWriter fileWriter = new FileWriter(siteFile);
     fileWriter.append("<configuration>\n");
-
+    
     for (Entry<String,String> entry : config.getSiteConfig().entrySet())
       fileWriter.append("<property><name>" + entry.getKey() + "</name><value>" + entry.getValue() + "</value></property>\n");
     fileWriter.append("</configuration>\n");
     fileWriter.close();
-
+    
     zooCfgFile = new File(config.getConfDir(), "zoo.cfg");
     fileWriter = new FileWriter(zooCfgFile);
-
+    
     // zookeeper uses Properties to read its config, so use that to write in order to properly escape things like Windows paths
     Properties zooCfg = new Properties();
     zooCfg.setProperty("tickTime", "1000");
@@ -239,9 +233,9 @@ public class MiniAccumuloCluster {
     zooCfg.setProperty("maxClientCnxns", "100");
     zooCfg.setProperty("dataDir", config.getZooKeeperDir().getAbsolutePath());
     zooCfg.store(fileWriter, null);
-
+    
     fileWriter.close();
-
+    
     File nativeMap = new File(config.getLibDir().getAbsolutePath() + "/native/map");
     nativeMap.mkdirs();
     File testRoot = new File(new File(new File(System.getProperty("user.dir")).getParent() + "/server/src/main/c++/nativeMap").getAbsolutePath());
@@ -254,17 +248,15 @@ public class MiniAccumuloCluster {
       }
     }
   }
-
+  
   /**
    * Starts Accumulo and Zookeeper processes. Can only be called once.
-   *
-   * @throws IOException
-   * @throws InterruptedException
+   * 
    * @throws IllegalStateException
    *           if already started
    */
   public void start() throws IOException, InterruptedException {
-
+    
     if (!initialized) {
       
       Runtime.getRuntime().addShutdownHook(new Thread() {
@@ -280,22 +272,22 @@ public class MiniAccumuloCluster {
         }
       });
     }
-
+    
     if (zooKeeperProcess == null) {
       zooKeeperProcess = exec(Main.class, ServerType.ZOOKEEPER, ZooKeeperServerMain.class.getName(), zooCfgFile.getAbsolutePath());
       // sleep a little bit to let zookeeper come up before calling init, seems to work better
       UtilWaitThread.sleep(250);
     }
-
+    
     if (!initialized) {
       Process initProcess = exec(Initialize.class, "--instance-name", config.getInstanceName(), "--password", config.getRootPassword(), "--username", "root");
       int ret = initProcess.waitFor();
       if (ret != 0) {
         throw new RuntimeException("Initialize process returned " + ret);
       }
-      initialized = true; 
+      initialized = true;
     }
-
+    
     for (int i = tabletServerProcesses.size(); i < config.getNumTservers(); i++) {
       tabletServerProcesses.add(exec(TabletServer.class, ServerType.TABLET_SERVER));
     }
@@ -308,14 +300,13 @@ public class MiniAccumuloCluster {
       masterProcess = exec(Master.class, ServerType.MASTER);
     }
   }
-
+  
   private List<String> buildRemoteDebugParams(int port) {
     return Arrays.asList(new String[] {"-Xdebug", String.format("-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=%d", port)});
   }
-
+  
   /**
    * @return generated remote debug ports if in debug mode.
-   *
    * @since 1.6.0
    */
   public Set<Pair<ServerType,Integer>> getDebugPorts() {
@@ -330,8 +321,8 @@ public class MiniAccumuloCluster {
     return result;
   }
   
-  public Map<ServerType, Collection<ProcessReference>> getProcesses() {
-    Map<ServerType, Collection<ProcessReference>> result = new HashMap<ServerType, Collection<ProcessReference>>();
+  public Map<ServerType,Collection<ProcessReference>> getProcesses() {
+    Map<ServerType,Collection<ProcessReference>> result = new HashMap<ServerType,Collection<ProcessReference>>();
     result.put(ServerType.MASTER, references(masterProcess));
     result.put(ServerType.TABLET_SERVER, references(tabletServerProcesses.toArray(new Process[0])));
     result.put(ServerType.ZOOKEEPER, references(zooKeeperProcess));
@@ -349,7 +340,7 @@ public class MiniAccumuloCluster {
         }
         break;
       case TABLET_SERVER:
-        for (Process tserver: tabletServerProcesses) {
+        for (Process tserver : tabletServerProcesses) {
           if (proc.equals(tserver)) {
             tabletServerProcesses.remove(tserver);
             found = true;
@@ -368,27 +359,24 @@ public class MiniAccumuloCluster {
     if (!found)
       throw new ProcessNotFoundException();
   }
-
+  
   /**
    * @return Accumulo instance name
    */
   public String getInstanceName() {
     return config.getInstanceName();
   }
-
+  
   /**
    * @return zookeeper connection string
    */
   public String getZooKeepers() {
     return config.getZooKeepers();
   }
-
+  
   /**
   * Stops Accumulo and Zookeeper processes. If stop is not called, there is a shutdown hook that is set up to kill the processes. However, it's probably best to
    * call stop in a finally block as soon as possible.
-   *
-   * @throws IOException
-   * @throws InterruptedException
    */
   public void stop() throws IOException, InterruptedException {
     if (zooKeeperProcess != null)
@@ -400,29 +388,25 @@ public class MiniAccumuloCluster {
         tserver.destroy();
       }
     }
-
+    
     for (LogWriter lw : logWriters)
       lw.flush();
     zooKeeperProcess = null;
     masterProcess = null;
     tabletServerProcesses.clear();
   }
-
+  
   /**
    * @since 1.6.0
    */
   public MiniAccumuloConfig getConfig() {
     return config;
   }
-
+  
   /**
    * Utility method to get a connector to the MAC.
+   * 
    * @since 1.6.0
-   * @param user
-   * @param passwd
-   * @return Connector
-   * @throws AccumuloException
-   * @throws AccumuloSecurityException
    */
   public Connector getConnector(String user, String passwd) throws AccumuloException, AccumuloSecurityException {
     Instance instance = new ZooKeeperInstance(this.getInstanceName(), this.getZooKeepers());

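For reference, the lifecycle this class exposes (and that the mojos above drive) is small: construct against an empty directory, start(), use, stop(). A minimal sketch using only methods visible in this diff; the directory path is a placeholder, and creating the directory is, per the javadoc, the caller's job:

  import java.io.File;

  import org.apache.accumulo.core.client.Connector;
  import org.apache.accumulo.minicluster.MiniAccumuloCluster;

  public class MiniClusterExample {
    public static void main(String[] args) throws Exception {
      // an empty or nonexistent directory the cluster can own (placeholder path)
      File dir = new File(System.getProperty("java.io.tmpdir"), "mac-example");
      dir.mkdirs();

      MiniAccumuloCluster mac = new MiniAccumuloCluster(dir, "secret");
      mac.start(); // spawns ZooKeeper, runs init, then tablet servers and the master

      try {
        // root password is the one handed to the constructor
        Connector conn = mac.getConnector("root", "secret");
        System.out.println("connected to " + mac.getInstanceName() + " at "
            + mac.getZooKeepers() + " as " + conn.whoami());
      } finally {
        mac.stop(); // the shutdown hook is only a fallback; stop explicitly
      }
    }
  }
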
http://git-wip-us.apache.org/repos/asf/accumulo/blob/db39c354/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloInstance.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloInstance.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloInstance.java
new file mode 100644
index 0000000..540d7ae
--- /dev/null
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloInstance.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.minicluster;
+
+import java.io.File;
+import java.net.MalformedURLException;
+
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.ZooKeeperInstance;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+ * An {@link Instance} that connects to a {@link MiniAccumuloCluster} by reading the ZooKeeper connection string from the cluster directory's accumulo-site.xml.
+ */
+public class MiniAccumuloInstance extends ZooKeeperInstance {
+  
+  /**
+   * Construct an {@link Instance} entry point to Accumulo using a {@link MiniAccumuloCluster} directory
+   */
+  public MiniAccumuloInstance(String instanceName, File directory) {
+    super(instanceName, getZooKeepersFromDir(directory));
+  }
+  
+  private static String getZooKeepersFromDir(File directory) {
+    if (!directory.isDirectory())
+      throw new IllegalArgumentException("Not a directory " + directory.getPath());
+    File configFile = new File(new File(directory, "conf"), "accumulo-site.xml");
+    Configuration conf = new Configuration(false);
+    try {
+      conf.addResource(configFile.toURI().toURL());
+    } catch (MalformedURLException e) {
+      throw new IllegalStateException("Missing file: " + configFile.getPath());
+    }
+    return conf.get(Property.INSTANCE_ZK_HOST.getKey());
+  }
+}

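MiniAccumuloInstance lets client code reconnect to an already-running cluster given only its directory, by pulling instance.zookeeper.host back out of conf/accumulo-site.xml. A short sketch, with a placeholder instance name and path that must match a cluster started earlier:

  import java.io.File;

  import org.apache.accumulo.core.client.Instance;
  import org.apache.accumulo.minicluster.MiniAccumuloInstance;

  public class MiniInstanceExample {
    public static void main(String[] args) {
      // placeholders: the name and directory of a previously started cluster
      Instance instance = new MiniAccumuloInstance("testInstance", new File("/tmp/mac-example"));
      // behaves like any ZooKeeperInstance from here on
      System.out.println(instance.getInstanceName() + " -> " + instance.getZooKeepers());
    }
  }
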
http://git-wip-us.apache.org/repos/asf/accumulo/blob/db39c354/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 0dc56a4..905e4e7 100644
--- a/pom.xml
+++ b/pom.xml
@@ -83,6 +83,7 @@
     <module>test</module>
     <module>minicluster</module>
     <module>docs</module>
+    <module>maven-plugin</module>
   </modules>
   <scm>
     <connection>scm:svn:http://svn.apache.org/repos/asf/accumulo/trunk</connection>
@@ -297,6 +298,16 @@
         <version>${hadoop.version}</version>
       </dependency>
       <dependency>
+        <groupId>org.apache.maven</groupId>
+        <artifactId>maven-plugin-api</artifactId>
+        <version>${maven.min-version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.maven.plugin-tools</groupId>
+        <artifactId>maven-plugin-annotations</artifactId>
+        <version>3.2</version>
+      </dependency>
+      <dependency>
         <groupId>org.apache.thrift</groupId>
         <artifactId>libthrift</artifactId>
         <version>0.9.0</version>
@@ -307,6 +318,11 @@
         <version>${zookeeper.version}</version>
       </dependency>
       <dependency>
+        <groupId>org.codehaus.plexus</groupId>
+        <artifactId>plexus-utils</artifactId>
+        <version>3.0.10</version>
+      </dependency>
+      <dependency>
         <groupId>org.easymock</groupId>
         <artifactId>easymock</artifactId>
         <version>3.1</version>
@@ -571,6 +587,19 @@
                 </pluginExecution>
                 <pluginExecution>
                   <pluginExecutionFilter>
+                    <groupId>org.apache.maven.plugins</groupId>
+                    <artifactId>maven-invoker-plugin</artifactId>
+                    <versionRange>[1.7,)</versionRange>
+                    <goals>
+                      <goal>install</goal>
+                    </goals>
+                  </pluginExecutionFilter>
+                  <action>
+                    <ignore />
+                  </action>
+                </pluginExecution>
+                <pluginExecution>
+                  <pluginExecutionFilter>
                     <groupId>com.google.code.sortpom</groupId>
                     <artifactId>maven-sortpom-plugin</artifactId>
                     <versionRange>[2.1.0,)</versionRange>
@@ -630,18 +659,11 @@
             <id>run-integration-tests</id>
             <goals>
               <goal>integration-test</goal>
-            </goals>
-            <phase>integration-test</phase>
-          	<configuration>
-              <redirectTestOutputToFile>true</redirectTestOutputToFile>
-          	</configuration>
-          </execution>
-          <execution>
-            <id>verify-integration-tests</id>
-            <goals>
               <goal>verify</goal>
             </goals>
-            <phase>verify</phase>
+            <configuration>
+              <redirectTestOutputToFile>true</redirectTestOutputToFile>
+            </configuration>
           </execution>
         </executions>
       </plugin>
@@ -839,6 +861,7 @@
       <id>seal-jars</id>
       <properties>
         <sealJars>true</sealJars>
+        <skipITs>true</skipITs>
         <skipTests>true</skipTests>
       </properties>
     </profile>


[45/50] [abbrv] git commit: ACCUMULO-1496 Update scannotations version

Posted by ct...@apache.org.
ACCUMULO-1496 Update scannotations version


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/b17d9806
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/b17d9806
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/b17d9806

Branch: refs/heads/ACCUMULO-1496
Commit: b17d98069953366abcaca29f756acd95a384062e
Parents: 424caaa
Author: Christopher Tubbs <ct...@apache.org>
Authored: Tue Jul 16 15:13:27 2013 -0400
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Tue Jul 16 15:13:27 2013 -0400

----------------------------------------------------------------------
 assemble/pom.xml                           | 8 ++++----
 assemble/src/main/assemblies/component.xml | 2 +-
 pom.xml                                    | 6 +++---
 start/pom.xml                              | 8 ++++----
 4 files changed, 12 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/b17d9806/assemble/pom.xml
----------------------------------------------------------------------
diff --git a/assemble/pom.xml b/assemble/pom.xml
index 96a1094..e7daa0b 100644
--- a/assemble/pom.xml
+++ b/assemble/pom.xml
@@ -47,10 +47,6 @@
       <artifactId>jline</artifactId>
     </dependency>
     <dependency>
-      <groupId>net.sf.scannotation</groupId>
-      <artifactId>scannotation</artifactId>
-    </dependency>
-    <dependency>
       <groupId>org.apache.accumulo</groupId>
       <artifactId>accumulo-core</artifactId>
     </dependency>
@@ -98,6 +94,10 @@
       <groupId>org.javassist</groupId>
       <artifactId>javassist</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.scannotation</groupId>
+      <artifactId>scannotation</artifactId>
+    </dependency>
   </dependencies>
   <profiles>
     <profile>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/b17d9806/assemble/src/main/assemblies/component.xml
----------------------------------------------------------------------
diff --git a/assemble/src/main/assemblies/component.xml b/assemble/src/main/assemblies/component.xml
index c366c41..b5f1419 100644
--- a/assemble/src/main/assemblies/component.xml
+++ b/assemble/src/main/assemblies/component.xml
@@ -32,9 +32,9 @@
         <include>com.google.code.gson:gson</include>
         <include>com.google.guava:guava</include>
         <include>jline:jline</include>
-        <include>net.sf.scannotation:scannotation</include>
         <include>org.apache.thrift:libthrift</include>
         <include>org.javassist:javassist</include>
+        <include>org.scannotation:scannotation</include>
       </includes>
       <excludes>
         <exclude>${groupId}:${artifactId}-docs</exclude>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/b17d9806/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index f1534f7..39f6091 100644
--- a/pom.xml
+++ b/pom.xml
@@ -206,9 +206,9 @@
         <version>1.2.16</version>
       </dependency>
       <dependency>
-        <groupId>net.sf.scannotation</groupId>
+        <groupId>org.scannotation</groupId>
         <artifactId>scannotation</artifactId>
-        <version>1.0.2</version>
+        <version>1.0.3</version>
       </dependency>
       <dependency>
         <groupId>org.apache.accumulo</groupId>
@@ -335,7 +335,7 @@
       <dependency>
         <groupId>org.javassist</groupId>
         <artifactId>javassist</artifactId>
-        <version>3.17.1-GA</version>
+        <version>3.18.0-GA</version>
       </dependency>
       <dependency>
         <groupId>org.mortbay.jetty</groupId>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/b17d9806/start/pom.xml
----------------------------------------------------------------------
diff --git a/start/pom.xml b/start/pom.xml
index 61496bf..6cdd4a4 100644
--- a/start/pom.xml
+++ b/start/pom.xml
@@ -26,10 +26,6 @@
   <name>Start</name>
   <dependencies>
     <dependency>
-      <groupId>net.sf.scannotation</groupId>
-      <artifactId>scannotation</artifactId>
-    </dependency>
-    <dependency>
       <groupId>org.apache.commons</groupId>
       <artifactId>commons-vfs2</artifactId>
     </dependency>
@@ -38,6 +34,10 @@
       <artifactId>javassist</artifactId>
     </dependency>
     <dependency>
+      <groupId>org.scannotation</groupId>
+      <artifactId>scannotation</artifactId>
+    </dependency>
+    <dependency>
       <groupId>commons-io</groupId>
       <artifactId>commons-io</artifactId>
       <scope>provided</scope>


[11/50] [abbrv] ACCUMULO-1481 : Add tests for splitting/merging root table; refactor to consolidate metadata constants and structures in an organized way; begin consolidating metadata ops into a servicer interface to abstract the code that actually does

Posted by ct...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java b/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java
deleted file mode 100644
index 3bd2c5b..0000000
--- a/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java
+++ /dev/null
@@ -1,1275 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.server.util;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.IsolatedScanner;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.impl.BatchWriterImpl;
-import org.apache.accumulo.core.client.impl.ScannerImpl;
-import org.apache.accumulo.core.client.impl.Writer;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.PartialKey;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.CredentialHelper;
-import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.tabletserver.thrift.ConstraintViolationException;
-import org.apache.accumulo.core.util.ColumnFQ;
-import org.apache.accumulo.core.util.FastFormat;
-import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.core.util.RootTable;
-import org.apache.accumulo.core.util.StringUtil;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.core.zookeeper.ZooUtil;
-import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
-import org.apache.accumulo.fate.zookeeper.ZooLock;
-import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
-import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
-import org.apache.accumulo.server.ServerConstants;
-import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.fs.FileRef;
-import org.apache.accumulo.server.fs.VolumeManager;
-import org.apache.accumulo.server.fs.VolumeManagerImpl;
-import org.apache.accumulo.server.master.state.TServerInstance;
-import org.apache.accumulo.server.security.SecurityConstants;
-import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.DataInputBuffer;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.io.Text;
-import org.apache.log4j.Logger;
-import org.apache.zookeeper.KeeperException;
-
-/**
- * provides a reference to the metadata table for updates by tablet servers
- */
-public class MetadataTable extends org.apache.accumulo.core.util.MetadataTable {
-  
-  private static final Text EMPTY_TEXT = new Text();
-  private static Map<TCredentials,Writer> root_tables = new HashMap<TCredentials,Writer>();
-  private static Map<TCredentials,Writer> metadata_tables = new HashMap<TCredentials,Writer>();
-  private static final Logger log = Logger.getLogger(MetadataTable.class);
-  
-  private static final int SAVE_ROOT_TABLET_RETRIES = 3;
-  
-  private MetadataTable() {}
-  
-  public synchronized static Writer getMetadataTable(TCredentials credentials) {
-    Writer metadataTable = metadata_tables.get(credentials);
-    if (metadataTable == null) {
-      metadataTable = new Writer(HdfsZooInstance.getInstance(), credentials, ID);
-      metadata_tables.put(credentials, metadataTable);
-    }
-    return metadataTable;
-  }
-  
-  public synchronized static Writer getRootTable(TCredentials credentials) {
-    Writer rootTable = root_tables.get(credentials);
-    if (rootTable == null) {
-      rootTable = new Writer(HdfsZooInstance.getInstance(), credentials, RootTable.ID);
-      root_tables.put(credentials, rootTable);
-    }
-    return rootTable;
-  }
-  
-  public static void putLockID(ZooLock zooLock, Mutation m) {
-    LOCK_COLUMN.put(m, new Value(zooLock.getLockID().serialize(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + "/").getBytes()));
-  }
-  
-  public static void update(TCredentials credentials, Mutation m, KeyExtent extent) {
-    update(credentials, null, m, extent);
-  }
-  
-  public static void update(TCredentials credentials, ZooLock zooLock, Mutation m, KeyExtent extent) {
-    Writer t = extent.isMeta() ? getRootTable(credentials) : getMetadataTable(credentials);
-    if (zooLock != null)
-      putLockID(zooLock, m);
-    while (true) {
-      try {
-        t.update(m);
-        return;
-      } catch (AccumuloException e) {
-        log.error(e, e);
-      } catch (AccumuloSecurityException e) {
-        log.error(e, e);
-      } catch (ConstraintViolationException e) {
-        log.error(e, e);
-      } catch (TableNotFoundException e) {
-        log.error(e, e);
-      }
-      UtilWaitThread.sleep(1000);
-    }
-    
-  }
-  
-  /**
-   * new data file update function adds one data file to a tablet's list
-   * 
-   * @param path
-   *          should be relative to the table directory
-   * 
-   */
-  public static void updateTabletDataFile(KeyExtent extent, FileRef path, FileRef mergeFile, DataFileValue dfv, String time, TCredentials credentials,
-      Set<FileRef> filesInUseByScans, String address, ZooLock zooLock, Set<String> unusedWalLogs, TServerInstance lastLocation, long flushId) {
-    if (extent.equals(RootTable.EXTENT)) {
-      if (unusedWalLogs != null) {
-        IZooReaderWriter zk = ZooReaderWriter.getInstance();
-        // unusedWalLogs will contain the location/name of each log in a log set
-        // the log set is stored under one of the log names, but not both
-        // find the entry under one of the names and delete it.
-        String root = getZookeeperLogLocation();
-        boolean foundEntry = false;
-        for (String entry : unusedWalLogs) {
-          String[] parts = entry.split("/");
-          String zpath = root + "/" + parts[parts.length - 1];
-          while (true) {
-            try {
-              if (zk.exists(zpath)) {
-                zk.recursiveDelete(zpath, NodeMissingPolicy.SKIP);
-                foundEntry = true;
-              }
-              break;
-            } catch (KeeperException e) {
-              log.error(e, e);
-            } catch (InterruptedException e) {
-              log.error(e, e);
-            }
-            UtilWaitThread.sleep(1000);
-          }
-        }
-        if (unusedWalLogs.size() > 0 && !foundEntry)
-          log.warn("WALog entry for root tablet did not exist " + unusedWalLogs);
-      }
-      return;
-    }
-    
-    Mutation m = new Mutation(extent.getMetadataEntry());
-    
-    if (dfv.getNumEntries() > 0) {
-      m.put(DATAFILE_COLUMN_FAMILY, path.meta(), new Value(dfv.encode()));
-      TIME_COLUMN.put(m, new Value(time.getBytes()));
-      // stuff in this location
-      TServerInstance self = getTServerInstance(address, zooLock);
-      self.putLastLocation(m);
-      // erase the old location
-      if (lastLocation != null && !lastLocation.equals(self))
-        lastLocation.clearLastLocation(m);
-    }
-    if (unusedWalLogs != null) {
-      for (String entry : unusedWalLogs) {
-        m.putDelete(LOG_COLUMN_FAMILY, new Text(entry));
-      }
-    }
-    
-    for (FileRef scanFile : filesInUseByScans)
-      m.put(SCANFILE_COLUMN_FAMILY, scanFile.meta(), new Value("".getBytes()));
-    
-    if (mergeFile != null)
-      m.putDelete(DATAFILE_COLUMN_FAMILY, mergeFile.meta());
-    
-    FLUSH_COLUMN.put(m, new Value((flushId + "").getBytes()));
-    
-    update(credentials, zooLock, m, extent);
-    
-  }
-  
-  private static TServerInstance getTServerInstance(String address, ZooLock zooLock) {
-    while (true) {
-      try {
-        return new TServerInstance(address, zooLock.getSessionId());
-      } catch (KeeperException e) {
-        log.error(e, e);
-      } catch (InterruptedException e) {
-        log.error(e, e);
-      }
-      UtilWaitThread.sleep(1000);
-    }
-  }
-  
-  public static void updateTabletFlushID(KeyExtent extent, long flushID, TCredentials credentials, ZooLock zooLock) {
-    if (!extent.isRootTablet()) {
-      Mutation m = new Mutation(extent.getMetadataEntry());
-      FLUSH_COLUMN.put(m, new Value((flushID + "").getBytes()));
-      update(credentials, zooLock, m, extent);
-    }
-  }
-  
-  public static void updateTabletCompactID(KeyExtent extent, long compactID, TCredentials credentials, ZooLock zooLock) {
-    if (!extent.isRootTablet()) {
-      Mutation m = new Mutation(extent.getMetadataEntry());
-      COMPACT_COLUMN.put(m, new Value((compactID + "").getBytes()));
-      update(credentials, zooLock, m, extent);
-    }
-  }
-  
-  public static void updateTabletDataFile(long tid, KeyExtent extent, Map<FileRef,DataFileValue> estSizes, String time, TCredentials credentials,
-      ZooLock zooLock) {
-    Mutation m = new Mutation(extent.getMetadataEntry());
-    byte[] tidBytes = Long.toString(tid).getBytes();
-    
-    for (Entry<FileRef,DataFileValue> entry : estSizes.entrySet()) {
-      Text file = entry.getKey().meta();
-      m.put(DATAFILE_COLUMN_FAMILY, file, new Value(entry.getValue().encode()));
-      m.put(BULKFILE_COLUMN_FAMILY, file, new Value(tidBytes));
-    }
-    TIME_COLUMN.put(m, new Value(time.getBytes()));
-    update(credentials, zooLock, m, extent);
-  }
-  
-  public static void addTablet(KeyExtent extent, String path, TCredentials credentials, char timeType, ZooLock lock) {
-    Mutation m = extent.getPrevRowUpdateMutation();
-    
-    DIRECTORY_COLUMN.put(m, new Value(path.getBytes()));
-    TIME_COLUMN.put(m, new Value((timeType + "0").getBytes()));
-    
-    update(credentials, lock, m, extent);
-  }
-  
-  public static void updateTabletPrevEndRow(KeyExtent extent, TCredentials credentials) {
-    Mutation m = extent.getPrevRowUpdateMutation(); //
-    update(credentials, m, extent);
-  }
-  
-  /**
-   * convenience method for reading entries from the metadata table
-   */
-  public static SortedMap<KeyExtent,Text> getMetadataDirectoryEntries(SortedMap<Key,Value> entries) {
-    Key key;
-    Value val;
-    Text datafile = null;
-    Value prevRow = null;
-    KeyExtent ke;
-    
-    SortedMap<KeyExtent,Text> results = new TreeMap<KeyExtent,Text>();
-    
-    Text lastRowFromKey = new Text();
-    
-    // text obj below is meant to be reused in loop for efficiency
-    Text colf = new Text();
-    Text colq = new Text();
-    
-    for (Entry<Key,Value> entry : entries.entrySet()) {
-      key = entry.getKey();
-      val = entry.getValue();
-      
-      if (key.compareRow(lastRowFromKey) != 0) {
-        prevRow = null;
-        datafile = null;
-        key.getRow(lastRowFromKey);
-      }
-      
-      colf = key.getColumnFamily(colf);
-      colq = key.getColumnQualifier(colq);
-      
-      // interpret the row id as a key extent
-      if (DIRECTORY_COLUMN.equals(colf, colq))
-        datafile = new Text(val.toString());
-      
-      else if (PREV_ROW_COLUMN.equals(colf, colq))
-        prevRow = new Value(val);
-      
-      if (datafile != null && prevRow != null) {
-        ke = new KeyExtent(key.getRow(), prevRow);
-        results.put(ke, datafile);
-        
-        datafile = null;
-        prevRow = null;
-      }
-    }
-    return results;
-  }
-  
-  public static boolean recordRootTabletLocation(String address) {
-    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-    for (int i = 0; i < SAVE_ROOT_TABLET_RETRIES; i++) {
-      try {
-        log.info("trying to write root tablet location to ZooKeeper as " + address);
-        String zRootLocPath = ZooUtil.getRoot(HdfsZooInstance.getInstance()) + RootTable.ZROOT_TABLET_LOCATION;
-        zoo.putPersistentData(zRootLocPath, address.getBytes(), NodeExistsPolicy.OVERWRITE);
-        return true;
-      } catch (Exception e) {
-        log.error("Master: unable to save root tablet location in zookeeper. exception: " + e, e);
-      }
-    }
-    log.error("Giving up after " + SAVE_ROOT_TABLET_RETRIES + " retries");
-    return false;
-  }
-  
-  public static SortedMap<FileRef,DataFileValue> getDataFileSizes(KeyExtent extent, TCredentials credentials) throws IOException {
-    TreeMap<FileRef,DataFileValue> sizes = new TreeMap<FileRef,DataFileValue>();
-    
-    Scanner mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, ID, Authorizations.EMPTY);
-    mdScanner.fetchColumnFamily(DATAFILE_COLUMN_FAMILY);
-    Text row = extent.getMetadataEntry();
-    VolumeManager fs = VolumeManagerImpl.get();
-    
-    Key endKey = new Key(row, DATAFILE_COLUMN_FAMILY, new Text(""));
-    endKey = endKey.followingKey(PartialKey.ROW_COLFAM);
-    
-    mdScanner.setRange(new Range(new Key(row), endKey));
-    for (Entry<Key,Value> entry : mdScanner) {
-      
-      if (!entry.getKey().getRow().equals(row))
-        break;
-      DataFileValue dfv = new DataFileValue(entry.getValue().get());
-      sizes.put(new FileRef(fs, entry.getKey()), dfv);
-    }
-    
-    return sizes;
-  }
-  
-  public static void addNewTablet(KeyExtent extent, String path, TServerInstance location, Map<FileRef,DataFileValue> datafileSizes,
-      Map<FileRef,Long> bulkLoadedFiles, TCredentials credentials, String time, long lastFlushID, long lastCompactID, ZooLock zooLock) {
-    Mutation m = extent.getPrevRowUpdateMutation();
-    
-    DIRECTORY_COLUMN.put(m, new Value(path.getBytes()));
-    TIME_COLUMN.put(m, new Value(time.getBytes()));
-    if (lastFlushID > 0)
-      FLUSH_COLUMN.put(m, new Value(("" + lastFlushID).getBytes()));
-    if (lastCompactID > 0)
-      COMPACT_COLUMN.put(m, new Value(("" + lastCompactID).getBytes()));
-    
-    if (location != null) {
-      m.put(CURRENT_LOCATION_COLUMN_FAMILY, location.asColumnQualifier(), location.asMutationValue());
-      m.putDelete(FUTURE_LOCATION_COLUMN_FAMILY, location.asColumnQualifier());
-    }
-    
-    for (Entry<FileRef,DataFileValue> entry : datafileSizes.entrySet()) {
-      m.put(DATAFILE_COLUMN_FAMILY, entry.getKey().meta(), new Value(entry.getValue().encode()));
-    }
-    
-    for (Entry<FileRef,Long> entry : bulkLoadedFiles.entrySet()) {
-      byte[] tidBytes = Long.toString(entry.getValue()).getBytes();
-      m.put(BULKFILE_COLUMN_FAMILY, entry.getKey().meta(), new Value(tidBytes));
-    }
-    
-    update(credentials, zooLock, m, extent);
-  }
-  
-  public static void rollBackSplit(Text metadataEntry, Text oldPrevEndRow, TCredentials credentials, ZooLock zooLock) {
-    KeyExtent ke = new KeyExtent(metadataEntry, oldPrevEndRow);
-    Mutation m = ke.getPrevRowUpdateMutation();
-    SPLIT_RATIO_COLUMN.putDelete(m);
-    OLD_PREV_ROW_COLUMN.putDelete(m);
-    update(credentials, zooLock, m, new KeyExtent(metadataEntry, (Text) null));
-  }
-  
-  public static void splitTablet(KeyExtent extent, Text oldPrevEndRow, double splitRatio, TCredentials credentials, ZooLock zooLock) {
-    Mutation m = extent.getPrevRowUpdateMutation(); //
-    
-    SPLIT_RATIO_COLUMN.put(m, new Value(Double.toString(splitRatio).getBytes()));
-    
-    OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(oldPrevEndRow));
-    CHOPPED_COLUMN.putDelete(m);
-    update(credentials, zooLock, m, extent);
-  }
-  
-  public static void finishSplit(Text metadataEntry, Map<FileRef,DataFileValue> datafileSizes, List<FileRef> highDatafilesToRemove, TCredentials credentials,
-      ZooLock zooLock) {
-    Mutation m = new Mutation(metadataEntry);
-    SPLIT_RATIO_COLUMN.putDelete(m);
-    OLD_PREV_ROW_COLUMN.putDelete(m);
-    CHOPPED_COLUMN.putDelete(m);
-    
-    for (Entry<FileRef,DataFileValue> entry : datafileSizes.entrySet()) {
-      m.put(DATAFILE_COLUMN_FAMILY, entry.getKey().meta(), new Value(entry.getValue().encode()));
-    }
-    
-    for (FileRef pathToRemove : highDatafilesToRemove) {
-      m.putDelete(DATAFILE_COLUMN_FAMILY, pathToRemove.meta());
-    }
-    
-    update(credentials, zooLock, m, new KeyExtent(metadataEntry, (Text) null));
-  }
-  
-  public static void finishSplit(KeyExtent extent, Map<FileRef,DataFileValue> datafileSizes, List<FileRef> highDatafilesToRemove, TCredentials credentials,
-      ZooLock zooLock) {
-    finishSplit(extent.getMetadataEntry(), datafileSizes, highDatafilesToRemove, credentials, zooLock);
-  }
-  
-  public static void replaceDatafiles(KeyExtent extent, Set<FileRef> datafilesToDelete, Set<FileRef> scanFiles, FileRef path, Long compactionId,
-      DataFileValue size, TCredentials credentials, String address, TServerInstance lastLocation, ZooLock zooLock) throws IOException {
-    replaceDatafiles(extent, datafilesToDelete, scanFiles, path, compactionId, size, credentials, address, lastLocation, zooLock, true);
-  }
-  
-  public static void replaceDatafiles(KeyExtent extent, Set<FileRef> datafilesToDelete, Set<FileRef> scanFiles, FileRef path, Long compactionId,
-      DataFileValue size, TCredentials credentials, String address, TServerInstance lastLocation, ZooLock zooLock, boolean insertDeleteFlags)
-      throws IOException {
-    
-    if (insertDeleteFlags) {
-      // add delete flags for those paths before the data file reference is removed
-      addDeleteEntries(extent, datafilesToDelete, credentials);
-    }
-    
-    // replace data file references to old mapfiles with the new mapfiles
-    Mutation m = new Mutation(extent.getMetadataEntry());
-    
-    for (FileRef pathToRemove : datafilesToDelete)
-      m.putDelete(DATAFILE_COLUMN_FAMILY, pathToRemove.meta());
-    
-    for (FileRef scanFile : scanFiles)
-      m.put(SCANFILE_COLUMN_FAMILY, scanFile.meta(), new Value("".getBytes()));
-    
-    if (size.getNumEntries() > 0)
-      m.put(DATAFILE_COLUMN_FAMILY, path.meta(), new Value(size.encode()));
-    
-    if (compactionId != null)
-      COMPACT_COLUMN.put(m, new Value(("" + compactionId).getBytes()));
-    
-    TServerInstance self = getTServerInstance(address, zooLock);
-    self.putLastLocation(m);
-    
-    // remove the old location
-    if (lastLocation != null && !lastLocation.equals(self))
-      lastLocation.clearLastLocation(m);
-    
-    update(credentials, zooLock, m, extent);
-  }
-  
-  public static void addDeleteEntries(KeyExtent extent, Set<FileRef> datafilesToDelete, TCredentials credentials) throws IOException {
-    
-    String tableId = extent.getTableId().toString();
-    
-    // TODO could use batch writer,would need to handle failure and retry like update does - ACCUMULO-1294
-    for (FileRef pathToRemove : datafilesToDelete) {
-      update(credentials, createDeleteMutation(tableId, pathToRemove.path().toString()), extent);
-    }
-  }
-  
-  public static void addDeleteEntry(String tableId, String path) throws IOException {
-    update(SecurityConstants.getSystemCredentials(), createDeleteMutation(tableId, path), new KeyExtent(new Text(tableId), null, null));
-  }
-  
-  public static Mutation createDeleteMutation(String tableId, String pathToRemove) throws IOException {
-    String prefix = DELETED_RANGE.getStartKey().getRow().toString();
-    
-    if (!pathToRemove.contains(":")) {
-      if (pathToRemove.startsWith("../"))
-        pathToRemove = pathToRemove.substring(2);
-      else
-        pathToRemove = "/" + tableId + "/" + pathToRemove;
-    }
-    
-    Path path = VolumeManagerImpl.get().getFullPath(ServerConstants.getTablesDirs(), pathToRemove);
-    Mutation delFlag = new Mutation(new Text(prefix + path.toString()));
-    delFlag.put(EMPTY_TEXT, EMPTY_TEXT, new Value(new byte[] {}));
-    return delFlag;
-  }
-  
-  public static void removeScanFiles(KeyExtent extent, Set<FileRef> scanFiles, TCredentials credentials, ZooLock zooLock) {
-    Mutation m = new Mutation(extent.getMetadataEntry());
-    
-    for (FileRef pathToRemove : scanFiles)
-      m.putDelete(SCANFILE_COLUMN_FAMILY, pathToRemove.meta());
-    
-    update(credentials, zooLock, m, extent);
-  }
-  
-  private static KeyExtent fixSplit(Text table, Text metadataEntry, Text metadataPrevEndRow, Value oper, double splitRatio, TServerInstance tserver,
-      TCredentials credentials, String time, long initFlushID, long initCompactID, ZooLock lock) throws AccumuloException, IOException {
-    if (metadataPrevEndRow == null)
-      // something is wrong, this should not happen... if a tablet is split, it will always have a
-      // prev end row....
-      throw new AccumuloException("Split tablet does not have prev end row, something is amiss, extent = " + metadataEntry);
-    
-    // check to see if prev tablet exist in metadata tablet
-    Key prevRowKey = new Key(new Text(KeyExtent.getMetadataEntry(table, metadataPrevEndRow)));
-    
-    ScannerImpl scanner2 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, ID, Authorizations.EMPTY);
-    scanner2.setRange(new Range(prevRowKey, prevRowKey.followingKey(PartialKey.ROW)));
-    
-    VolumeManager fs = VolumeManagerImpl.get();
-    if (!scanner2.iterator().hasNext()) {
-      log.info("Rolling back incomplete split " + metadataEntry + " " + metadataPrevEndRow);
-      rollBackSplit(metadataEntry, KeyExtent.decodePrevEndRow(oper), credentials, lock);
-      return new KeyExtent(metadataEntry, KeyExtent.decodePrevEndRow(oper));
-    } else {
-      log.info("Finishing incomplete split " + metadataEntry + " " + metadataPrevEndRow);
-      
-      List<FileRef> highDatafilesToRemove = new ArrayList<FileRef>();
-      
-      Scanner scanner3 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, ID, Authorizations.EMPTY);
-      Key rowKey = new Key(metadataEntry);
-      
-      SortedMap<FileRef,DataFileValue> origDatafileSizes = new TreeMap<FileRef,DataFileValue>();
-      SortedMap<FileRef,DataFileValue> highDatafileSizes = new TreeMap<FileRef,DataFileValue>();
-      SortedMap<FileRef,DataFileValue> lowDatafileSizes = new TreeMap<FileRef,DataFileValue>();
-      scanner3.fetchColumnFamily(DATAFILE_COLUMN_FAMILY);
-      scanner3.setRange(new Range(rowKey, rowKey.followingKey(PartialKey.ROW)));
-      
-      for (Entry<Key,Value> entry : scanner3) {
-        if (entry.getKey().compareColumnFamily(DATAFILE_COLUMN_FAMILY) == 0) {
-          origDatafileSizes.put(new FileRef(fs, entry.getKey()), new DataFileValue(entry.getValue().get()));
-        }
-      }
-      
-      splitDatafiles(table, metadataPrevEndRow, splitRatio, new HashMap<FileRef,FileUtil.FileInfo>(), origDatafileSizes, lowDatafileSizes, highDatafileSizes,
-          highDatafilesToRemove);
-      
-      MetadataTable.finishSplit(metadataEntry, highDatafileSizes, highDatafilesToRemove, credentials, lock);
-      
-      return new KeyExtent(metadataEntry, KeyExtent.encodePrevEndRow(metadataPrevEndRow));
-    }
-    
-  }
-  
-  public static void splitDatafiles(Text table, Text midRow, double splitRatio, Map<FileRef,FileUtil.FileInfo> firstAndLastRows,
-      SortedMap<FileRef,DataFileValue> datafiles, SortedMap<FileRef,DataFileValue> lowDatafileSizes, SortedMap<FileRef,DataFileValue> highDatafileSizes,
-      List<FileRef> highDatafilesToRemove) {
-    
-    for (Entry<FileRef,DataFileValue> entry : datafiles.entrySet()) {
-      
-      Text firstRow = null;
-      Text lastRow = null;
-      
-      boolean rowsKnown = false;
-      
-      FileUtil.FileInfo mfi = firstAndLastRows.get(entry.getKey());
-      
-      if (mfi != null) {
-        firstRow = mfi.getFirstRow();
-        lastRow = mfi.getLastRow();
-        rowsKnown = true;
-      }
-      
-      if (rowsKnown && firstRow.compareTo(midRow) > 0) {
-        // only in high
-        long highSize = entry.getValue().getSize();
-        long highEntries = entry.getValue().getNumEntries();
-        highDatafileSizes.put(entry.getKey(), new DataFileValue(highSize, highEntries, entry.getValue().getTime()));
-      } else if (rowsKnown && lastRow.compareTo(midRow) <= 0) {
-        // only in low
-        long lowSize = entry.getValue().getSize();
-        long lowEntries = entry.getValue().getNumEntries();
-        lowDatafileSizes.put(entry.getKey(), new DataFileValue(lowSize, lowEntries, entry.getValue().getTime()));
-        
-        highDatafilesToRemove.add(entry.getKey());
-      } else {
-        long lowSize = (long) Math.floor((entry.getValue().getSize() * splitRatio));
-        long lowEntries = (long) Math.floor((entry.getValue().getNumEntries() * splitRatio));
-        lowDatafileSizes.put(entry.getKey(), new DataFileValue(lowSize, lowEntries, entry.getValue().getTime()));
-        
-        long highSize = (long) Math.ceil((entry.getValue().getSize() * (1.0 - splitRatio)));
-        long highEntries = (long) Math.ceil((entry.getValue().getNumEntries() * (1.0 - splitRatio)));
-        highDatafileSizes.put(entry.getKey(), new DataFileValue(highSize, highEntries, entry.getValue().getTime()));
-      }
-    }
-  }
-  
-  public static KeyExtent fixSplit(Text metadataEntry, SortedMap<ColumnFQ,Value> columns, TServerInstance tserver, TCredentials credentials, ZooLock lock)
-      throws AccumuloException, IOException {
-    log.info("Incomplete split " + metadataEntry + " attempting to fix");
-    
-    Value oper = columns.get(OLD_PREV_ROW_COLUMN);
-    
-    if (columns.get(SPLIT_RATIO_COLUMN) == null) {
-      throw new IllegalArgumentException("Metadata entry does not have split ratio (" + metadataEntry + ")");
-    }
-    
-    double splitRatio = Double.parseDouble(new String(columns.get(SPLIT_RATIO_COLUMN).get()));
-    
-    Value prevEndRowIBW = columns.get(PREV_ROW_COLUMN);
-    
-    if (prevEndRowIBW == null) {
-      throw new IllegalArgumentException("Metadata entry does not have prev row (" + metadataEntry + ")");
-    }
-    
-    Value time = columns.get(TIME_COLUMN);
-    
-    if (time == null) {
-      throw new IllegalArgumentException("Metadata entry does not have time (" + metadataEntry + ")");
-    }
-    
-    Value flushID = columns.get(FLUSH_COLUMN);
-    long initFlushID = -1;
-    if (flushID != null)
-      initFlushID = Long.parseLong(flushID.toString());
-    
-    Value compactID = columns.get(COMPACT_COLUMN);
-    long initCompactID = -1;
-    if (compactID != null)
-      initCompactID = Long.parseLong(compactID.toString());
-    
-    Text metadataPrevEndRow = KeyExtent.decodePrevEndRow(prevEndRowIBW);
-    
-    Text table = (new KeyExtent(metadataEntry, (Text) null)).getTableId();
-    
-    return fixSplit(table, metadataEntry, metadataPrevEndRow, oper, splitRatio, tserver, credentials, time.toString(), initFlushID, initCompactID, lock);
-  }
-  
-  public static void deleteTable(String tableId, boolean insertDeletes, TCredentials credentials, ZooLock lock) throws AccumuloException, IOException {
-    Scanner ms = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, ID, Authorizations.EMPTY);
-    Text tableIdText = new Text(tableId);
-    BatchWriter bw = new BatchWriterImpl(HdfsZooInstance.getInstance(), credentials, ID, new BatchWriterConfig().setMaxMemory(1000000)
-        .setMaxLatency(120000l, TimeUnit.MILLISECONDS).setMaxWriteThreads(2));
-    
-    // scan metadata for our table and delete everything we find
-    Mutation m = null;
-    ms.setRange(new KeyExtent(tableIdText, null, null).toMetadataRange());
-    
-    // insert deletes before deleting data from !METADATA... this makes the code fault tolerant
-    if (insertDeletes) {
-      
-      ms.fetchColumnFamily(DATAFILE_COLUMN_FAMILY);
-      DIRECTORY_COLUMN.fetch(ms);
-      
-      for (Entry<Key,Value> cell : ms) {
-        Key key = cell.getKey();
-        
-        if (key.getColumnFamily().equals(DATAFILE_COLUMN_FAMILY)) {
-          FileRef ref = new FileRef(VolumeManagerImpl.get(), key);
-          bw.addMutation(createDeleteMutation(tableId, ref.meta().toString()));
-        }
-        
-        if (DIRECTORY_COLUMN.hasColumns(key)) {
-          bw.addMutation(createDeleteMutation(tableId, cell.getValue().toString()));
-        }
-      }
-      
-      bw.flush();
-      
-      ms.clearColumns();
-    }
-    
-    for (Entry<Key,Value> cell : ms) {
-      Key key = cell.getKey();
-      
-      if (m == null) {
-        m = new Mutation(key.getRow());
-        if (lock != null)
-          putLockID(lock, m);
-      }
-      
-      if (key.getRow().compareTo(m.getRow(), 0, m.getRow().length) != 0) {
-        bw.addMutation(m);
-        m = new Mutation(key.getRow());
-        if (lock != null)
-          putLockID(lock, m);
-      }
-      m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
-    }
-    
-    if (m != null)
-      bw.addMutation(m);
-    
-    bw.close();
-  }
-  
-  public static class LogEntry {
-    public KeyExtent extent;
-    public long timestamp;
-    public String server;
-    public String filename;
-    public int tabletId;
-    public Collection<String> logSet;
-    
-    @Override
-    public String toString() {
-      return extent.toString() + " " + filename + " (" + tabletId + ")";
-    }
-    
-    public String getName() {
-      return server + "/" + filename;
-    }
-    
-    public byte[] toBytes() throws IOException {
-      DataOutputBuffer out = new DataOutputBuffer();
-      extent.write(out);
-      out.writeLong(timestamp);
-      out.writeUTF(server);
-      out.writeUTF(filename.toString());
-      out.write(tabletId);
-      out.write(logSet.size());
-      for (String s : logSet) {
-        out.writeUTF(s);
-      }
-      return Arrays.copyOf(out.getData(), out.getLength());
-    }
-    
-    public void fromBytes(byte bytes[]) throws IOException {
-      DataInputBuffer inp = new DataInputBuffer();
-      inp.reset(bytes, bytes.length);
-      extent = new KeyExtent();
-      extent.readFields(inp);
-      timestamp = inp.readLong();
-      server = inp.readUTF();
-      filename = inp.readUTF();
-      tabletId = inp.read();
-      int count = inp.read();
-      ArrayList<String> logSet = new ArrayList<String>(count);
-      for (int i = 0; i < count; i++)
-        logSet.add(inp.readUTF());
-      this.logSet = logSet;
-    }
-    
-  }
-  
-  private static String getZookeeperLogLocation() {
-    return ZooUtil.getRoot(HdfsZooInstance.getInstance()) + RootTable.ZROOT_TABLET_WALOGS;
-  }
-  
-  public static void addLogEntry(TCredentials credentials, LogEntry entry, ZooLock zooLock) {
-    if (entry.extent.isRootTablet()) {
-      String root = getZookeeperLogLocation();
-      while (true) {
-        try {
-          IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-          if (zoo.isLockHeld(zooLock.getLockID())) {
-            String[] parts = entry.filename.split("/");
-            String uniqueId = parts[parts.length - 1];
-            zoo.putPersistentData(root + "/" + uniqueId, entry.toBytes(), NodeExistsPolicy.OVERWRITE);
-          }
-          break;
-        } catch (KeeperException e) {
-          log.error(e, e);
-        } catch (InterruptedException e) {
-          log.error(e, e);
-        } catch (IOException e) {
-          log.error(e, e);
-        }
-        UtilWaitThread.sleep(1000);
-      }
-    } else {
-      String value = StringUtil.join(entry.logSet, ";") + "|" + entry.tabletId;
-      Mutation m = new Mutation(entry.extent.getMetadataEntry());
-      m.put(LOG_COLUMN_FAMILY, new Text(entry.server + "/" + entry.filename), new Value(value.getBytes()));
-      update(credentials, zooLock, m, entry.extent);
-    }
-  }
-  
-  public static LogEntry entryFromKeyValue(Key key, Value value) {
-    MetadataTable.LogEntry e = new MetadataTable.LogEntry();
-    e.extent = new KeyExtent(key.getRow(), EMPTY_TEXT);
-    String[] parts = key.getColumnQualifier().toString().split("/", 2);
-    e.server = parts[0];
-    e.filename = parts[1];
-    parts = value.toString().split("\\|");
-    e.tabletId = Integer.parseInt(parts[1]);
-    e.logSet = Arrays.asList(parts[0].split(";"));
-    e.timestamp = key.getTimestamp();
-    return e;
-  }
-  
-  public static Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>> getFileAndLogEntries(TCredentials credentials, KeyExtent extent) throws KeeperException,
-      InterruptedException, IOException {
-    ArrayList<LogEntry> result = new ArrayList<LogEntry>();
-    TreeMap<FileRef,DataFileValue> sizes = new TreeMap<FileRef,DataFileValue>();
-    
-    VolumeManager fs = VolumeManagerImpl.get();
-    if (extent.isRootTablet()) {
-      getRootLogEntries(result);
-      Path rootDir = new Path(ServerConstants.getRootTabletDir());
-      rootDir = rootDir.makeQualified(fs.getDefaultVolume());
-      FileStatus[] files = fs.listStatus(rootDir);
-      for (FileStatus fileStatus : files) {
-        if (fileStatus.getPath().toString().endsWith("_tmp")) {
-          continue;
-        }
-        DataFileValue dfv = new DataFileValue(0, 0);
-        sizes.put(new FileRef(fileStatus.getPath().toString(), fileStatus.getPath()), dfv);
-      }
-      
-    } else {
-      String systemTableToCheck = extent.isMeta() ? RootTable.ID : ID;
-      Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, systemTableToCheck, Authorizations.EMPTY);
-      scanner.fetchColumnFamily(LOG_COLUMN_FAMILY);
-      scanner.fetchColumnFamily(DATAFILE_COLUMN_FAMILY);
-      scanner.setRange(extent.toMetadataRange());
-      
-      for (Entry<Key,Value> entry : scanner) {
-        if (!entry.getKey().getRow().equals(extent.getMetadataEntry())) {
-          throw new RuntimeException("Unexpected row " + entry.getKey().getRow() + " expected " + extent.getMetadataEntry());
-        }
-        
-        if (entry.getKey().getColumnFamily().equals(LOG_COLUMN_FAMILY)) {
-          result.add(entryFromKeyValue(entry.getKey(), entry.getValue()));
-        } else if (entry.getKey().getColumnFamily().equals(DATAFILE_COLUMN_FAMILY)) {
-          DataFileValue dfv = new DataFileValue(entry.getValue().get());
-          sizes.put(new FileRef(fs, entry.getKey()), dfv);
-        } else {
-          throw new RuntimeException("Unexpected col fam " + entry.getKey().getColumnFamily());
-        }
-      }
-    }
-    
-    return new Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>>(result, sizes);
-  }
-  
-  public static List<LogEntry> getLogEntries(TCredentials credentials, KeyExtent extent) throws IOException, KeeperException, InterruptedException {
-    log.info("Scanning logging entries for " + extent);
-    ArrayList<LogEntry> result = new ArrayList<LogEntry>();
-    if (extent.equals(RootTable.EXTENT)) {
-      log.info("Getting logs for root tablet from zookeeper");
-      getRootLogEntries(result);
-    } else {
-      log.info("Scanning metadata for logs used for tablet " + extent);
-      Scanner scanner = getTabletLogScanner(credentials, extent);
-      Text pattern = extent.getMetadataEntry();
-      for (Entry<Key,Value> entry : scanner) {
-        Text row = entry.getKey().getRow();
-        if (entry.getKey().getColumnFamily().equals(LOG_COLUMN_FAMILY)) {
-          if (row.equals(pattern)) {
-            result.add(entryFromKeyValue(entry.getKey(), entry.getValue()));
-          }
-        }
-      }
-    }
-    
-    Collections.sort(result, new Comparator<LogEntry>() {
-      @Override
-      public int compare(LogEntry o1, LogEntry o2) {
-        long diff = o1.timestamp - o2.timestamp;
-        if (diff < 0)
-          return -1;
-        if (diff > 0)
-          return 1;
-        return 0;
-      }
-    });
-    log.info("Returning logs " + result + " for extent " + extent);
-    return result;
-  }
-  
-  private static void getRootLogEntries(ArrayList<LogEntry> result) throws KeeperException, InterruptedException, IOException {
-    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-    String root = getZookeeperLogLocation();
-    // there's a little race between getting the children and fetching
-    // the data. The log can be removed in between.
-    while (true) {
-      result.clear();
-      for (String child : zoo.getChildren(root)) {
-        LogEntry e = new LogEntry();
-        try {
-          e.fromBytes(zoo.getData(root + "/" + child, null));
-          result.add(e);
-        } catch (KeeperException.NoNodeException ex) {
-          continue;
-        }
-      }
-      break;
-    }
-  }
-  
-  private static Scanner getTabletLogScanner(TCredentials credentials, KeyExtent extent) {
-    String tableId = ID;
-    if (extent.isMeta())
-      tableId = RootTable.ID;
-    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, tableId, Authorizations.EMPTY);
-    scanner.fetchColumnFamily(LOG_COLUMN_FAMILY);
-    Text start = extent.getMetadataEntry();
-    Key endKey = new Key(start, LOG_COLUMN_FAMILY);
-    endKey = endKey.followingKey(PartialKey.ROW_COLFAM);
-    scanner.setRange(new Range(new Key(start), endKey));
-    return scanner;
-  }
-  
-  static class LogEntryIterator implements Iterator<LogEntry> {
-    
-    Iterator<LogEntry> rootTabletEntries = null;
-    Iterator<Entry<Key,Value>> metadataEntries = null;
-    
-    LogEntryIterator(TCredentials creds) throws IOException, KeeperException, InterruptedException {
-      rootTabletEntries = getLogEntries(creds, RootTable.EXTENT).iterator();
-      try {
-        Scanner scanner = HdfsZooInstance.getInstance().getConnector(creds.getPrincipal(), CredentialHelper.extractToken(creds))
-            .createScanner(NAME, Authorizations.EMPTY);
-        log.info("Setting range to " + KEYSPACE);
-        scanner.setRange(KEYSPACE);
-        scanner.fetchColumnFamily(LOG_COLUMN_FAMILY);
-        metadataEntries = scanner.iterator();
-      } catch (Exception ex) {
-        throw new IOException(ex);
-      }
-    }
-    
-    @Override
-    public boolean hasNext() {
-      return rootTabletEntries.hasNext() || metadataEntries.hasNext();
-    }
-    
-    @Override
-    public LogEntry next() {
-      if (rootTabletEntries.hasNext()) {
-        return rootTabletEntries.next();
-      }
-      Entry<Key,Value> entry = metadataEntries.next();
-      return entryFromKeyValue(entry.getKey(), entry.getValue());
-    }
-    
-    @Override
-    public void remove() {
-      throw new UnsupportedOperationException();
-    }
-  }
-  
-  public static Iterator<LogEntry> getLogEntries(TCredentials creds) throws IOException, KeeperException, InterruptedException {
-    return new LogEntryIterator(creds);
-  }
-  
-  public static void removeUnusedWALEntries(KeyExtent extent, List<LogEntry> logEntries, ZooLock zooLock) {
-    for (LogEntry entry : logEntries) {
-      if (entry.extent.isRootTablet()) {
-        String root = getZookeeperLogLocation();
-        while (true) {
-          try {
-            IZooReaderWriter zoo = ZooReaderWriter.getInstance();
-            if (zoo.isLockHeld(zooLock.getLockID()))
-              zoo.recursiveDelete(root + "/" + entry.filename, NodeMissingPolicy.SKIP);
-            break;
-          } catch (Exception e) {
-            log.error(e, e);
-          }
-          UtilWaitThread.sleep(1000);
-        }
-      } else {
-        Mutation m = new Mutation(entry.extent.getMetadataEntry());
-        m.putDelete(LOG_COLUMN_FAMILY, new Text(entry.server + "/" + entry.filename));
-        update(SecurityConstants.getSystemCredentials(), zooLock, m, entry.extent);
-      }
-    }
-  }
-  
-  private static void getFiles(Set<String> files, Map<Key,Value> tablet, String srcTableId) {
-    for (Entry<Key,Value> entry : tablet.entrySet()) {
-      if (entry.getKey().getColumnFamily().equals(DATAFILE_COLUMN_FAMILY)) {
-        String cf = entry.getKey().getColumnQualifier().toString();
-        if (srcTableId != null && !cf.startsWith("../") && !cf.contains(":")) {
-          cf = "../" + srcTableId + entry.getKey().getColumnQualifier();
-        }
-        files.add(cf);
-      }
-    }
-  }
-  
-  private static Mutation createCloneMutation(String srcTableId, String tableId, Map<Key,Value> tablet) {
-    
-    KeyExtent ke = new KeyExtent(tablet.keySet().iterator().next().getRow(), (Text) null);
-    Mutation m = new Mutation(KeyExtent.getMetadataEntry(new Text(tableId), ke.getEndRow()));
-    
-    for (Entry<Key,Value> entry : tablet.entrySet()) {
-      if (entry.getKey().getColumnFamily().equals(DATAFILE_COLUMN_FAMILY)) {
-        String cf = entry.getKey().getColumnQualifier().toString();
-        if (!cf.startsWith("../") && !cf.contains(":"))
-          cf = "../" + srcTableId + entry.getKey().getColumnQualifier();
-        m.put(entry.getKey().getColumnFamily(), new Text(cf), entry.getValue());
-      } else if (entry.getKey().getColumnFamily().equals(CURRENT_LOCATION_COLUMN_FAMILY)) {
-        m.put(LAST_LOCATION_COLUMN_FAMILY, entry.getKey().getColumnQualifier(), entry.getValue());
-      } else if (entry.getKey().getColumnFamily().equals(LAST_LOCATION_COLUMN_FAMILY)) {
-        // skip
-      } else {
-        m.put(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier(), entry.getValue());
-      }
-    }
-    return m;
-  }
-  
-  private static Scanner createCloneScanner(String tableId, Connector conn) throws TableNotFoundException {
-    Scanner mscanner = new IsolatedScanner(conn.createScanner(NAME, Authorizations.EMPTY));
-    mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
-    mscanner.fetchColumnFamily(DATAFILE_COLUMN_FAMILY);
-    mscanner.fetchColumnFamily(CURRENT_LOCATION_COLUMN_FAMILY);
-    mscanner.fetchColumnFamily(LAST_LOCATION_COLUMN_FAMILY);
-    mscanner.fetchColumnFamily(CLONED_COLUMN_FAMILY);
-    PREV_ROW_COLUMN.fetch(mscanner);
-    TIME_COLUMN.fetch(mscanner);
-    return mscanner;
-  }
-  
-  static void initializeClone(String srcTableId, String tableId, Connector conn, BatchWriter bw) throws TableNotFoundException, MutationsRejectedException {
-    TabletIterator ti = new TabletIterator(createCloneScanner(srcTableId, conn), new KeyExtent(new Text(srcTableId), null, null).toMetadataRange(), true, true);
-    
-    if (!ti.hasNext())
-      throw new RuntimeException(" table deleted during clone?  srcTableId = " + srcTableId);
-    
-    while (ti.hasNext())
-      bw.addMutation(createCloneMutation(srcTableId, tableId, ti.next()));
-    
-    bw.flush();
-  }
-  
-  static int compareEndRows(Text endRow1, Text endRow2) {
-    return new KeyExtent(new Text("0"), endRow1, null).compareTo(new KeyExtent(new Text("0"), endRow2, null));
-  }
-  
-  static int checkClone(String srcTableId, String tableId, Connector conn, BatchWriter bw) throws TableNotFoundException, MutationsRejectedException {
-    TabletIterator srcIter = new TabletIterator(createCloneScanner(srcTableId, conn), new KeyExtent(new Text(srcTableId), null, null).toMetadataRange(), true,
-        true);
-    TabletIterator cloneIter = new TabletIterator(createCloneScanner(tableId, conn), new KeyExtent(new Text(tableId), null, null).toMetadataRange(), true, true);
-    
-    if (!cloneIter.hasNext() || !srcIter.hasNext())
-      throw new RuntimeException(" table deleted during clone?  srcTableId = " + srcTableId + " tableId=" + tableId);
-    
-    int rewrites = 0;
-    
-    while (cloneIter.hasNext()) {
-      Map<Key,Value> cloneTablet = cloneIter.next();
-      Text cloneEndRow = new KeyExtent(cloneTablet.keySet().iterator().next().getRow(), (Text) null).getEndRow();
-      HashSet<String> cloneFiles = new HashSet<String>();
-      
-      boolean cloneSuccessful = false;
-      for (Entry<Key,Value> entry : cloneTablet.entrySet()) {
-        if (entry.getKey().getColumnFamily().equals(CLONED_COLUMN_FAMILY)) {
-          cloneSuccessful = true;
-          break;
-        }
-      }
-      
-      if (!cloneSuccessful)
-        getFiles(cloneFiles, cloneTablet, null);
-      
-      List<Map<Key,Value>> srcTablets = new ArrayList<Map<Key,Value>>();
-      Map<Key,Value> srcTablet = srcIter.next();
-      srcTablets.add(srcTablet);
-      
-      Text srcEndRow = new KeyExtent(srcTablet.keySet().iterator().next().getRow(), (Text) null).getEndRow();
-      
-      int cmp = compareEndRows(cloneEndRow, srcEndRow);
-      if (cmp < 0)
-        throw new TabletIterator.TabletDeletedException("Tablets deleted from src during clone : " + cloneEndRow + " " + srcEndRow);
-      
-      HashSet<String> srcFiles = new HashSet<String>();
-      if (!cloneSuccessful)
-        getFiles(srcFiles, srcTablet, srcTableId);
-      
-      while (cmp > 0) {
-        srcTablet = srcIter.next();
-        srcTablets.add(srcTablet);
-        srcEndRow = new KeyExtent(srcTablet.keySet().iterator().next().getRow(), (Text) null).getEndRow();
-        cmp = compareEndRows(cloneEndRow, srcEndRow);
-        if (cmp < 0)
-          throw new TabletIterator.TabletDeletedException("Tablets deleted from src during clone : " + cloneEndRow + " " + srcEndRow);
-        
-        if (!cloneSuccessful)
-          getFiles(srcFiles, srcTablet, srcTableId);
-      }
-      
-      if (cloneSuccessful)
-        continue;
-      
-      if (!srcFiles.containsAll(cloneFiles)) {
-        // delete existing cloned tablet entry
-        Mutation m = new Mutation(cloneTablet.keySet().iterator().next().getRow());
-        
-        for (Entry<Key,Value> entry : cloneTablet.entrySet()) {
-          Key k = entry.getKey();
-          m.putDelete(k.getColumnFamily(), k.getColumnQualifier(), k.getTimestamp());
-        }
-        
-        bw.addMutation(m);
-        
-        for (Map<Key,Value> st : srcTablets)
-          bw.addMutation(createCloneMutation(srcTableId, tableId, st));
-        
-        rewrites++;
-      } else {
-        // write out marker that this tablet was successfully cloned
-        Mutation m = new Mutation(cloneTablet.keySet().iterator().next().getRow());
-        m.put(CLONED_COLUMN_FAMILY, new Text(""), new Value("OK".getBytes()));
-        bw.addMutation(m);
-      }
-    }
-    
-    bw.flush();
-    return rewrites;
-  }
-  
-  public static void cloneTable(Instance instance, String srcTableId, String tableId) throws Exception {
-    
-    Connector conn = instance.getConnector(SecurityConstants.SYSTEM_PRINCIPAL, SecurityConstants.getSystemToken());
-    BatchWriter bw = conn.createBatchWriter(NAME, new BatchWriterConfig());
-    
-    while (true) {
-      
-      try {
-        initializeClone(srcTableId, tableId, conn, bw);
-        
-        // the following loop looks for changes to the files that occurred during the copy... if files were dereferenced, they could have been GCed
-        
-        while (true) {
-          int rewrites = checkClone(srcTableId, tableId, conn, bw);
-          
-          if (rewrites == 0)
-            break;
-        }
-        
-        bw.flush();
-        break;
-        
-      } catch (TabletIterator.TabletDeletedException tde) {
-        // tablets were merged in the src table
-        bw.flush();
-        
-        // delete what we have cloned and try again
-        deleteTable(tableId, false, SecurityConstants.getSystemCredentials(), null);
-        
-        log.debug("Tablets merged in table " + srcTableId + " while attempting to clone, trying again");
-        
-        UtilWaitThread.sleep(100);
-      }
-    }
-    
-    // delete the clone markers and create directory entries
-    Scanner mscanner = conn.createScanner(NAME, Authorizations.EMPTY);
-    mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
-    mscanner.fetchColumnFamily(CLONED_COLUMN_FAMILY);
-    
-    int dirCount = 0;
-    
-    for (Entry<Key,Value> entry : mscanner) {
-      Key k = entry.getKey();
-      Mutation m = new Mutation(k.getRow());
-      m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
-      DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
-      bw.addMutation(m);
-    }
-    
-    bw.close();
-    
-  }
-  
-  public static void chopped(KeyExtent extent, ZooLock zooLock) {
-    Mutation m = new Mutation(extent.getMetadataEntry());
-    CHOPPED_COLUMN.put(m, new Value("chopped".getBytes()));
-    update(SecurityConstants.getSystemCredentials(), zooLock, m, extent);
-  }
-  
-  public static void removeBulkLoadEntries(Connector conn, String tableId, long tid) throws Exception {
-    Scanner mscanner = new IsolatedScanner(conn.createScanner(NAME, Authorizations.EMPTY));
-    mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
-    mscanner.fetchColumnFamily(BULKFILE_COLUMN_FAMILY);
-    BatchWriter bw = conn.createBatchWriter(NAME, new BatchWriterConfig());
-    for (Entry<Key,Value> entry : mscanner) {
-      log.debug("Looking at entry " + entry + " with tid " + tid);
-      if (Long.parseLong(entry.getValue().toString()) == tid) {
-        log.debug("deleting entry " + entry);
-        Mutation m = new Mutation(entry.getKey().getRow());
-        m.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
-        bw.addMutation(m);
-      }
-    }
-    bw.close();
-  }
-  
-  public static List<FileRef> getBulkFilesLoaded(Connector conn, KeyExtent extent, long tid) throws IOException {
-    List<FileRef> result = new ArrayList<FileRef>();
-    try {
-      VolumeManager fs = VolumeManagerImpl.get();
-      Scanner mscanner = new IsolatedScanner(conn.createScanner(extent.isMeta() ? RootTable.NAME : NAME, Authorizations.EMPTY));
-      mscanner.setRange(extent.toMetadataRange());
-      mscanner.fetchColumnFamily(BULKFILE_COLUMN_FAMILY);
-      for (Entry<Key,Value> entry : mscanner) {
-        if (Long.parseLong(entry.getValue().toString()) == tid) {
-          result.add(new FileRef(fs, entry.getKey()));
-        }
-      }
-      return result;
-    } catch (TableNotFoundException ex) {
-      // unlikely
-      throw new RuntimeException("Oh no! The metadata table has vanished!!");
-    }
-  }
-  
-  public static Map<FileRef,Long> getBulkFilesLoaded(TCredentials credentials, KeyExtent extent) throws IOException {
-    Text metadataRow = extent.getMetadataEntry();
-    Map<FileRef,Long> ret = new HashMap<FileRef,Long>();
-    
-    VolumeManager fs = VolumeManagerImpl.get();
-    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, extent.isMeta() ? RootTable.ID : ID, Authorizations.EMPTY);
-    scanner.setRange(new Range(metadataRow));
-    scanner.fetchColumnFamily(BULKFILE_COLUMN_FAMILY);
-    for (Entry<Key,Value> entry : scanner) {
-      Long tid = Long.parseLong(entry.getValue().toString());
-      ret.put(new FileRef(fs, entry.getKey()), tid);
-    }
-    return ret;
-  }
-  
-  public static void addBulkLoadInProgressFlag(String path) {
-    
-    Mutation m = new Mutation(BLIP_FLAG_PREFIX + path);
-    m.put(EMPTY_TEXT, EMPTY_TEXT, new Value(new byte[] {}));
-    
-    // new KeyExtent is only added to force update to write to the metadata table, not the root table
-    // because bulk loads aren't supported to the metadata table
-    update(SecurityConstants.getSystemCredentials(), m, new KeyExtent(new Text("anythingNotMetadata"), null, null));
-  }
-  
-  public static void removeBulkLoadInProgressFlag(String path) {
-    
-    Mutation m = new Mutation(BLIP_FLAG_PREFIX + path);
-    m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
-    
-    // new KeyExtent is only added to force update to write to the metadata table, not the root table
-    // because bulk loads aren't supported to the metadata table
-    update(SecurityConstants.getSystemCredentials(), m, new KeyExtent(new Text("anythingNotMetadata"), null, null));
-  }
-  
-  public static void moveMetaDeleteMarkers(Instance instance, TCredentials creds) {
-    if (true)
-      throw new UnsupportedOperationException();
-    // move delete markers from the normal delete keyspace to the root tablet delete keyspace if the files are for the !METADATA table
-    Scanner scanner = new ScannerImpl(instance, creds, ID, Authorizations.EMPTY);
-    scanner.setRange(new Range(DELETED_RANGE));
-    for (Entry<Key,Value> entry : scanner) {
-      String row = entry.getKey().getRow().toString();
-      if (row.startsWith(DELETED_RANGE.getStartKey().getRow().toString())) {
-        String filename = row.substring(DELETED_RANGE.getStartKey().getRow().toString().length());
-        // add the new entry first
-        log.info("Moving " + filename + " marker to the root tablet");
-        Mutation m = new Mutation(DELETED_RANGE.getStartKey().getRow().toString() + filename);
-        m.put(new byte[] {}, new byte[] {}, new byte[] {});
-        update(creds, m, null);
-        // remove the old entry
-        m = new Mutation(entry.getKey().getRow());
-        m.putDelete(new byte[] {}, new byte[] {});
-        update(creds, m, null);
-      } else {
-        break;
-      }
-    }
-    
-  }
-}
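
A note on the LogEntry serialization removed above: toBytes() writes tabletId and logSet.size() with DataOutput.write(int), which emits only the low-order byte of each value, and fromBytes() reads them back with read(), so any value above 255 would be silently truncated; writeUTF(filename.toString()) also calls toString() on what is already a String. A round-trip-safe sketch, assuming the same Hadoop org.apache.hadoop.io.DataOutputBuffer/DataInputBuffer types as the removed code, would pair writeInt with readInt:

    public byte[] toBytes() throws IOException {
      DataOutputBuffer out = new DataOutputBuffer();
      extent.write(out);
      out.writeLong(timestamp);
      out.writeUTF(server);
      out.writeUTF(filename);
      out.writeInt(tabletId);       // all four bytes, not just the low one
      out.writeInt(logSet.size());
      for (String s : logSet)
        out.writeUTF(s);
      return Arrays.copyOf(out.getData(), out.getLength());
    }

    public void fromBytes(byte[] bytes) throws IOException {
      DataInputBuffer inp = new DataInputBuffer();
      inp.reset(bytes, bytes.length);
      extent = new KeyExtent();
      extent.readFields(inp);
      timestamp = inp.readLong();
      server = inp.readUTF();
      filename = inp.readUTF();
      tabletId = inp.readInt();     // matches writeInt above
      int count = inp.readInt();
      ArrayList<String> logSet = new ArrayList<String>(count);
      for (int i = 0; i < count; i++)
        logSet.add(inp.readUTF());
      this.logSet = logSet;
    }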


[24/50] [abbrv] ACCUMULO-1537 completed the conversion of functional tests to IT; also converted ShellServerTest to an IT

Posted by ct...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/TableOperationsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/TableOperationsIT.java b/test/src/test/java/org/apache/accumulo/test/TableOperationsIT.java
index f0cdd01..227bed8 100644
--- a/test/src/test/java/org/apache/accumulo/test/TableOperationsIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/TableOperationsIT.java
@@ -72,7 +72,7 @@ public class TableOperationsIT {
     connector = accumuloCluster.getConnector(ROOT, ROOT_PASS);
   }
   
-  @Test
+  @Test(timeout=30*1000)
   public void getDiskUsageErrors() throws TableExistsException, AccumuloException, AccumuloSecurityException, TableNotFoundException, TException {
     connector.tableOperations().create("table1");
     List<DiskUsage> diskUsage = connector.tableOperations().getDiskUsage(Collections.singleton("table1"));
@@ -93,7 +93,7 @@ public class TableOperationsIT {
     } catch (TableNotFoundException e) {}
   }
   
-  @Test
+  @Test(timeout=30*1000)
   public void getDiskUsage() throws TableExistsException, AccumuloException, AccumuloSecurityException, TableNotFoundException, TException {
     
     connector.tableOperations().create("table1");
@@ -148,7 +148,7 @@ public class TableOperationsIT {
     connector.tableOperations().delete("table1");
   }
   
-  @Test
+  @Test(timeout=30*1000)
   public void createTable() throws TableExistsException, AccumuloException, AccumuloSecurityException, TableNotFoundException {
     connector.tableOperations().create("table1");
     Iterable<Map.Entry<String,String>> itrProps = connector.tableOperations().getProperties("table1");

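The recurring change in this commit is JUnit 4's timeout parameter: a test annotated with @Test(timeout = n) is run on a separate thread and failed if it has not completed within n milliseconds, so a hung mini-cluster operation aborts the test instead of stalling the build. A minimal, self-contained illustration (class and method names are made up for the example):

    import org.junit.Test;

    public class TimeoutExampleTest {
      // JUnit 4 fails this test if the body is still running after 30 seconds.
      @Test(timeout = 30 * 1000)
      public void finishesQuickly() throws Exception {
        Thread.sleep(100); // comfortably under the limit, so the test passes
      }
    }
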
http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/BinaryIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BinaryIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BinaryIT.java
index fb28715..e225073 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/BinaryIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/BinaryIT.java
@@ -28,7 +28,7 @@ import org.junit.Test;
 
 public class BinaryIT extends MacTest {
   
-  @Test
+  @Test(timeout=30*1000)
   public void test() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("bt");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
index d222991..9e8e5d3 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
@@ -49,15 +49,15 @@ public class BloomFilterIT extends MacTest {
     cfg.setDefaultMemory(500, MemoryUnit.MEGABYTE);
   }
   
-  @Test(timeout=500*1000)
+  @Test(timeout=200*1000)
   public void test() throws Exception {
     Connector c = getConnector();
     for (String table : "bt1 bt2 bt3 bt4".split(" ")) {
       c.tableOperations().create(table);
     }
-    write(c, "bt1", 1, 0, 1000000000, 100);
-    write(c, "bt2", 2, 0, 1000000000, 100);
-    write(c, "bt3", 3, 0, 1000000000, 100);
+    write(c, "bt1", 1, 0, 1000000000, 250);
+    write(c, "bt2", 2, 0, 1000000000, 250);
+    write(c, "bt3", 3, 0, 1000000000, 250);
     
     // test inserting an empty key
     BatchWriter bw = c.createBatchWriter("bt4", new BatchWriterConfig());
@@ -80,9 +80,9 @@ public class BloomFilterIT extends MacTest {
     FunctionalTestUtils.checkRFiles(c, "bt4", 1, 1, 1, 1);
     
     // these queries should only run quickly if bloom filters are working, so lets get a base
-    long t1 = query(c, "bt1", 1, 0, 1000000000, 100000, 100);
-    long t2 = query(c, "bt2", 2, 0, 1000000000, 100000, 100);
-    long t3 = query(c, "bt3", 3, 0, 1000000000, 100000, 100);
+    long t1 = query(c, "bt1", 1, 0, 1000000000, 100000, 250);
+    long t2 = query(c, "bt2", 2, 0, 1000000000, 100000, 250);
+    long t3 = query(c, "bt3", 3, 0, 1000000000, 100000, 250);
     
     c.tableOperations().setProperty("bt1", Property.TABLE_BLOOM_ENABLED.getKey(), "true");
     c.tableOperations().setProperty("bt1", Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), RowFunctor.class.getName());
@@ -102,9 +102,9 @@ public class BloomFilterIT extends MacTest {
     
     // these queries should only run quickly if bloom
     // filters are working
-    long tb1 = query(c, "bt1", 1, 0, 1000000000, 100000, 100);
-    long tb2 = query(c, "bt2", 2, 0, 1000000000, 100000, 100);
-    long tb3 = query(c, "bt3", 3, 0, 1000000000, 100000, 100);
+    long tb1 = query(c, "bt1", 1, 0, 1000000000, 100000, 250);
+    long tb2 = query(c, "bt2", 2, 0, 1000000000, 100000, 250);
+    long tb3 = query(c, "bt3", 3, 0, 1000000000, 100000, 250);
     
     timeCheck(t1, tb1);
     timeCheck(t2, tb2);

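The last argument of the write and query calls rises from 100 to 250 (while the overall timeout drops from 500 to 200 seconds), presumably to make the before/after timing comparison more robust. Each timeCheck call compares a table's query time without bloom filters (t1..t3) against the time with them enabled (tb1..tb3); the body of timeCheck is not shown in this hunk, but a hypothetical sketch of that style of assertion:

    // Hypothetical: fail unless enabling bloom filters clearly sped up queries.
    static void timeCheck(long withoutBloom, long withBloom) {
      if (withBloom * 2 >= withoutBloom)
        throw new AssertionError("query with bloom filters (" + withBloom
            + " ms) not significantly faster than without (" + withoutBloom + " ms)");
    }
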
http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/BulkIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BulkIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BulkIT.java
index 5e33383..d5115ff 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/BulkIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/BulkIT.java
@@ -16,8 +16,6 @@
  */
 package org.apache.accumulo.test.functional;
 
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.test.TestIngest;
@@ -49,24 +47,24 @@ public class BulkIT extends MacTest {
     for (int i = 0; i < COUNT; i++) {
       opts.outputFile = base + String.format("/testrf/rf%02d", i);
       opts.startRow = N * i;
-      TestIngest.ingest(c, opts , new BatchWriterOpts());
+      TestIngest.ingest(c, opts , BWOPTS);
     }
     opts.outputFile = base + String.format("/testrf/rf%02d", N);
     opts.startRow = N;
     opts.rows = 1;
     // create an rfile with one entry, there was a bug with this:
-    TestIngest.ingest(c, opts , new BatchWriterOpts());
+    TestIngest.ingest(c, opts , BWOPTS);
     c.tableOperations().importDirectory("test_ingest", base + "/testrf", base + "/testBulkFail", false);
     VerifyIngest.Opts vopts = new VerifyIngest.Opts();
     vopts.random = 56;
     for (int i = 0; i < COUNT; i++) {
       vopts.startRow = i * N;
       vopts.rows = N;
-      VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+      VerifyIngest.verifyIngest(c, vopts, SOPTS);
     }
     vopts.startRow = N;
     vopts.rows = 1;
-    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+    VerifyIngest.verifyIngest(c, vopts, SOPTS);
   }
   
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
index aa258e2..32e871e 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
@@ -19,7 +19,6 @@ package org.apache.accumulo.test.functional;
 import java.util.Collections;
 
 import org.apache.accumulo.core.cli.ClientOpts.Password;
-import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.util.CachedConfiguration;
@@ -83,7 +82,7 @@ public class BulkSplitOptimizationIT extends MacTest {
     opts.startRow = 0;
     opts.cols = 1;
     opts.password = new Password(PASSWORD);
-    VerifyIngest.verifyIngest(c, opts, new ScannerOpts());
+    VerifyIngest.verifyIngest(c, opts, SOPTS);
     
     // ensure each tablet does not have all map files
     FunctionalTestUtils.checkRFiles(c, TABLE_NAME, 50, 100, 1, 4);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/ChaoticBlancerIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ChaoticBlancerIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ChaoticBlancerIT.java
index fb46f1e..0e30a7a 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ChaoticBlancerIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ChaoticBlancerIT.java
@@ -21,8 +21,6 @@ import java.util.Map;
 import java.util.SortedSet;
 import java.util.TreeSet;
 
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.minicluster.MiniAccumuloConfig;
@@ -57,9 +55,9 @@ public class ChaoticBlancerIT extends MacTest {
     TestIngest.Opts opts = new TestIngest.Opts();
     VerifyIngest.Opts vopts = new VerifyIngest.Opts();
     vopts.rows = opts.rows = 200000;
-    TestIngest.ingest(c, opts, new BatchWriterOpts());
+    TestIngest.ingest(c, opts, BWOPTS);
     c.tableOperations().flush("test_ingest", null, null, true);
-    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+    VerifyIngest.verifyIngest(c, vopts, SOPTS);
   }
   
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
index ad9e3fa..36c852e 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
@@ -18,7 +18,6 @@ package org.apache.accumulo.test.functional;
 
 import static org.junit.Assert.assertEquals;
 
-import org.apache.accumulo.core.cli.BatchWriterOpts;
 import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.server.util.Admin;
@@ -43,9 +42,9 @@ public class DeleteIT extends MacTest {
     vopts.rows = opts.rows = 1000;
     vopts.cols = opts.cols = 1;
     vopts.random = opts.random = 56;
-    TestIngest.ingest(c, opts, new BatchWriterOpts());
+    TestIngest.ingest(c, opts, BWOPTS);
     assertEquals(0, cluster.exec(TestRandomDeletes.class, "-p", MacTest.PASSWORD, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers()).waitFor());
-    TestIngest.ingest(c, opts, new BatchWriterOpts());
+    TestIngest.ingest(c, opts, BWOPTS);
     VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
   }
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
index 6e4e1a7..d954974 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
@@ -20,7 +20,6 @@ import static org.junit.Assert.assertTrue;
 
 import java.util.Collections;
 
-import org.apache.accumulo.core.cli.BatchWriterOpts;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.impl.MasterClient;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
@@ -41,23 +40,23 @@ public class DynamicThreadPoolsIT extends MacTest {
   
   @Override
   public void configure(MiniAccumuloConfig cfg) {
-    cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_MAJC_DELAY.getKey(), "1"));
+    cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_MAJC_DELAY.getKey(), "100ms"));
   }
   
-  @Test(timeout = 90 * 1000)
+  @Test(timeout = 30 * 1000)
   public void test() throws Exception {
     Connector c = getConnector();
+    c.instanceOperations().setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "1");
     TestIngest.Opts opts = new TestIngest.Opts();
-    opts.rows = 100000;
+    opts.rows = 100*1000;
     opts.createTable = true;
-    TestIngest.ingest(c, opts, new BatchWriterOpts());
+    TestIngest.ingest(c, opts, BWOPTS);
     c.tableOperations().flush("test_ingest", null, null, true);
     c.tableOperations().clone("test_ingest", "test_ingest2", true, null, null);
     c.tableOperations().clone("test_ingest", "test_ingest3", true, null, null);
     c.tableOperations().clone("test_ingest", "test_ingest4", true, null, null);
     c.tableOperations().clone("test_ingest", "test_ingest5", true, null, null);
     c.tableOperations().clone("test_ingest", "test_ingest6", true, null, null);
-    c.instanceOperations().setProperty(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "1");
     
     TCredentials creds = CredentialHelper.create("root", new PasswordToken(MacTest.PASSWORD), c.getInstance().getInstanceName());
     UtilWaitThread.sleep(10);
@@ -79,6 +78,8 @@ public class DynamicThreadPoolsIT extends MacTest {
           count += table.majors.running;
         }
       }
+      System.out.println("count " + count);
+      UtilWaitThread.sleep(1000);
     }
     assertTrue(count == 1 || count == 2); // sometimes we get two threads due to the way the stats are pulled
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java b/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
new file mode 100644
index 0000000..2064627
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
@@ -0,0 +1,158 @@
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.util.Daemon;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.server.tabletserver.TabletServer;
+import org.apache.accumulo.start.Main;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.junit.Test;
+
+public class HalfDeadTServerIT extends MacTest {
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    cfg.setNumTservers(1);
+    Map<String,String> siteConfig = new HashMap<String,String>();
+    siteConfig.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "15s");
+    siteConfig.put(Property.GENERAL_RPC_TIMEOUT.getKey(), "5s");
+    cfg.setSiteConfig(siteConfig);
+  }
+  
+  class DumpOutput extends Daemon {
+    
+    private final BufferedReader rdr;
+    private final StringBuilder output;
+    
+    DumpOutput(InputStream is) {
+      rdr = new BufferedReader(new InputStreamReader(is));
+      output = new StringBuilder();
+    }
+    @Override
+    public void run() {
+      try {
+        while (true) {
+          String line = rdr.readLine();
+          if (line == null)
+            break;
+          System.out.println(line);
+          output.append(line);
+          output.append("\n");
+        }
+      } catch (IOException ex) {
+        log.error(ex, ex);
+      }
+    }
+    
+    @Override
+    public String toString() {
+      return output.toString();
+    }
+  }
+  
+  
+  @Test(timeout=30*1000)
+  public void testRecover() throws Exception {
+    test(10);
+  }
+  
+  @Test(timeout=60*1000)
+  public void testTimeout() throws Exception {
+    String results = test(40);
+    if (results != null)
+      assertTrue(results.contains("Session expired"));
+  }
+  
+  public String test(int seconds) throws Exception {
+    if (!makeDiskFailureLibrary())
+      return null;
+    Connector c = getConnector();
+    assertEquals(1, c.instanceOperations().getTabletServers().size());
+    // don't need the regular tablet server
+    cluster.killProcess(ServerType.TABLET_SERVER, cluster.getProcesses().get(ServerType.TABLET_SERVER).iterator().next());
+    
+    // create our own tablet server with the special test library
+    String javaHome = System.getProperty("java.home");
+    String javaBin = javaHome + File.separator + "bin" + File.separator + "java";
+    String classpath = System.getProperty("java.class.path");
+    classpath = new File(cluster.getConfig().getDir(), "conf") + File.pathSeparator + classpath;
+    String className = TabletServer.class.getCanonicalName();
+    ArrayList<String> argList = new ArrayList<String>();
+    argList.addAll(Arrays.asList(javaBin, "-cp", classpath));
+    argList.addAll(Arrays.asList(Main.class.getName(), className));
+    ProcessBuilder builder = new ProcessBuilder(argList);
+    Map<String,String> env = builder.environment();
+    env.put("ACCUMULO_HOME", cluster.getConfig().getDir().getAbsolutePath());
+    env.put("ACCUMULO_LOG_DIR", cluster.getConfig().getLogDir().getAbsolutePath());
+    String trickFilename = cluster.getConfig().getLogDir().getAbsolutePath() + "/TRICK_FILE";
+    env.put("TRICK_FILE", trickFilename);
+    String libPath = System.getProperty("user.dir") + "/target/fake_disk_failure.so";
+    env.put("LD_PRELOAD", libPath);
+    env.put("DYLD_INSERT_LIBRARIES", libPath);
+    env.put("DYLD_FORCE_FLAT_NAMESPACE", "true");
+    Process tserver = builder.start();
+    DumpOutput t = new DumpOutput(tserver.getInputStream());
+    t.start();
+    c.tableOperations().create("test_ingest");
+    assertTrue(c.instanceOperations().getTabletServers().size() > 1);
+    int rows = 100*1000;
+    Process ingest = cluster.exec(TestIngest.class, "-u", "root", "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-p", MacTest.PASSWORD, "--rows", rows + "");
+    UtilWaitThread.sleep(500);
+    
+    // block I/O with some side-channel trickiness
+    File trickFile = new File(trickFilename);
+    trickFile.createNewFile();
+    UtilWaitThread.sleep(seconds*1000);
+    trickFile.delete();
+
+    if (seconds <= 10) {
+      assertEquals(0, ingest.waitFor());
+      VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+      vopts.rows = rows;
+      VerifyIngest.verifyIngest(c, vopts, SOPTS);
+    } else {
+      UtilWaitThread.sleep(5*1000);
+    }
+    // verify the process was blocked 
+    String results = t.toString();
+    assertTrue(results.contains("sleeping\nsleeping\nsleeping\n"));
+    assertTrue(results.contains("Zookeeper error, will retry"));
+    ingest.destroy();
+    tserver.destroy();
+    t.join();
+    return results;
+  }
+
+  private boolean makeDiskFailureLibrary() throws Exception {
+    String root = System.getProperty("user.dir");
+    String source = root + "/src/test/c/fake_disk_failure.c";
+    String lib = root + "/target/fake_disk_failure.so";
+    String platform = System.getProperty("os.name");
+    String cmd[];
+    if (platform.equals("Darwin")) {
+      cmd = new String[]{"gcc","-arch","x86_64","-arch","i386","-dynamiclib","-O3","-fPIC", source,"-o",lib};
+    } else {
+      cmd = new String[]{"gcc","-D_GNU_SOURCE","-Wall","-fPIC", source,"-shared", "-o", lib, "-ldl"};
+    }
+    Process gcc = Runtime.getRuntime().exec(cmd);
+    return gcc.waitFor() == 0;
+  }
+  
+}

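The fault injection above works by compiling a small C shim (src/test/c/fake_disk_failure.c) and loading it into the child tserver JVM ahead of libc via LD_PRELOAD (DYLD_INSERT_LIBRARIES plus a flat namespace on Darwin). While the sentinel file named by the TRICK_FILE environment variable exists, the shim stalls intercepted I/O calls and repeatedly prints "sleeping", which is what the assertion on the captured output checks for. A rough Java analogue of that stall loop, purely for illustration (the real interposition must be native code):

    import java.io.File;

    final class FakeDiskStall {
      // Spin while the sentinel file exists, as the preloaded shim does
      // inside the I/O calls it intercepts.
      static void maybeStall(String trickFile) throws InterruptedException {
        while (new File(trickFile).exists()) {
          System.out.println("sleeping");
          Thread.sleep(1000); // interval is illustrative
        }
      }
    }
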
http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/LateLastContactIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/LateLastContactIT.java b/test/src/test/java/org/apache/accumulo/test/functional/LateLastContactIT.java
new file mode 100644
index 0000000..7edb8df
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/LateLastContactIT.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.Collections;
+
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.junit.Test;
+
+/**
+ * Fake the "tablet server stops talking but holds its lock" problem we see when hard drives and NFS fail.
+ * Start a ZombieTServer, and see that the master stops it.
+ */
+public class LateLastContactIT extends MacTest {
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    cfg.setSiteConfig(Collections.singletonMap(Property.GENERAL_RPC_TIMEOUT.getKey(), "2s"));
+  }
+
+  @Test
+  public void test() throws Exception {
+    Process zombie = cluster.exec(ZombieTServer.class);
+    assertEquals(0, zombie.waitFor());
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/MacTest.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MacTest.java b/test/src/test/java/org/apache/accumulo/test/functional/MacTest.java
index dac87f4..0a288c5 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/MacTest.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MacTest.java
@@ -16,10 +16,13 @@
  */
 package org.apache.accumulo.test.functional;
 
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.minicluster.MiniAccumuloCluster;
+import org.apache.accumulo.minicluster.MiniAccumuloCluster.LogWriter;
 import org.apache.accumulo.minicluster.MiniAccumuloConfig;
 import org.apache.log4j.Logger;
 import org.junit.After;
@@ -31,6 +34,8 @@ public class MacTest {
   public static TemporaryFolder folder = new TemporaryFolder();
   public static MiniAccumuloCluster cluster;
   public static final String PASSWORD = "secret";
+  static final ScannerOpts SOPTS = new ScannerOpts();
+  static final BatchWriterOpts BWOPTS = new BatchWriterOpts();
   
   public Connector getConnector() throws AccumuloException, AccumuloSecurityException {
     return cluster.getConnector("root", PASSWORD);
@@ -50,7 +55,11 @@ public class MacTest {
   
   @After
   public void tearDown() throws Exception {
-    cluster.stop();
+    if (cluster != null) {
+      cluster.stop();
+      for (LogWriter log : cluster.getLogWriters())
+        log.flush();
+    }
     folder.delete();
   }
   

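MacTest is the shared harness for these ITs: a subclass overrides configure(MiniAccumuloConfig) to tune the mini cluster before it starts, calls getConnector() for a root connection, and can reuse the SOPTS/BWOPTS default scanner and batch-writer options instead of allocating new ones per call. A minimal sketch of a subclass (class and table names hypothetical, timeout illustrative):

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.minicluster.MiniAccumuloConfig;
    import org.junit.Test;

    public class ExampleIT extends MacTest {
      @Override
      public void configure(MiniAccumuloConfig cfg) {
        cfg.setNumTservers(1); // same knob HalfDeadTServerIT uses above
      }

      @Test(timeout = 30 * 1000)
      public void test() throws Exception {
        Connector c = getConnector(); // root connector to the mini cluster
        c.tableOperations().create("example");
      }
    }
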
http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
index 2b84d49..b8592d9 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
@@ -43,7 +43,7 @@ public class MapReduceIT extends MacTest {
   static final String output_cq = "cq-MD4BASE64";
   static final String output_cfcq =  input_cf + ":" + output_cq;
   
-  @Test
+  @Test(timeout=30*1000)
   public void test() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create(tablename);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java
index 2934fd2..f5cc2cf 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java
@@ -53,7 +53,7 @@ public class MaxOpenIT extends MacTest {
   private static final int NUM_TABLETS = 16;
   private static final int NUM_TO_INGEST = 10000;
   
-  @Test
+  @Test(timeout=30*1000)
   public void run() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/MetadataSplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MetadataSplitIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MetadataSplitIT.java
new file mode 100644
index 0000000..35f6251
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MetadataSplitIT.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Collections;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.junit.Test;
+
+public class MetadataSplitIT extends MacTest {
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_MAJC_DELAY.getKey(), "100ms"));
+  }
+ 
+  @Test(timeout = 30 * 1000)
+  public void test() throws Exception {
+    Connector c = getConnector();
+    assertEquals(1, c.tableOperations().listSplits(MetadataTable.NAME).size());
+    c.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "500");
+    for (int i = 0; i < 10; i++) {
+      c.tableOperations().create("table" + i);
+      c.tableOperations().flush(MetadataTable.NAME, null, null, true);
+    }
+    UtilWaitThread.sleep(10*1000);
+    assertTrue(c.tableOperations().listSplits(MetadataTable.NAME).size() > 2);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java b/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
new file mode 100644
index 0000000..b5b6953
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+
+import org.junit.Test;
+
+public class NativeMapIT extends MacTest {
+  
+  @Test
+  public void test() throws Exception {
+    assertEquals(0, cluster.exec(NativeMapTest.class).waitFor());
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
index 561d453..c0fc4ea 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
@@ -284,7 +284,7 @@ public class PermissionsIT extends MacTest {
   
   private static final String TEST_TABLE = "__TABLE_PERMISSION_TEST__";
   
-  @Test
+  @Test(timeout=30*1000)
   public void tablePermissionTest() throws Exception {
     // create the test user
     Connector c = getConnector();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java b/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
new file mode 100644
index 0000000..9c6549e
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.accumulo.minicluster.ProcessReference;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.server.util.Admin;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.junit.Test;
+
+public class RestartIT extends MacTest {
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    cfg.setSiteConfig(Collections.singletonMap(Property.INSTANCE_ZK_TIMEOUT.getKey(), "5s"));
+  }
+
+  private static final ScannerOpts SOPTS = new ScannerOpts();
+  private static final VerifyIngest.Opts VOPTS = new VerifyIngest.Opts();
+  private static final BatchWriterOpts BWOPTS = new BatchWriterOpts();
+  
+  @Test
+  public void restartMaster() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    Process ingest = cluster.exec(TestIngest.class, 
+        "-u", "root", "-p", MacTest.PASSWORD, 
+        "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers());
+    for (ProcessReference master : cluster.getProcesses().get(ServerType.MASTER)) {
+      cluster.killProcess(ServerType.MASTER, master);
+    }
+    cluster.start();
+    assertEquals(0, ingest.waitFor());
+    VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
+    ingest.destroy();
+  }
+  
+  @Test
+  public void restartMasterRecovery() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    TestIngest.Opts opts = new TestIngest.Opts();
+    TestIngest.ingest(c, opts, BWOPTS);
+    for (Entry<ServerType,Collection<ProcessReference>> entry : cluster.getProcesses().entrySet()) {
+      for (ProcessReference proc : entry.getValue()) {
+        cluster.killProcess(entry.getKey(), proc);
+      }
+    }
+    cluster.start();
+    UtilWaitThread.sleep(5 * 1000); // sleep() takes milliseconds; wait 5 s for recovery to begin
+    for (ProcessReference master : cluster.getProcesses().get(ServerType.MASTER)) {
+      cluster.killProcess(ServerType.MASTER, master);
+    }
+    cluster.start();
+    VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
+  }
+  
+  @Test
+  public void restartMasterSplit() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "5K");
+    Process ingest = cluster.exec(TestIngest.class, 
+        "-u", "root", "-p", MacTest.PASSWORD, 
+        "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers());
+    for (ProcessReference master : cluster.getProcesses().get(ServerType.MASTER)) {
+      cluster.killProcess(ServerType.MASTER, master);
+    }
+    cluster.start();
+    assertEquals(0, ingest.waitFor());
+    VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
+    ingest.destroy();
+  }
+  
+  @Test
+  public void killedTabletServer() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    TestIngest.Opts opts = new TestIngest.Opts();
+    TestIngest.ingest(c, opts, BWOPTS);
+    VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
+    List<ProcessReference> procs = new ArrayList<ProcessReference>(cluster.getProcesses().get(ServerType.TABLET_SERVER));
+    for (ProcessReference tserver : procs) {
+      cluster.killProcess(ServerType.TABLET_SERVER, tserver);
+      cluster.start();
+      VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
+    }
+  }
+
+  @Test
+  public void killedTabletServerDuringShutdown() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    TestIngest.Opts opts = new TestIngest.Opts();
+    TestIngest.ingest(c, opts, BWOPTS);
+    List<ProcessReference> procs = new ArrayList<ProcessReference>(cluster.getProcesses().get(ServerType.TABLET_SERVER));
+    cluster.killProcess(ServerType.TABLET_SERVER, procs.get(0));
+    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+  }
+  
+  @Test
+  public void shutdownDuringCompactingSplitting() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "5K");
+    c.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
+    TestIngest.Opts opts = new TestIngest.Opts();
+    TestIngest.ingest(c, opts, BWOPTS);
+    c.tableOperations().flush("test_ingest", null, null, false);
+    VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
+    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+  }
+  
+  
+}
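
Each restart method above repeats the same kill-then-start sequence inline. A sketch of that sequence as a reusable helper, assuming the MiniAccumuloCluster API these tests already use (the helper class itself is hypothetical):

  import java.util.ArrayList;
  import java.util.List;

  import org.apache.accumulo.minicluster.MiniAccumuloCluster;
  import org.apache.accumulo.minicluster.ProcessReference;
  import org.apache.accumulo.minicluster.ServerType;

  class RestartHelper {
    // Kill every process of the given type, then restart the cluster.
    // Copying the references first avoids iterating a collection that
    // killProcess() mutates underneath us.
    static void restartAll(MiniAccumuloCluster cluster, ServerType type) throws Exception {
      List<ProcessReference> procs =
          new ArrayList<ProcessReference>(cluster.getProcesses().get(type));
      for (ProcessReference proc : procs)
        cluster.killProcess(type, proc);
      cluster.start(); // start() only launches servers that are not already running
    }
  }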

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/RestartStressIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/RestartStressIT.java b/test/src/test/java/org/apache/accumulo/test/functional/RestartStressIT.java
new file mode 100644
index 0000000..f60a8f0
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/RestartStressIT.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.junit.Test;
+
+public class RestartStressIT extends MacTest {
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    Map<String, String> opts = new HashMap<String, String>();
+    opts.put(Property.TSERV_MAXMEM.getKey(), "5K");
+    opts.put(Property.TSERV_MAJC_DELAY.getKey(), "100ms");
+    opts.put(Property.TSERV_WALOG_MAX_SIZE.getKey(), "50K");
+    cfg.setSiteConfig(opts);
+  }
+
+  private static final TestIngest.Opts IOPTS;
+  private static final VerifyIngest.Opts VOPTS;
+  static {
+    IOPTS = new TestIngest.Opts();
+    VOPTS = new VerifyIngest.Opts();
+    IOPTS.rows = VOPTS.rows = 100*1000;
+  }
+  private static final ScannerOpts SOPTS = new ScannerOpts();
+  
+  
+  @Test(timeout=120*1000)
+  public void test() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "5K");
+    Process ingest = cluster.exec(TestIngest.class, 
+        "-u", "root", "-p", MacTest.PASSWORD, 
+        "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), 
+        "--rows", "" + IOPTS.rows);
+    for (int i = 0; i < 5; i++) {
+      UtilWaitThread.sleep(10*1000);
+      cluster.killProcess(ServerType.TABLET_SERVER, cluster.getProcesses().get(ServerType.TABLET_SERVER).iterator().next());
+      cluster.start();
+    }
+    assertEquals(0, ingest.waitFor());
+    VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
index 3237fc1..cb4e2d7 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
@@ -38,7 +38,7 @@ import org.junit.Test;
 
 public class ServerSideErrorIT extends MacTest {
   
-  @Test
+  @Test(timeout=60*1000)
   public void run() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("tt");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java
index 1f4a3fc..dda7c63 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java
@@ -73,7 +73,6 @@ public class SparseColumnFamilyIT extends MacTest {
       scanner.setBatchSize(3);
       scanner.fetchColumnFamily(new Text(String.format("%03d", 1)));
       
-      long t1 = System.currentTimeMillis();
       Iterator<Entry<Key,Value>> iter = scanner.iterator();
       if (iter.hasNext()) {
         Entry<Key,Value> entry = iter.next();
@@ -81,10 +80,6 @@ public class SparseColumnFamilyIT extends MacTest {
           throw new Exception();
         }
       }
-      long t2 = System.currentTimeMillis();
-      
-      System.out.println("time " + (t2 - t1));
-      
     }
   }
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
index c4719e0..e8a9d80 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
@@ -103,7 +103,7 @@ public class SplitIT extends MacTest {
     c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
     DeleteIT.deleteTest(c);
     c.tableOperations().flush("test_ingest", null, null, true);
-    UtilWaitThread.sleep(5*1000);
+    UtilWaitThread.sleep(10*1000);
     assertTrue(c.tableOperations().listSplits("test_ingest").size() > 30);
   }
   

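The change above doubles a fixed sleep so the split-count assertion has time to come true. A deadline-bounded polling loop is the sturdier alternative; a sketch assuming the same Connector, listSplits(), and UtilWaitThread that SplitIT already uses (the helper is not part of the commit):

  // Hypothetical polling helper: wait until the table has more than `min`
  // splits, failing only if the deadline passes first.
  private static void waitForSplits(Connector c, String table, int min, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (c.tableOperations().listSplits(table).size() <= min) {
      if (System.currentTimeMillis() > deadline)
        throw new AssertionError("expected > " + min + " splits within " + timeoutMs + " ms");
      UtilWaitThread.sleep(500); // brief back-off between checks
    }
  }
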
http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
index 1d02d91..cce2af5 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
@@ -26,6 +26,7 @@ import org.apache.accumulo.core.cli.BatchWriterOpts;
 import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.TableOperations;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Value;
@@ -45,12 +46,13 @@ public class TableIT extends MacTest {
   @Test(timeout = 60 * 1000)
   public void test() throws Exception {
     Connector c = getConnector();
-    c.tableOperations().create("test_ingest");
+    TableOperations to = c.tableOperations();
+    to.create("test_ingest");
     TestIngest.Opts opts = new TestIngest.Opts();
     TestIngest.ingest(c, opts, new BatchWriterOpts());
     VerifyIngest.Opts vopts = new VerifyIngest.Opts();
     VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
-    String id = c.tableOperations().tableIdMap().get("test_ingest");
+    String id = to.tableIdMap().get("test_ingest");
     Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     s.setRange(new KeyExtent(new Text(id), null, null).toMetadataRange());
     int count = 0;
@@ -61,7 +63,7 @@ public class TableIT extends MacTest {
     assertTrue(count > 0);
     FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
     assertTrue(fs.listStatus(new Path(cluster.getConfig().getDir() + "/accumulo/tables/" + id)).length > 0);
-    c.tableOperations().delete("test_ingest");
+    to.delete("test_ingest");
     count = 0;
     for (@SuppressWarnings("unused")
     Entry<Key,Value> entry : s) {
@@ -69,10 +71,11 @@ public class TableIT extends MacTest {
     }
     assertEquals(0, count);
     assertEquals(0, fs.listStatus(new Path(cluster.getConfig().getDir() + "/accumulo/tables/" + id)).length);
-    assertNull(c.tableOperations().tableIdMap().get("test_ingest"));
-    c.tableOperations().create("test_ingest");
+    assertNull(to.tableIdMap().get("test_ingest"));
+    to.create("test_ingest");
     TestIngest.ingest(c, opts, new BatchWriterOpts());
     VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+    to.delete("test_ingest");
     assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
   }
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
index 2c3c86d..6c7fa75 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
@@ -66,9 +66,9 @@ public class TimeoutIT extends MacTest {
       bw.close();
       fail("batch writer did not timeout");
     } catch (MutationsRejectedException mre) {
-      if (!(mre.getCause() instanceof TimedOutException)) {
-        throw mre;
-      }
+      if (mre.getCause() instanceof TimedOutException)
+        return;
+      throw mre;
     }
   }
   

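The rewritten catch block passes only when the MutationsRejectedException was caused by a TimedOutException, and rethrows anything else so unrelated failures stay visible. The same cause check can be factored into a small assertion; a sketch assuming JUnit 4's fail() and java.util.concurrent.Callable (the helper name is illustrative):

  // Hypothetical helper: run an action expected to throw, and pass only
  // if the thrown exception's cause is of the expected type.
  static void assertCause(Class<? extends Throwable> expected, Callable<?> action) throws Exception {
    try {
      action.call();
      fail("expected an exception caused by " + expected.getSimpleName());
    } catch (Exception e) {
      if (!expected.isInstance(e.getCause()))
        throw e; // unexpected cause: rethrow so the real failure shows
    }
  }
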
http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java b/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
index 7266430..51a45fa 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
@@ -46,7 +46,7 @@ import org.junit.Test;
 
 public class VisibilityIT extends MacTest {
   
-  @Test
+  @Test(timeout=30*1000)
   public void run() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("vt");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/WriteAheadLogIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/WriteAheadLogIT.java b/test/src/test/java/org/apache/accumulo/test/functional/WriteAheadLogIT.java
new file mode 100644
index 0000000..124629f
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/WriteAheadLogIT.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.accumulo.minicluster.ProcessReference;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.server.util.Admin;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.junit.Test;
+
+public class WriteAheadLogIT extends MacTest {
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    Map<String, String> siteConfig = new HashMap<String, String>();
+    siteConfig.put(Property.TSERV_WALOG_MAX_SIZE.getKey(), "2M");
+    siteConfig.put(Property.GC_CYCLE_DELAY.getKey(), "1");
+    siteConfig.put(Property.GC_CYCLE_START.getKey(), "1");
+    siteConfig.put(Property.MASTER_RECOVERY_DELAY.getKey(), "0");
+    siteConfig.put(Property.TSERV_MAXMEM.getKey(), "200K");
+    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1");
+    cfg.setSiteConfig(siteConfig); // apply the overrides to the cluster config
+  }
+
+  @Test(timeout=60*1000)
+  public void test() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "750K");
+    TestIngest.Opts opts = new TestIngest.Opts();
+    TestIngest.ingest(c, opts, new BatchWriterOpts());
+    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+    Map<ServerType,Collection<ProcessReference>> processes = cluster.getProcesses();
+    for (ProcessReference tserver : processes.get(ServerType.TABLET_SERVER)) {
+      cluster.killProcess(ServerType.TABLET_SERVER, tserver);
+    }
+    assertEquals(0, cluster.getProcesses().get(ServerType.TABLET_SERVER).size());
+    cluster.start();
+    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/functional/ZooCacheIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ZooCacheIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ZooCacheIT.java
new file mode 100644
index 0000000..f483ce9
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ZooCacheIT.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.junit.Test;
+
+public class ZooCacheIT extends MacTest {
+  
+  @Test(timeout=200*1000)
+  public void test() throws Exception {
+    assertEquals(0, cluster.exec(CacheTestClean.class, "/zcTest-42", "/tmp/zcTest-42").waitFor());
+    final AtomicReference<Exception> ref = new AtomicReference<Exception>();
+    List<Thread> threads = new ArrayList<Thread>();
+    for (int i = 0; i < 3; i++) {
+      Thread reader = new Thread() {
+        public void run() {
+          try {
+            CacheTestReader.main(new String[]{"/zcTest-42", "/tmp/zcTest-42", cluster.getZooKeepers()});
+          } catch(Exception ex) {
+            ref.set(ex);
+          }
+        }
+      };
+      reader.start();
+      threads.add(reader);
+    }
+    assertEquals(0, cluster.exec(CacheTestWriter.class, "/zcTest-42", "/tmp/zcTest-42", "3", "500").waitFor());
+    for (Thread t : threads) {
+      t.join();
+      if (ref.get() != null)
+        throw ref.get();
+    }
+  }
+  
+}
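
The AtomicReference above is how a failure inside a reader thread reaches the JUnit thread: each worker parks its exception, and the main thread rethrows after join(). A standalone sketch of that idiom in plain JDK terms (class and method names are illustrative):

  import java.util.ArrayList;
  import java.util.List;
  import java.util.concurrent.atomic.AtomicReference;

  class WorkerFailures {
    // Run every task on its own thread; once all workers have finished,
    // rethrow the first recorded failure on the calling thread.
    static void runAll(List<Runnable> tasks) throws Exception {
      final AtomicReference<Exception> first = new AtomicReference<Exception>();
      List<Thread> threads = new ArrayList<Thread>();
      for (final Runnable task : tasks) {
        Thread t = new Thread() {
          public void run() {
            try {
              task.run();
            } catch (Exception ex) {
              first.compareAndSet(null, ex); // keep only the first failure
            }
          }
        };
        t.start();
        threads.add(t);
      }
      for (Thread t : threads)
        t.join(); // wait for all workers before checking
      if (first.get() != null)
        throw first.get();
    }
  }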

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/system/auto/fake_disk_failure.c
----------------------------------------------------------------------
diff --git a/test/system/auto/fake_disk_failure.c b/test/system/auto/fake_disk_failure.c
deleted file mode 100644
index ce76c9a..0000000
--- a/test/system/auto/fake_disk_failure.c
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-#include <unistd.h>
-#include <dlfcn.h>
-#include <stdio.h>
-#include <string.h>
-#include <stdlib.h>
-
-static
-void test_pause() {
-  static char trickFile[1024] = "";
-  static char pid[10] = "";
-  if (trickFile[0] == '\0') {
-    strcpy(trickFile, getenv("HOME"));
-    strcat(trickFile, "/");
-    strcat(trickFile, "HOLD_IO_");
-    sprintf(pid, "%d", getpid());
-    strcat(trickFile, pid);
-  }
-
-  while (access(trickFile, R_OK) == 0) {
-    fprintf(stderr, "sleeping\n");
-    fflush(stderr);
-    sleep(1);
-  }
-}
-
-ssize_t write(int fd, const void *buf, size_t count) {
-  void * real_write = dlsym(RTLD_NEXT, "write");
-  ssize_t (*real_write_t)(int, const void*, size_t) = real_write;
-
-  test_pause();
-  return real_write_t(fd, buf, count);
-}
-
-ssize_t read(int fd, void *buf, size_t count) {
-  void * real_read = dlsym(RTLD_NEXT, "read");
-  ssize_t (*real_read_t)(int, void*, size_t) = real_read;
-  test_pause();
-  return real_read_t(fd, buf, count);
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/system/auto/simple/masterFailover.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/masterFailover.py b/test/system/auto/simple/masterFailover.py
deleted file mode 100755
index 0d2eca1..0000000
--- a/test/system/auto/simple/masterFailover.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from simple.readwrite import SunnyDayTest
-
-import logging
-log = logging.getLogger('test.auto')
-
-import unittest
-
-class MasterFailover(SunnyDayTest):
-    "Test master automatic master fail-over"
-
-    order = 85
-
-    def start_master(self, host, safeMode=None):
-        goalState = 'NORMAL'
-        if safeMode:
-           goalState = 'SAFE_MODE'
-        self.wait(self.runOn('localhost',
-                             [self.accumulo_sh(),
-                              'org.apache.accumulo.server.master.state.SetGoalState',
-                              goalState]))
-        return self.runOn(host, [self.accumulo_sh(), 'master', 'dooomed'])
-
-    def runTest(self):
-         waitTime = self.waitTime()
-         self.waitForStop(self.ingester, waitTime)
-         handle = self.start_master(self.masterHost())
-         self.pkill(self.masterHost(), 'doomed')
-         self.sleep(2)
-         self.shutdown_accumulo()
-
-def suite():
-     result = unittest.TestSuite()
-     result.addTest(MasterFailover())
-     return result
- 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/system/auto/simple/nativeMap.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/nativeMap.py b/test/system/auto/simple/nativeMap.py
deleted file mode 100755
index 1f154bd..0000000
--- a/test/system/auto/simple/nativeMap.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import unittest
-import time
-
-from TestUtils import TestUtilsMixin
-
-class NativeMapTest(TestUtilsMixin, unittest.TestCase):
-    "Native Map Unit Test"
-
-    order = 21
-    testClass=""
-
-    def setUp(self):
-        pass
-        
-    def runTest(self):
-        handle = self.runClassOn('localhost', 'org.apache.accumulo.test.functional.NativeMapTest', [])
-        self.waitForStop(handle, 20)
-
-    def tearDown(self):
-        pass
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(NativeMapTest())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/system/auto/simple/shell.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/shell.py b/test/system/auto/simple/shell.py
deleted file mode 100755
index ae17ec7..0000000
--- a/test/system/auto/simple/shell.py
+++ /dev/null
@@ -1,474 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import logging
-import unittest
-import time
-from TestUtils import TestUtilsMixin, ROOT, ROOT_PASSWORD, ACCUMULO_HOME
-from subprocess import Popen as BasePopen, PIPE
-
-log = logging.getLogger('test.shell')
-      
-class ShellTest(TestUtilsMixin,unittest.TestCase):
-    """Start a clean accumulo, and test different shell functions.
-    Some other shell functions are tests in the systemp and tablep tests"""
-    
-    command_list = [ "help", "tables", "table", "createtable", "deletetable", 
-                    "insert", "scan", "user", "users", "delete",
-                    "flush", "config", "setiter", "deleteiter", "whoami", "debug",
-                    "tablepermissions", "userpermissions", "authenticate", "createuser",
-                    "dropuser", "passwd", "setauths", "getauths", "grant", "revoke" ]
-    
-    def setUp(self):     
-        TestUtilsMixin.setUp(self)
-        
-    def runTest(self):
-        self.badLoginTest()
-        self.setIterTest()
-        self.setScanIterTest()
-        self.iteratorsTest()
-        self.createtableTestSplits()
-        self.createtableTestCopyConfig()
-        self.classpathTest()
-        self.tableTest()
-        self.configTest()
-        self.helpTest()
-        self.tablesTest()
-        self.createtableTest()
-        self.deletetableTest()
-        self.scanTest()
-        self.insertTest()
-        self.flushTest()
-        self.whoamiTest()
-        self.getauthsTest()
-        
-        
-    def badLoginTest(self, **opts):
-      log.debug("Running shell with bad password")
-      handle = self.runOn(self.masterHost(), [self.accumulo_sh(), 'shell', '-u', ROOT, '-p', "ThisWouldBeATerriblePasswordToHave"], stdin=PIPE, **opts)
-      handle.communicate("quit\n")
-      self.failUnless(handle.returncode != 0, "Was able to create a shell with bad credentials")
-
-    def setIterTest(self):
-        input = 'setiter -t setitertest -n mymax -scan -p 10 -class org.apache.accumulo.core.iterators.user.MaxCombiner\n\ncf\n\nSTRING\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.failUnless(out.find("TableNotFoundException") >= 0,
-                        "Was able to setiter a table that didn't exist")
-        input = 'createtable setitertest\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        input = 'table setitertest\nsetiter -n mymax -scan -p 10 -class org.apache.accumulo.core.iterators.user.MaxCombiner\n\ncf1\n\nSTRING\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        input = 'setiter -t setitertest -n mymax -scan -p 10 -class org.apache.accumulo.core.iterators.user.MinCombiner\n\ncf2\n\nSTRING\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.failUnless(out.find("IllegalArgumentException") >= 0,
-                        "Was able to configure same iter name twice")
-        input = 'setiter -t setitertest -n mymin -scan -p 10 -class org.apache.accumulo.core.iterators.user.MinCombiner\n\ncf2\n\nSTRING\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.failUnless(out.find("IllegalArgumentException") >= 0,
-                        "Was able to configure same priority twice")
-        input = 'setiter -t setitertest -n mymin -scan -p 11 -class org.apache.accumulo.core.iterators.user.MinCombiner\n\ncf2\n\nSTRING\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        input = 'table setitertest\ninsert row1 cf1 cq 10\ninsert row1 cf1 cq 30\ninsert row1 cf1 cq 20\ninsert row1 cf2 cq 10\ninsert row1 cf2 cq 30\nscan -np\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        self.failIf(out.find("row1 cf1:cq []    30") == -1 or out.find("row1 cf2:cq []    10") == -1,
-                        "SetIter Failed:  combining failed")
-        
-    def setScanIterTest(self):
-        input = 'createtable setscanitertest\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        input = 'table setscanitertest\ninsert row cf cq val1\ninsert row cf cq val2\nscan -np\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        self.failIf(out.find("row cf:cq []    val1") == 1 or out.find("row cf:cq []    val2") == -1,
-                        "SetScanIter Failed:  default versioning failed")
-        input = 'setscaniter -t setscanitertest -n vers -p 20 -class org.apache.accumulo.core.iterators.user.VersioningIterator\n2\ntable setscanitertest\nscan -np\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        self.failIf(out.find("row cf:cq []    val1") == -1 or out.find("row cf:cq []    val2") == -1,
-                        "SetScanIter Failed:  versioning override failed")
-        input = 'table setscanitertest\nsetscaniter -n vers -p 20 -class org.apache.accumulo.core.iterators.user.VersioningIterator\n2\nscan -np\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        self.failIf(out.find("row cf:cq []    val1") == -1 or out.find("row cf:cq []    val2") == -1,
-                        "SetScanIter Failed:  set on current table failed") 
-        input = 'setscaniter -t setscanitertest -n vers -p 20 -class org.apache.accumulo.core.iterators.user.VersioningIterator\n2\ndeletescaniter -t setscanitertest -n vers\ntable setscanitertest\nscan -np\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        self.failIf(out.find("row cf:cq []    val1") == 1 or out.find("row cf:cq []    val2") == -1,
-                        "SetScanIter Failed:  deletescaniter (single) failed")
-        input = 'table setscanitertest\nsetscaniter -n vers -p 20 -class org.apache.accumulo.core.iterators.user.VersioningIterator\n2\ndeletescaniter -n vers\ntable setscanitertest\nscan -np\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        self.failIf(out.find("row cf:cq []    val1") == 1 or out.find("row cf:cq []    val2") == -1,
-                        "SetScanIter Failed:  deletescaniter on current table failed")
-        input = 'setscaniter -t setscanitertest -n vers -p 20 -class org.apache.accumulo.core.iterators.user.VersioningIterator\n2\ndeletescaniter -t setscanitertest -a\ntable setscanitertest\nscan -np\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        self.failIf(out.find("row cf:cq []    val1") == 1 or out.find("row cf:cq []    val2") == -1,
-                        "SetScanIter Failed:  deletescaniter (all) failed")
-        input = 'setscaniter -t setscanitertest -n vers -p 20 -class org.apache.accumulo.core.iterators.user.VersioningIterator\n2\nsetscaniter -t setscanitertest -n vers -p 10 -class org.apache.accumulo.core.iterators.user.VersioningIterator\n2\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        self.failUnless(out.find("IllegalArgumentException") >= 0,
-                        "Was able to configure same iter name twice")
-        input = 'setscaniter -t setscanitertest -n vers -p 20 -class org.apache.accumulo.core.iterators.user.VersioningIterator\n2\nsetscaniter -t setscanitertest -n vers2 -p 20 -class org.apache.accumulo.core.iterators.user.VersioningIterator\n2\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        self.failUnless(out.find("IllegalArgumentException") >= 0,
-                        "Was able to configure same priority twice")
-        
-    def classpathTest(self):
-        input = 'classpath\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        lines = out.split()
-        for line in lines:
-            self.failUnless(line.startswith("file:") >= 0 or
-                            line.startswith("List of classpath items are:") >= 0, 
-                            "Classpath command: Command didn't work or classpath items were formatted incorrectly");
-        
-    def iteratorsTest(self):
-        input = 'createtable filtertest\nsetiter -t filtertest -n myfilter -scan -p 10 -class org.apache.accumulo.core.iterators.user.AgeOffFilter\n\n4000\n\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        input = 'config -t filtertest -np\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        self.failIf(out.find("table.iterator.scan.myfilter.opt.ttl") == -1, 
-                        "Config Failed:  Iterator doesn't exist in the config")
-        input = 'table filtertest\ninsert foo a b c\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        input = 'table filtertest\nscan\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        self.failUnless(out.find("foo a:b") >= 0, "Scan Failed:  Entries don't exist")
-        # Wait until ageoff happens
-        self.sleep(5)
-        input = 'table filtertest\nscan\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        self.failUnless(out.find("foo a:b") == -1, "Scan Failed:  Entries didn't ageoff")
-        input = 'deleteiter -t filtertest -n myfilter -scan\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        input = 'config filtertest -np\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        self.failUnless(out.find("table.iterator.scan.myfilter.opt.ttl") == -1, 
-                        "Config Failed:  Iterator doesn't exist in the config")
-        input = 'table filtertest\nsetiter -n myfilter -scan -p 10 -class org.apache.accumulo.core.iterators.user.AgeOffFilter\n\n4000\n\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        input = 'config -t filtertest -np\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        self.failIf(out.find("table.iterator.scan.myfilter.opt.ttl") == -1,
-                        "Config Failed:  Iterator doesn't exist in the config")
-        input = 'table filtertest\ndeleteiter -n myfilter -scan\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        input = 'config filtertest -np\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        self.failUnless(out.find("table.iterator.scan.myfilter.opt.ttl") == -1,
-                        "Config Failed:  Iterator doesn't exist in the config")
-        
-    def configTest(self):
-        cf_option = "table.scan.max.memory"
-        cf_value = "9361234"
-        input = 'createtable t1\nconfig -t t1 -s %s=%s -np\n' % (cf_option, cf_value)
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        input = 'config -t t1 -np\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        lines = out.split("\n")
-        foundConfig = False
-        foundOverride = False
-        for line in lines:
-            if foundConfig:
-                self.failUnless(line.startswith("table") and line.find("@override") >= 0 and line.find(cf_value),
-                                 "Error setting or retrieving config values")
-                foundOverride = True
-                break
-            if line.find(cf_option) >= 0:
-                foundConfig = True
-        self.failUnless(foundConfig and foundOverride, "Did not find the configuration that was set")
-        input = 'config -t t1 -d %s -np\n' % cf_option
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        input = 'config -t t1 -np\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        lines = out.split("\n")
-        for line in lines:
-            self.failIf(line.find(cf_value) >= 0, "Could not delete the value")
-        
-    def helpTest(self):
-        commands = self.command_list
-        input = "help -np\n"
-        startLooking = False
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        lines = out.split("\n")
-        for line in lines:
-            line = line.rstrip()
-            if startLooking:
-                command = line.split("-")[0].rstrip()
-                if not command.startswith("\t") and command in commands:
-                    commands.remove(command)
-            else:
-                if line[-10:] == "> help -np":
-                    startLooking = True
-        log.debug("missing commands:" + ", ".join(commands))
-        self.failIf(len(commands) > 0, "help command doesn't cover all the commands") 
-        
-    def tablesTest(self):
-        input = "tables\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        self.failUnless(out.find("!METADATA"), 
-                        "tables command does not return the correct tables" )
-    
-    def tableTest(self):
-        input = "table !METADATA\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        self.failUnless(out.split("\n")[-1].find("!METADATA >"), 
-                        "table command does not switch context to the table")
-        input = "table null\n"
-        out2, err2, code2 = self.rootShell(self.masterHost(), input)
-        self.failUnless(out2.find("TableNotFoundException") >= 0, 
-                        "Was able to connect to a table that didn't exist")
-        
-    
-    def createtableTest(self):
-        input = "createtable test_table\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        input = "tables\n"
-        out2, err2, code2 = self.rootShell(self.masterHost(), input)
-        self.processResult(out2, err2, code2)
-        self.failUnless(out2.find("test_table"), 
-                        "createtable command did not correctly create the table")
-        self.failUnless(out.split("\n")[-1].find("test_table >"), 
-                        "createtable command did not switch contexts to the new table")
-        
-    def createtableTestCopyConfig(self):
-        input = 'createtable cttest\nsetiter -t cttest -n myfilter -scan -p 10 -class org.apache.accumulo.core.iterators.user.AgeOffFilter\n\n2000\n\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        input = 'config -t cttest -np\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        self.failIf(out.find("table.iterator.scan.myfilter.opt.ttl") == -1, 
-                        "CreateTable Failed:  Iterator doesn't exist in the config")
-        input = 'createtable cttest2 -cc cttest\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        input = 'config -t cttest2 -np\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        self.failIf(out.find("table.iterator.scan.myfilter.opt.ttl") == -1, 
-                        "CreateTable Failed:  Iterator doesn't exist in the config after copying the table config")
-        
-    def createtableTestSplits(self):
-        splits_file = os.path.join(ACCUMULO_HOME, 'test','system','bench','lib','splits')
-        input = 'createtable splits_test -sf %s\n' % splits_file
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        input = 'table !METADATA\nscan\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        splitTestID = self.getTableId('splits_test')
-        splits = []
-        for a in out.split("\n"):
-            if a.startswith(splitTestID+';'):
-                split = a.split()[0].split(";",1)[1]
-                splits.append(split)
-        self.failUnless(len(splits) == 190*5, 
-                        "CreateTable Failed:  Splits were not created correctly")
-        input = 'createtable test_splits -cs splits_test\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        input = 'table !METADATA\nscan\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        testSplitsID = self.getTableId('test_splits')
-        for a in out.split("\n"):
-            if a.startswith(testSplitsID+';'):
-                split = a.split()[0].split(";",1)[1]
-                splits.remove(split)
-        self.failUnless(len(splits) == 0, 
-                        "CreateTable Failed:  Splits were not copied correctly")
-        input = 'createtable test_splits_2\naddsplits one\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        input = 'table !METADATA\nscan\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        splitTestID = self.getTableId('test_splits_2')
-        splits = []
-        for a in out.split("\n"):
-            if a.startswith(splitTestID+';'):
-                split = a.split()[0].split(";",1)[1]
-                splits.append(split)
-        log.debug(splits)
-        self.failUnless(len(splits) == 1*5 and splits[0] == 'one',
-                        "CreateTable Failed:  Splits were not created correctly (add one split)")
-        input = 'createtable test_splits_3\naddsplits -sf %s\n' % splits_file 
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        input = 'table !METADATA\nscan\n'
-        out,err,code = self.rootShell(self.masterHost(),input)
-        self.processResult(out, err, code)
-        splitTestID = self.getTableId('test_splits_3')
-        splits = []
-        for a in out.split("\n"):
-            if a.startswith(splitTestID+';'):
-                split = a.split()[0].split(";",1)[1]
-                splits.append(split)
-        self.failUnless(len(splits) == 190*5,
-                        "CreateTable Failed:  Splits were not created correctly (addsplits from file)")
-    
-    def deletetableTest(self):
-        create = "createtable test_delete_table\n"
-        out, err, code = self.rootShell(self.masterHost(), create)
-        self.processResult(out, err, code)
-        self.failUnless(out.split("\n")[-1].find("test_table >"), 
-                        "createtable command did not switch contexts to the new table")
-        delete = "deletetable -t test_delete_table\ny\n"
-        out, err, code = self.rootShell(self.masterHost(), delete)
-        self.processResult(out, err, code)
-        input = "tables\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        self.failIf(out.find("test_delete_table") >= 0, 
-                        "deletetable command did not delete the table" )
-        delete = "createtable test_delete_table1\ncreatetable test_delete_table2\ndeletetable -p test_delete_table.* -f\n"
-        out, err, code = self.rootShell(self.masterHost(), delete)
-        self.processResult(out, err, code)
-        input = "tables\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        self.failIf(out.find("test_delete_table") >= 0,
-                        "deletetable -p command did not delete the tables" )
-        delete = "createtable test_delete_table\ndeletetable\ny\n"
-        out, err, code = self.rootShell(self.masterHost(), delete)
-        self.processResult(out, err, code)
-        input = "tables\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        self.failIf(out.find("test_delete_table") >= 0,
-                        "deletetable command did not delete the current table" )
-        delete = "createtable test_delete_table\ndeletetable test_delete_table -f\n"
-        out, err, code = self.rootShell(self.masterHost(), delete)
-        self.processResult(out, err, code)
-        input = "tables\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        self.failIf(out.find("test_delete_table") >= 0,
-                        "deletetable command did not delete the current table" )
-        
-    def scanTest(self):
-        input = "createtable test_scan_table\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        input = "table test_scan_table\ninsert one two three four\nscan\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        self.failUnless(out.find("one") >= 0 and out.find("two") >= 0 and 
-                        out.find("three") >= 0 and out.find("four") >= 0 and
-                        out.find("one") < out.find("two") < 
-                        out.find("three") < out.find("four"), 
-                                    "scan command did not return the correct results")
-        input = "table test_scan_table\ndelete one two three\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        input = "table test_scan_table\nscan\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        self.failIf((out.find("one") >= 0) or (out.find("two") >= 0) or (out.find("three") >= 0),
-                                    "scan command did not return the correct results")
-        input = "table test_scan_table\ninsert one two three four -ts 42\nscan\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        self.failUnless(out.find("one") >= 0 and out.find("two") >= 0 and
-                        out.find("three") >= 0 and out.find("four") >= 0 and
-                        out.find("one") < out.find("two") <
-                        out.find("three") < out.find("four"),
-                                    "scan command did not return the correct results")
-        input = "table test_scan_table\ndelete one two three -ts 42\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        input = "table test_scan_table\nscan\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        self.failIf(out.find("one") >= 0 or out.find("two") >= 0 or out.find("three") >= 0,
-                                    "scan command did not return the correct results")
-        
-    def insertTest(self):
-        input = "createtable test_insert_table\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        input = "table test_insert_table\ninsert a b c d\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        
-    def flushTest(self):
-        input = "flush -t !METADATA -w\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        self.failUnless(out.find("Flush of table !METADATA completed") >= 0, 
-                        "flush command did not flush the tables")
-        input = "flush !METADATA -w\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        self.failUnless(out.find("Flush of table !METADATA completed") >= 0,
-                        "flush command did not flush the tables")
-        input = "table !METADATA\nflush -w\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        self.failUnless(out.find("Flush of table !METADATA completed") >= 0,
-                        "flush command did not flush the tables")
-        
-    def whoamiTest(self):
-        input = "whoami\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        self.failUnless(out.find("root") >= 0, 
-                        "whoami command did not return the correct values")
-    def getauthsTest(self):
-        passwd = 'secret'
-        input = "createuser test_user\n%s\n%s\nsetauths -u test_user -s 12,3,4\n" % (passwd, passwd)
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        input = "getauths -u test_user\n"
-        out, err, code = self.rootShell(self.masterHost(), input)
-        self.processResult(out, err, code)
-        self.failUnless(out.find("3") >= 0 and out.find("4") >= 0 and out.find("12") >= 0, 
-                        "getauths command did not return the correct values")
-        
-def suite():  
-    result = unittest.TestSuite()
-    result.addTest(ShellTest())
-    return result 


[50/50] [abbrv] git commit: ACCUMULO-1496 Fix warnings introduced by merge

Posted by ct...@apache.org.
ACCUMULO-1496 Fix warnings introduced by merge


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/f5324a22
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/f5324a22
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/f5324a22

Branch: refs/heads/ACCUMULO-1496
Commit: f5324a22725a8aac2bb8883264caa1705db955c5
Parents: 964e761
Author: Christopher Tubbs <ct...@apache.org>
Authored: Tue Jul 16 16:07:11 2013 -0400
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Tue Jul 16 16:07:11 2013 -0400

----------------------------------------------------------------------
 .../src/main/java/org/apache/accumulo/server/monitor/Monitor.java  | 1 -
 server/src/main/java/org/apache/accumulo/server/util/Admin.java    | 1 -
 .../main/java/org/apache/accumulo/server/util/ZooKeeperMain.java   | 2 --
 3 files changed, 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/f5324a22/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java b/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java
index 6bca1a3..faf6e04 100644
--- a/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java
+++ b/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java
@@ -74,7 +74,6 @@ import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.util.EmbeddedWebServer;
 import org.apache.accumulo.start.annotations.AccumuloService;
 import org.apache.accumulo.trace.instrument.Tracer;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.log4j.Logger;
 import org.apache.zookeeper.WatchedEvent;
 import org.apache.zookeeper.Watcher;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/f5324a22/server/src/main/java/org/apache/accumulo/server/util/Admin.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/Admin.java b/server/src/main/java/org/apache/accumulo/server/util/Admin.java
index 88dca6f..9813ba0 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/Admin.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/Admin.java
@@ -22,7 +22,6 @@ import java.util.List;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/f5324a22/server/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java b/server/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
index 730f421..80fbe5b 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
@@ -16,13 +16,11 @@
  */
 package org.apache.accumulo.server.util;
 
-import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.client.HdfsZooInstance;
-import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.start.annotations.AccumuloService;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;


[30/50] [abbrv] git commit: ACCUMULO-1537 put IT output into files, make java programs identifiable using "ps"

Posted by ct...@apache.org.
ACCUMULO-1537 put IT output into files, make java programs identifiable using "ps"

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1502278 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/fb839df6
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/fb839df6
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/fb839df6

Branch: refs/heads/ACCUMULO-1496
Commit: fb839df66d3834957b1338e751be1e92f89b3eb0
Parents: c72a194
Author: Eric C. Newton <ec...@apache.org>
Authored: Thu Jul 11 16:36:22 2013 +0000
Committer: Eric C. Newton <ec...@apache.org>
Committed: Thu Jul 11 16:36:22 2013 +0000

----------------------------------------------------------------------
 .../java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java | 2 +-
 pom.xml                                                           | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/fb839df6/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
index ea29b92..16b1381 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
@@ -143,7 +143,7 @@ public class MiniAccumuloCluster {
     String className = clazz.getCanonicalName();
 
     ArrayList<String> argList = new ArrayList<String>();
-    argList.addAll(Arrays.asList(javaBin, "-cp", classpath));
+    argList.addAll(Arrays.asList(javaBin, "-Dproc=" + clazz.getSimpleName(), "-cp", classpath));
     argList.add("-Djava.library.path=" + config.getLibDir());
     argList.addAll(extraJvmOpts);
     argList.addAll(Arrays.asList("-XX:+UseConcMarkSweepGC", "-XX:CMSInitiatingOccupancyFraction=75", Main.class.getName(), className));

http://git-wip-us.apache.org/repos/asf/accumulo/blob/fb839df6/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 04ca5d7..37791e4 100644
--- a/pom.xml
+++ b/pom.xml
@@ -632,6 +632,9 @@
               <goal>integration-test</goal>
             </goals>
             <phase>integration-test</phase>
+            <configuration>
+              <redirectTestOutputToFile>true</redirectTestOutputToFile>
+            </configuration>
           </execution>
           <execution>
             <id>verify-integration-tests</id>


[14/50] [abbrv] ACCUMULO-1481 : Add tests for splitting/merging root table; refactor to consolidate metadata constants and structures in an organized way; begin consolidating metadata ops into a servicer interface to abstract the code that actually does

Posted by ct...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/metadata/TableMetadataServicer.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/TableMetadataServicer.java b/core/src/main/java/org/apache/accumulo/core/metadata/TableMetadataServicer.java
new file mode 100644
index 0000000..0b088f5
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/TableMetadataServicer.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.metadata;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.SortedMap;
+import java.util.SortedSet;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.CredentialHelper;
+import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.hadoop.io.Text;
+
+/**
+ * A {@link MetadataServicer} that is backed by a table
+ */
+abstract class TableMetadataServicer extends MetadataServicer {
+  
+  private Instance instance;
+  private TCredentials credentials;
+  private String tableIdBeingServiced;
+  private String serviceTableName;
+  
+  public TableMetadataServicer(Instance instance, TCredentials credentials, String serviceTableName, String tableIdBeingServiced) {
+    this.instance = instance;
+    this.credentials = credentials;
+    this.serviceTableName = serviceTableName;
+    this.tableIdBeingServiced = tableIdBeingServiced;
+  }
+  
+  @Override
+  public String getServicedTableId() {
+    return tableIdBeingServiced;
+  }
+  
+  public String getServicingTableName() {
+    return serviceTableName;
+  }
+  
+  @Override
+  public void getTabletLocations(SortedMap<KeyExtent,String> tablets) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+    
+    Scanner scanner = instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials)).createScanner(getServicingTableName(),
+        Authorizations.EMPTY);
+    
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
+    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
+    
+    // position at first entry in metadata table for given table
+    scanner.setRange(TabletsSection.getRange(getServicedTableId()));
+    
+    Text colf = new Text();
+    Text colq = new Text();
+    
+    KeyExtent currentKeyExtent = null;
+    String location = null;
+    Text row = null;
+    // acquire this table's tablets from the metadata table which services it
+    for (Entry<Key,Value> entry : scanner) {
+      if (row != null) {
+        if (!row.equals(entry.getKey().getRow())) {
+          currentKeyExtent = null;
+          location = null;
+          row = entry.getKey().getRow();
+        }
+      } else {
+        row = entry.getKey().getRow();
+      }
+      
+      colf = entry.getKey().getColumnFamily(colf);
+      colq = entry.getKey().getColumnQualifier(colq);
+      
+      if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(colf, colq)) {
+        currentKeyExtent = new KeyExtent(entry.getKey().getRow(), entry.getValue());
+        tablets.put(currentKeyExtent, location);
+        currentKeyExtent = null;
+        location = null;
+      } else if (colf.equals(TabletsSection.CurrentLocationColumnFamily.NAME)) {
+        location = entry.getValue().toString();
+      }
+      
+    }
+    
+    validateEntries(tablets);
+  }
+  
+  private void validateEntries(SortedMap<KeyExtent,String> tablets) throws AccumuloException {
+    SortedSet<KeyExtent> tabletsKeys = (SortedSet<KeyExtent>) tablets.keySet();
+    // sanity check of metadata table entries
+    // make sure tablets has no holes, and that it starts and ends w/ null
+    if (tabletsKeys.size() == 0)
+      throw new AccumuloException("No entries found in metadata table for table " + getServicedTableId());
+    
+    if (tabletsKeys.first().getPrevEndRow() != null)
+      throw new AccumuloException("Problem with metadata table, first entry for table " + getServicedTableId() + "- " + tabletsKeys.first()
+          + " - has non null prev end row");
+    
+    if (tabletsKeys.last().getEndRow() != null)
+      throw new AccumuloException("Problem with metadata table, last entry for table " + getServicedTableId() + "- " + tabletsKeys.last()
+          + " - has non null end row");
+    
+    Iterator<KeyExtent> tabIter = tabletsKeys.iterator();
+    Text lastEndRow = tabIter.next().getEndRow();
+    while (tabIter.hasNext()) {
+      KeyExtent tabke = tabIter.next();
+      
+      if (tabke.getPrevEndRow() == null)
+        throw new AccumuloException("Problem with metadata table, it has null prev end row in middle of table " + tabke);
+      
+      if (!tabke.getPrevEndRow().equals(lastEndRow))
+        throw new AccumuloException("Problem with metadata table, it has a hole " + tabke.getPrevEndRow() + " != " + lastEndRow);
+      
+      lastEndRow = tabke.getEndRow();
+    }
+    
+    // end METADATA table sanity check
+  }
+  
+}
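
For context, callers reach this class through the MetadataServicer factory
methods exercised by the MetadataServicerTest added later in this commit. A
minimal sketch, assuming instance, credentials, and tableId are in scope
(illustrative only, not part of the commit):

  SortedMap<KeyExtent,String> tablets = new TreeMap<KeyExtent,String>();
  MetadataServicer servicer = MetadataServicer.forTableId(instance, credentials, tableId);
  // fills the map with extent -> location; location is null for unassigned tablets
  servicer.getTabletLocations(tablets);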

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java b/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java
new file mode 100644
index 0000000..d3323a4
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.metadata.schema;
+
+public class DataFileValue {
+  private long size;
+  private long numEntries;
+  private long time = -1;
+  
+  public DataFileValue(long size, long numEntries, long time) {
+    this.size = size;
+    this.numEntries = numEntries;
+    this.time = time;
+  }
+  
+  public DataFileValue(long size, long numEntries) {
+    this.size = size;
+    this.numEntries = numEntries;
+    this.time = -1;
+  }
+  
+  public DataFileValue(byte[] encodedDFV) {
+    String[] ba = new String(encodedDFV).split(",");
+    
+    size = Long.parseLong(ba[0]);
+    numEntries = Long.parseLong(ba[1]);
+    
+    if (ba.length == 3)
+      time = Long.parseLong(ba[2]);
+    else
+      time = -1;
+  }
+  
+  public long getSize() {
+    return size;
+  }
+  
+  public long getNumEntries() {
+    return numEntries;
+  }
+  
+  public boolean isTimeSet() {
+    return time >= 0;
+  }
+  
+  public long getTime() {
+    return time;
+  }
+  
+  public byte[] encode() {
+    if (time >= 0)
+      return ("" + size + "," + numEntries + "," + time).getBytes();
+    return ("" + size + "," + numEntries).getBytes();
+  }
+  
+  @Override
+  public boolean equals(Object o) {
+    if (o instanceof DataFileValue) {
+      DataFileValue odfv = (DataFileValue) o;
+      
+      return size == odfv.size && numEntries == odfv.numEntries;
+    }
+    
+    return false;
+  }
+  
+  @Override
+  public int hashCode() {
+    return Long.valueOf(size + numEntries).hashCode();
+  }
+  
+  @Override
+  public String toString() {
+    return size + " " + numEntries;
+  }
+  
+  public void setTime(long time) {
+    if (time < 0)
+      throw new IllegalArgumentException();
+    this.time = time;
+  }
+}
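
The encoding is a plain comma-separated string, so a round trip is cheap; a
quick illustration (not from the commit):

  byte[] encoded = new DataFileValue(1024, 500, 42).encode();  // "1024,500,42"
  DataFileValue decoded = new DataFileValue(encoded);
  // size and numEntries always survive the round trip; time only when it was set
  assert decoded.getSize() == 1024 && decoded.getNumEntries() == 500;
  assert decoded.isTimeSet() && decoded.getTime() == 42;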

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java b/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
new file mode 100644
index 0000000..4c2b6f8
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/schema/MetadataSchema.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.metadata.schema;
+
+import org.apache.accumulo.core.client.admin.TimeType;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.PartialKey;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.util.ColumnFQ;
+import org.apache.hadoop.io.Text;
+
+/**
+ * Describes the table schema used for metadata tables
+ */
+public class MetadataSchema {
+  
+  private static final String RESERVED_PREFIX = "~";
+  
+  private static class Section {
+    private String rowPrefix;
+    private Range range;
+    
+    private Section(String startRow, boolean startInclusive, String endRow, boolean endInclusive) {
+      rowPrefix = startRow;
+      range = new Range(startRow, startInclusive, endRow, endInclusive);
+    }
+  }
+  
+  /**
+   * Used for storing information about tablets
+   */
+  public static class TabletsSection {
+    private static final Section section = new Section(null, false, RESERVED_PREFIX, false);
+    
+    public static Range getRange() {
+      return section.range;
+    }
+    
+    public static Range getRange(String tableId) {
+      return new Range(new Key(tableId + ';'), true, new Key(tableId + '<').followingKey(PartialKey.ROW), false);
+    }
+    
+    public static Text getRow(Text tableId, Text endRow) {
+      Text entry = new Text(tableId);
+      
+      if (endRow == null) {
+        // append delimiter for default tablet
+        entry.append(new byte[] {'<'}, 0, 1);
+      } else {
+        // append delimiter for regular tablets
+        entry.append(new byte[] {';'}, 0, 1);
+        entry.append(endRow.getBytes(), 0, endRow.getLength());
+      }
+      
+      return entry;
+    }
+    
+    /**
+     * Column family for storing the tablet information needed by clients
+     */
+    public static class TabletColumnFamily {
+      /**
+       * This column family needs to sort after all other column families for a tablet, because {@link #PREV_ROW_COLUMN} sits in it and must sort last,
+       * since the {@link SimpleGarbageCollector} relies on that ordering.
+       */
+      public static final Text NAME = new Text("~tab");
+      /**
+       * README: it is very important that prevRow sort last, to avoid race conditions between the garbage collector and split; it must sort after everything
+       * else for that tablet.
+       */
+      public static final ColumnFQ PREV_ROW_COLUMN = new ColumnFQ(NAME, new Text("~pr"));
+      /**
+       * A temporary field in case a split fails and we need to roll back
+       */
+      public static final ColumnFQ OLD_PREV_ROW_COLUMN = new ColumnFQ(NAME, new Text("oldprevrow"));
+      /**
+       * A temporary field for splits to optimize certain operations
+       */
+      public static final ColumnFQ SPLIT_RATIO_COLUMN = new ColumnFQ(NAME, new Text("splitRatio"));
+    }
+    
+    /**
+     * Column family for recording information used by the TServer
+     */
+    public static class ServerColumnFamily {
+      public static final Text NAME = new Text("srv");
+      /**
+       * Holds the location of the tablet in the DFS file system
+       */
+      public static final ColumnFQ DIRECTORY_COLUMN = new ColumnFQ(NAME, new Text("dir"));
+      /**
+       * Holds the {@link TimeType}
+       */
+      public static final ColumnFQ TIME_COLUMN = new ColumnFQ(NAME, new Text("time"));
+      /**
+       * Holds flush IDs to enable waiting on a flush to complete
+       */
+      public static final ColumnFQ FLUSH_COLUMN = new ColumnFQ(NAME, new Text("flush"));
+      /**
+       * Holds compact IDs to enable waiting on a compaction to complete
+       */
+      public static final ColumnFQ COMPACT_COLUMN = new ColumnFQ(NAME, new Text("compact"));
+      /**
+       * Holds lock IDs to enable a sanity check to ensure that the TServer writing to the metadata tablet is not dead
+       */
+      public static final ColumnFQ LOCK_COLUMN = new ColumnFQ(NAME, new Text("lock"));
+    }
+    
+    /**
+     * Column family for storing entries created by the TServer to indicate it has loaded a tablet that it was assigned
+     */
+    public static class CurrentLocationColumnFamily {
+      public static final Text NAME = new Text("loc");
+    }
+    
+    /**
+     * Column family for storing the assigned location
+     */
+    public static class FutureLocationColumnFamily {
+      public static final Text NAME = new Text("future");
+    }
+    
+    /**
+     * Column family for storing last location, as a hint for assignment
+     */
+    public static class LastLocationColumnFamily {
+      public static final Text NAME = new Text("last");
+    }
+    
+    /**
+     * Temporary markers that indicate a tablet loaded a bulk file
+     */
+    public static class BulkFileColumnFamily {
+      public static final Text NAME = new Text("loaded");
+    }
+    
+    /**
+     * Temporary marker that indicates a tablet was successfully cloned
+     */
+    public static class ClonedColumnFamily {
+      public static final Text NAME = new Text("!cloned");
+    }
+    
+    /**
+     * Column family for storing files used by a tablet
+     */
+    public static class DataFileColumnFamily {
+      public static final Text NAME = new Text("file");
+    }
+    
+    /**
+     * Column family for storing the set of files scanned with an isolated scanner, to prevent them from being deleted
+     */
+    public static class ScanFileColumnFamily {
+      public static final Text NAME = new Text("scan");
+    }
+    
+    /**
+     * Column family for storing write-ahead log entries
+     */
+    public static class LogColumnFamily {
+      public static final Text NAME = new Text("log");
+    }
+    
+    /**
+     * Column family for indicating that the files in a tablet have been trimmed to only include data for the current tablet, so that they are safe to merge
+     */
+    public static class ChoppedColumnFamily {
+      public static final Text NAME = new Text("chopped");
+      public static final ColumnFQ CHOPPED_COLUMN = new ColumnFQ(NAME, new Text("chopped"));
+    }
+  }
+  
+  /**
+   * Contains additional metadata in a reserved area not for tablets
+   */
+  public static class ReservedSection {
+    private static final Section section = new Section(RESERVED_PREFIX, true, null, false);
+    
+    public static Range getRange() {
+      return section.range;
+    }
+    
+    public static String getRowPrefix() {
+      return section.rowPrefix;
+    }
+    
+  }
+  
+  /**
+   * Holds delete markers for potentially unused files/directories
+   */
+  public static class DeletesSection {
+    private static final Section section = new Section(RESERVED_PREFIX + "del", true, RESERVED_PREFIX + "dem", false);
+    
+    public static Range getRange() {
+      return section.range;
+    }
+    
+    public static String getRowPrefix() {
+      return section.rowPrefix;
+    }
+    
+  }
+  
+  /**
+   * Holds bulk-load-in-progress processing flags
+   */
+  public static class BlipSection {
+    private static final Section section = new Section(RESERVED_PREFIX + "blip", true, RESERVED_PREFIX + "bliq", false);
+    
+    public static Range getRange() {
+      return section.range;
+    }
+    
+    public static String getRowPrefix() {
+      return section.rowPrefix;
+    }
+    
+  }
+  
+}
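
Taken together, these nested constants replace the flat fields of the old
util.MetadataTable (deleted below). The intended scan pattern, mirroring the
Merge.java and GetSplitsCommand hunks later in this commit, looks roughly like
this (connector and tableId are assumed to be in scope):

  Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
  scanner.setRange(TabletsSection.getRange(tableId));                  // rows for one table
  TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);    // tablet boundaries
  scanner.fetchColumnFamily(TabletsSection.DataFileColumnFamily.NAME); // the tablet's files
  for (Map.Entry<Key,Value> entry : scanner) {
    // one entry per fetched column of each tablet row
  }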

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/util/ColumnFQ.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/ColumnFQ.java b/core/src/main/java/org/apache/accumulo/core/util/ColumnFQ.java
index 7cf1c6f..8826bb1 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/ColumnFQ.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/ColumnFQ.java
@@ -69,30 +69,6 @@ public class ColumnFQ implements Comparable<ColumnFQ> {
     m.putDelete(colf, colq);
   }
   
-  /**
-   * @deprecated since 1.5, use {@link #fetch(ScannerBase)} instead
-   */
-  @Deprecated
-  public static void fetch(ScannerBase sb, ColumnFQ cfq) {
-    sb.fetchColumn(cfq.colf, cfq.colq);
-  }
-  
-  /**
-   * @deprecated since 1.5, use {@link #put(Mutation, Value)} instead
-   */
-  @Deprecated
-  public static void put(Mutation m, ColumnFQ cfq, Value v) {
-    m.put(cfq.colf, cfq.colq, v);
-  }
-  
-  /**
-   * @deprecated since 1.5, use {@link #putDelete(Mutation)} instead
-   */
-  @Deprecated
-  public static void putDelete(Mutation m, ColumnFQ cfq) {
-    m.putDelete(cfq.colf, cfq.colq);
-  }
-
   @Override
   public boolean equals(Object o) {
     if (!(o instanceof ColumnFQ))
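
Callers of the removed static helpers migrate to the instance methods the
deprecation notices already pointed at, for example:

  // before (deprecated since 1.5)   // after
  ColumnFQ.fetch(scanner, cfq);      cfq.fetch(scanner);
  ColumnFQ.put(m, cfq, v);           cfq.put(m, v);
  ColumnFQ.putDelete(m, cfq);        cfq.putDelete(m);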

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/util/Merge.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/Merge.java b/core/src/main/java/org/apache/accumulo/core/util/Merge.java
index bad43bb..b1d0205 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/Merge.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/Merge.java
@@ -31,6 +31,9 @@ import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.io.Text;
@@ -216,8 +219,8 @@ public class Merge {
       throw new MergeException(e);
     }
     scanner.setRange(new KeyExtent(new Text(tableId), end, start).toMetadataRange());
-    scanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
-    MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
+    scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
     final Iterator<Entry<Key,Value>> iterator = scanner.iterator();
     
     Iterator<Size> result = new Iterator<Size>() {
@@ -233,12 +236,12 @@ public class Merge {
         while (iterator.hasNext()) {
           Entry<Key,Value> entry = iterator.next();
           Key key = entry.getKey();
-          if (key.getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
+          if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
             String[] sizeEntries = new String(entry.getValue().get()).split(",");
             if (sizeEntries.length == 2) {
               tabletSize += Long.parseLong(sizeEntries[0]);
             }
-          } else if (MetadataTable.PREV_ROW_COLUMN.hasColumns(key)) {
+          } else if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
             KeyExtent extent = new KeyExtent(key.getRow(), entry.getValue());
             return new Size(extent, tabletSize);
           }
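
The manual split of the file value above parses the same comma-separated
encoding now owned by the new DataFileValue class; an equivalent form (a
sketch of what this could use, not what the commit does) would be:

  DataFileValue dfv = new DataFileValue(entry.getValue().get());
  tabletSize += dfv.getSize();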

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/util/MetadataTable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/MetadataTable.java b/core/src/main/java/org/apache/accumulo/core/util/MetadataTable.java
deleted file mode 100644
index 98bc13d..0000000
--- a/core/src/main/java/org/apache/accumulo/core/util/MetadataTable.java
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.core.util;
-
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.SortedMap;
-import java.util.SortedSet;
-import java.util.TreeMap;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.PartialKey;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.CredentialHelper;
-import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.hadoop.io.Text;
-
-public class MetadataTable {
-  
-  public static final String ID = "!0";
-  public static final String NAME = "!METADATA";
-  
-  /**
-   * Initial tablet directory name
-   */
-  public static final String TABLE_TABLET_LOCATION = "/table_info";
-  
-  /**
-   * Reserved keyspace is any row that begins with a tilde '~' character
-   */
-  public static final Key RESERVED_RANGE_START_KEY = new Key(new Text(new byte[] {'~'}));
-  public static final Range NON_ROOT_KEYSPACE = new Range(null, false, RESERVED_RANGE_START_KEY, false);
-  public static final Range KEYSPACE = new Range(new Key(new Text(ID)), true, RESERVED_RANGE_START_KEY, false);
-  public static final Range DELETED_RANGE = new Range(new Key(new Text("~del")), true, new Key(new Text("~dem")), false);
-  public static final String BLIP_FLAG_PREFIX = "~blip"; // BLIP = bulk load in progress
-  public static final Range BLIP_KEYSPACE = new Range(new Key(new Text(BLIP_FLAG_PREFIX)), true, new Key(new Text("~bliq")), false);
-  
-  public static final Text CURRENT_LOCATION_COLUMN_FAMILY = new Text("loc");
-  public static final Text FUTURE_LOCATION_COLUMN_FAMILY = new Text("future");
-  public static final Text LAST_LOCATION_COLUMN_FAMILY = new Text("last");
-  /**
-   * Temporary marker that indicates a tablet loaded a bulk file
-   */
-  public static final Text BULKFILE_COLUMN_FAMILY = new Text("loaded");
-  
-  /**
-   * Temporary marker that indicates a tablet was successfully cloned
-   */
-  public static final Text CLONED_COLUMN_FAMILY = new Text("!cloned");
-  
-  /**
-   * This needs to sort after all other column families for that tablet, because the {@link #PREV_ROW_COLUMN} sits in this and that needs to sort last because
-   * the {@link SimpleGarbageCollector} relies on this.
-   */
-  public static final Text TABLET_COLUMN_FAMILY = new Text("~tab");
-  
-  /**
-   * README : very important that prevRow sort last to avoid race conditions between garbage collector and split this needs to sort after everything else for
-   * that tablet
-   */
-  public static final ColumnFQ PREV_ROW_COLUMN = new ColumnFQ(TABLET_COLUMN_FAMILY, new Text("~pr"));
-  public static final ColumnFQ OLD_PREV_ROW_COLUMN = new ColumnFQ(TABLET_COLUMN_FAMILY, new Text("oldprevrow"));
-  public static final ColumnFQ SPLIT_RATIO_COLUMN = new ColumnFQ(TABLET_COLUMN_FAMILY, new Text("splitRatio"));
-  
-  public static final Text SERVER_COLUMN_FAMILY = new Text("srv");
-  public static final ColumnFQ DIRECTORY_COLUMN = new ColumnFQ(SERVER_COLUMN_FAMILY, new Text("dir"));
-  public static final ColumnFQ TIME_COLUMN = new ColumnFQ(SERVER_COLUMN_FAMILY, new Text("time"));
-  public static final ColumnFQ FLUSH_COLUMN = new ColumnFQ(SERVER_COLUMN_FAMILY, new Text("flush"));
-  public static final ColumnFQ COMPACT_COLUMN = new ColumnFQ(SERVER_COLUMN_FAMILY, new Text("compact"));
-  public static final ColumnFQ LOCK_COLUMN = new ColumnFQ(SERVER_COLUMN_FAMILY, new Text("lock"));
-  
-  public static final Text DATAFILE_COLUMN_FAMILY = new Text("file");
-  public static final Text SCANFILE_COLUMN_FAMILY = new Text("scan");
-  public static final Text LOG_COLUMN_FAMILY = new Text("log");
-  public static final Text CHOPPED_COLUMN_FAMILY = new Text("chopped");
-  public static final ColumnFQ CHOPPED_COLUMN = new ColumnFQ(CHOPPED_COLUMN_FAMILY, new Text("chopped"));
-  
-  public static class DataFileValue {
-    private long size;
-    private long numEntries;
-    private long time = -1;
-    
-    public DataFileValue(long size, long numEntries, long time) {
-      this.size = size;
-      this.numEntries = numEntries;
-      this.time = time;
-    }
-    
-    public DataFileValue(long size, long numEntries) {
-      this.size = size;
-      this.numEntries = numEntries;
-      this.time = -1;
-    }
-    
-    public DataFileValue(byte[] encodedDFV) {
-      String[] ba = new String(encodedDFV).split(",");
-      
-      size = Long.parseLong(ba[0]);
-      numEntries = Long.parseLong(ba[1]);
-      
-      if (ba.length == 3)
-        time = Long.parseLong(ba[2]);
-      else
-        time = -1;
-    }
-    
-    public long getSize() {
-      return size;
-    }
-    
-    public long getNumEntries() {
-      return numEntries;
-    }
-    
-    public boolean isTimeSet() {
-      return time >= 0;
-    }
-    
-    public long getTime() {
-      return time;
-    }
-    
-    public byte[] encode() {
-      if (time >= 0)
-        return ("" + size + "," + numEntries + "," + time).getBytes();
-      return ("" + size + "," + numEntries).getBytes();
-    }
-    
-    @Override
-    public boolean equals(Object o) {
-      if (o instanceof DataFileValue) {
-        DataFileValue odfv = (DataFileValue) o;
-        
-        return size == odfv.size && numEntries == odfv.numEntries;
-      }
-      
-      return false;
-    }
-    
-    @Override
-    public int hashCode() {
-      return Long.valueOf(size + numEntries).hashCode();
-    }
-    
-    @Override
-    public String toString() {
-      return size + " " + numEntries;
-    }
-    
-    public void setTime(long time) {
-      if (time < 0)
-        throw new IllegalArgumentException();
-      this.time = time;
-    }
-  }
-  
-  public static Pair<SortedMap<KeyExtent,Text>,List<KeyExtent>> getMetadataLocationEntries(SortedMap<Key,Value> entries) {
-    Key key;
-    Value val;
-    Text location = null;
-    Value prevRow = null;
-    KeyExtent ke;
-    
-    SortedMap<KeyExtent,Text> results = new TreeMap<KeyExtent,Text>();
-    ArrayList<KeyExtent> locationless = new ArrayList<KeyExtent>();
-    
-    Text lastRowFromKey = new Text();
-    
-    // text obj below is meant to be reused in loop for efficiency
-    Text colf = new Text();
-    Text colq = new Text();
-    
-    for (Entry<Key,Value> entry : entries.entrySet()) {
-      key = entry.getKey();
-      val = entry.getValue();
-      
-      if (key.compareRow(lastRowFromKey) != 0) {
-        prevRow = null;
-        location = null;
-        key.getRow(lastRowFromKey);
-      }
-      
-      colf = key.getColumnFamily(colf);
-      colq = key.getColumnQualifier(colq);
-      
-      // interpret the row id as a key extent
-      if (colf.equals(CURRENT_LOCATION_COLUMN_FAMILY) || colf.equals(FUTURE_LOCATION_COLUMN_FAMILY)) {
-        if (location != null) {
-          throw new IllegalStateException("Tablet has multiple locations : " + lastRowFromKey);
-        }
-        location = new Text(val.toString());
-      } else if (PREV_ROW_COLUMN.equals(colf, colq)) {
-        prevRow = new Value(val);
-      }
-      
-      if (prevRow != null) {
-        ke = new KeyExtent(key.getRow(), prevRow);
-        if (location != null)
-          results.put(ke, location);
-        else
-          locationless.add(ke);
-        
-        location = null;
-        prevRow = null;
-      }
-    }
-    
-    return new Pair<SortedMap<KeyExtent,Text>,List<KeyExtent>>(results, locationless);
-  }
-  
-  public static SortedMap<Text,SortedMap<ColumnFQ,Value>> getTabletEntries(SortedMap<Key,Value> tabletKeyValues, List<ColumnFQ> columns) {
-    TreeMap<Text,SortedMap<ColumnFQ,Value>> tabletEntries = new TreeMap<Text,SortedMap<ColumnFQ,Value>>();
-    
-    HashSet<ColumnFQ> colSet = null;
-    if (columns != null) {
-      colSet = new HashSet<ColumnFQ>(columns);
-    }
-    
-    for (Entry<Key,Value> entry : tabletKeyValues.entrySet()) {
-      
-      if (columns != null && !colSet.contains(new ColumnFQ(entry.getKey()))) {
-        continue;
-      }
-      
-      Text row = entry.getKey().getRow();
-      
-      SortedMap<ColumnFQ,Value> colVals = tabletEntries.get(row);
-      if (colVals == null) {
-        colVals = new TreeMap<ColumnFQ,Value>();
-        tabletEntries.put(row, colVals);
-      }
-      
-      colVals.put(new ColumnFQ(entry.getKey()), entry.getValue());
-    }
-    
-    return tabletEntries;
-  }
-  
-  public static void getEntries(Instance instance, TCredentials credentials, String table, boolean isTid, Map<KeyExtent,String> locations,
-      SortedSet<KeyExtent> tablets) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    String tableId = isTid ? table : Tables.getNameToIdMap(instance).get(table);
-    
-    String systemTableToRead = tableId.equals(ID) ? RootTable.NAME : NAME;
-    Scanner scanner = instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials)).createScanner(systemTableToRead,
-        Authorizations.EMPTY);
-    
-    PREV_ROW_COLUMN.fetch(scanner);
-    scanner.fetchColumnFamily(CURRENT_LOCATION_COLUMN_FAMILY);
-    
-    // position at first entry in metadata table for given table
-    KeyExtent ke = new KeyExtent(new Text(tableId), new Text(), null);
-    Key startKey = new Key(ke.getMetadataEntry());
-    ke = new KeyExtent(new Text(tableId), null, null);
-    Key endKey = new Key(ke.getMetadataEntry()).followingKey(PartialKey.ROW);
-    scanner.setRange(new Range(startKey, endKey));
-    
-    Text colf = new Text();
-    Text colq = new Text();
-    
-    KeyExtent currentKeyExtent = null;
-    String location = null;
-    Text row = null;
-    // acquire this tables METADATA table entries
-    boolean haveExtent = false;
-    boolean haveLocation = false;
-    for (Entry<Key,Value> entry : scanner) {
-      if (row != null) {
-        if (!row.equals(entry.getKey().getRow())) {
-          currentKeyExtent = null;
-          haveExtent = false;
-          haveLocation = false;
-          row = entry.getKey().getRow();
-        }
-      } else
-        row = entry.getKey().getRow();
-      
-      colf = entry.getKey().getColumnFamily(colf);
-      colq = entry.getKey().getColumnQualifier(colq);
-      
-      // stop scanning metadata table when another table is reached
-      if (!(new KeyExtent(entry.getKey().getRow(), (Text) null)).getTableId().toString().equals(tableId))
-        break;
-      
-      if (PREV_ROW_COLUMN.equals(colf, colq)) {
-        currentKeyExtent = new KeyExtent(entry.getKey().getRow(), entry.getValue());
-        tablets.add(currentKeyExtent);
-        haveExtent = true;
-      } else if (colf.equals(CURRENT_LOCATION_COLUMN_FAMILY)) {
-        location = entry.getValue().toString();
-        haveLocation = true;
-      }
-      
-      if (haveExtent && haveLocation) {
-        locations.put(currentKeyExtent, location);
-        haveExtent = false;
-        haveLocation = false;
-        currentKeyExtent = null;
-      }
-    }
-    
-    validateEntries(tableId, tablets);
-  }
-  
-  public static void validateEntries(String tableId, SortedSet<KeyExtent> tablets) throws AccumuloException {
-    // sanity check of metadata table entries
-    // make sure tablets has no holes, and that it starts and ends w/ null
-    if (tablets.size() == 0)
-      throw new AccumuloException("No entries found in metadata table for table " + tableId);
-    
-    if (tablets.first().getPrevEndRow() != null)
-      throw new AccumuloException("Problem with metadata table, first entry for table " + tableId + "- " + tablets.first() + " - has non null prev end row");
-    
-    if (tablets.last().getEndRow() != null)
-      throw new AccumuloException("Problem with metadata table, last entry for table " + tableId + "- " + tablets.first() + " - has non null end row");
-    
-    Iterator<KeyExtent> tabIter = tablets.iterator();
-    Text lastEndRow = tabIter.next().getEndRow();
-    while (tabIter.hasNext()) {
-      KeyExtent tabke = tabIter.next();
-      
-      if (tabke.getPrevEndRow() == null)
-        throw new AccumuloException("Problem with metadata table, it has null prev end row in middle of table " + tabke);
-      
-      if (!tabke.getPrevEndRow().equals(lastEndRow))
-        throw new AccumuloException("Problem with metadata table, it has a hole " + tabke.getPrevEndRow() + " != " + lastEndRow);
-      
-      lastEndRow = tabke.getEndRow();
-    }
-    
-    // end METADATA table sanity check
-  }
-  
-  public static boolean isContiguousRange(KeyExtent ke, SortedSet<KeyExtent> children) {
-    if (children.size() == 0)
-      return false;
-    
-    if (children.size() == 1)
-      return children.first().equals(ke);
-    
-    Text per = children.first().getPrevEndRow();
-    Text er = children.last().getEndRow();
-    
-    boolean perEqual = (per == ke.getPrevEndRow() || per != null && ke.getPrevEndRow() != null && ke.getPrevEndRow().compareTo(per) == 0);
-    
-    boolean erEqual = (er == ke.getEndRow() || er != null && ke.getEndRow() != null && ke.getEndRow().compareTo(er) == 0);
-    
-    if (!perEqual || !erEqual)
-      return false;
-    
-    Iterator<KeyExtent> iter = children.iterator();
-    
-    Text lastEndRow = iter.next().getEndRow();
-    
-    while (iter.hasNext()) {
-      KeyExtent cke = iter.next();
-      
-      per = cke.getPrevEndRow();
-      
-      // something in the middle should not be null
-      
-      if (per == null || lastEndRow == null)
-        return false;
-      
-      if (per.compareTo(lastEndRow) != 0)
-        return false;
-      
-      lastEndRow = cke.getEndRow();
-    }
-    
-    return true;
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/util/RootTable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/RootTable.java b/core/src/main/java/org/apache/accumulo/core/util/RootTable.java
deleted file mode 100644
index 1209110..0000000
--- a/core/src/main/java/org/apache/accumulo/core/util/RootTable.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.core.util;
-
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.accumulo.core.data.Range;
-import org.apache.hadoop.io.Text;
-
-/**
- * 
- */
-public class RootTable {
-  
-  public static final String ID = "!!R";
-  public static final String NAME = "!!ROOT";
-  
-  public static final String ROOT_TABLET_LOCATION = "/root_tablet";
-  
-  public static final String ZROOT_TABLET = ROOT_TABLET_LOCATION;
-  public static final String ZROOT_TABLET_LOCATION = ZROOT_TABLET + "/location";
-  public static final String ZROOT_TABLET_FUTURE_LOCATION = ZROOT_TABLET + "/future_location";
-  public static final String ZROOT_TABLET_LAST_LOCATION = ZROOT_TABLET + "/lastlocation";
-  public static final String ZROOT_TABLET_WALOGS = ZROOT_TABLET + "/walogs";
-  
-  public static final KeyExtent EXTENT = new KeyExtent(new Text(ID), null, null);
-  public static final Range METADATA_TABLETS_RANGE = new Range(null, false, MetadataTable.RESERVED_RANGE_START_KEY, false);
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/util/shell/commands/FlushCommand.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/FlushCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/FlushCommand.java
index 17e220b..6bfdce2 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/FlushCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/FlushCommand.java
@@ -19,7 +19,7 @@ package org.apache.accumulo.core.util.shell.commands;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.util.shell.Shell;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java
index 469d91b..4093fa4 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/GetSplitsCommand.java
@@ -30,9 +30,10 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.core.util.format.BinaryFormatter;
 import org.apache.accumulo.core.util.shell.Shell;
@@ -72,14 +73,14 @@ public class GetSplitsCommand extends Command {
       } else {
         String systemTableToCheck = MetadataTable.NAME.equals(tableName) ? RootTable.NAME : MetadataTable.NAME;
         final Scanner scanner = shellState.getConnector().createScanner(systemTableToCheck, Authorizations.EMPTY);
-        MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
+        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
         final Text start = new Text(shellState.getConnector().tableOperations().tableIdMap().get(tableName));
         final Text end = new Text(start);
         end.append(new byte[] {'<'}, 0, 1);
         scanner.setRange(new Range(start, end));
         for (Iterator<Entry<Key,Value>> iterator = scanner.iterator(); iterator.hasNext();) {
           final Entry<Key,Value> next = iterator.next();
-          if (MetadataTable.PREV_ROW_COLUMN.hasColumns(next.getKey())) {
+          if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(next.getKey())) {
             KeyExtent extent = new KeyExtent(next.getKey().getRow(), next.getValue());
             final String pr = encode(encode, extent.getPrevEndRow());
             final String er = encode(encode, extent.getEndRow());

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OfflineCommand.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OfflineCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OfflineCommand.java
index 0365a39..70de3d4 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OfflineCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OfflineCommand.java
@@ -19,7 +19,7 @@ package org.apache.accumulo.core.util.shell.commands;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.util.shell.Shell;
 
 public class OfflineCommand extends TableOperation {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OnlineCommand.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OnlineCommand.java b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OnlineCommand.java
index c6a2eff..5ffbe3a 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OnlineCommand.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/commands/OnlineCommand.java
@@ -19,7 +19,7 @@ package org.apache.accumulo.core.util.shell.commands;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.util.shell.Shell;
 
 public class OnlineCommand extends TableOperation {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java b/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java
index 7ffec0f..f160cb3 100644
--- a/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/client/impl/TabletLocatorImplTest.java
@@ -47,10 +47,12 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataLocationObtainer;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.hadoop.io.Text;
 
 public class TabletLocatorImplTest extends TestCase {
@@ -454,7 +456,7 @@ public class TabletLocatorImplTest extends TestCase {
     public Connector getConnector(org.apache.accumulo.core.security.thrift.AuthInfo auth) throws AccumuloException, AccumuloSecurityException {
       return getConnector(auth.user, auth.getPassword());
     }
-
+    
     @Override
     public Connector getConnector(String principal, AuthenticationToken token) throws AccumuloException, AccumuloSecurityException {
       throw new UnsupportedOperationException();
@@ -474,7 +476,8 @@ public class TabletLocatorImplTest extends TestCase {
     }
     
     @Override
-    public TabletLocations lookupTablet(TabletLocation src, Text row, Text stopRow, TabletLocator parent, TCredentials credentials) throws AccumuloSecurityException {
+    public TabletLocations lookupTablet(TabletLocation src, Text row, Text stopRow, TabletLocator parent, TCredentials credentials)
+        throws AccumuloSecurityException {
       
       // System.out.println("lookupTablet("+src+","+row+","+stopRow+","+ parent+")");
       // System.out.println(tservers);
@@ -503,7 +506,7 @@ public class TabletLocatorImplTest extends TestCase {
       
       SortedMap<Key,Value> results = tabletData.tailMap(startKey).headMap(stopKey);
       
-      Pair<SortedMap<KeyExtent,Text>,List<KeyExtent>> metadata = MetadataTable.getMetadataLocationEntries(results);
+      Pair<SortedMap<KeyExtent,Text>,List<KeyExtent>> metadata = MetadataLocationObtainer.getMetadataLocationEntries(results);
       
       for (Entry<KeyExtent,Text> entry : metadata.getFirst().entrySet()) {
         list.add(new TabletLocation(entry.getKey(), entry.getValue().toString()));
@@ -559,7 +562,7 @@ public class TabletLocatorImplTest extends TestCase {
       if (failures.size() > 0)
         parent.invalidateCache(failures);
       
-      SortedMap<KeyExtent,Text> metadata = MetadataTable.getMetadataLocationEntries(results).getFirst();
+      SortedMap<KeyExtent,Text> metadata = MetadataLocationObtainer.getMetadataLocationEntries(results).getFirst();
       
       for (Entry<KeyExtent,Text> entry : metadata.entrySet()) {
         list.add(new TabletLocation(entry.getKey(), entry.getValue().toString()));
@@ -606,18 +609,19 @@ public class TabletLocatorImplTest extends TestCase {
     if (location != null) {
       if (instance == null)
         instance = "";
-      Key lk = new Key(mr, MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, new Text(instance));
+      Key lk = new Key(mr, TabletsSection.CurrentLocationColumnFamily.NAME, new Text(instance));
       tabletData.put(lk, new Value(location.getBytes()));
     }
     
-    Key pk = new Key(mr, MetadataTable.PREV_ROW_COLUMN.getColumnFamily(), MetadataTable.PREV_ROW_COLUMN.getColumnQualifier());
+    Key pk = new Key(mr, TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(),
+        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier());
     tabletData.put(pk, per);
   }
   
   static void setLocation(TServers tservers, String server, KeyExtent tablet, KeyExtent ke, String location) {
     setLocation(tservers, server, tablet, ke, location, "");
   }
-
+  
   static void deleteServer(TServers tservers, String server) {
     tservers.tservers.remove(server);
     
@@ -1274,7 +1278,6 @@ public class TabletLocatorImplTest extends TestCase {
     } catch (Exception e) {
       
     }
-
-
+    
   }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/test/java/org/apache/accumulo/core/metadata/MetadataServicerTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/metadata/MetadataServicerTest.java b/core/src/test/java/org/apache/accumulo/core/metadata/MetadataServicerTest.java
new file mode 100644
index 0000000..63fe434
--- /dev/null
+++ b/core/src/test/java/org/apache/accumulo/core/metadata/MetadataServicerTest.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.core.metadata;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.mock.MockInstance;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.security.CredentialHelper;
+import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.junit.Test;
+
+public class MetadataServicerTest {
+  
+  @Test
+  public void checkSystemTableIdentifiers() {
+    assertNotEquals(RootTable.ID, MetadataTable.ID);
+    assertNotEquals(RootTable.NAME, MetadataTable.NAME);
+  }
+  
+  @Test
+  public void testGetCorrectServicer() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException {
+    String userTableName = "A";
+    MockInstance instance = new MockInstance("metadataTest");
+    Connector connector = instance.getConnector("root", new PasswordToken(""));
+    connector.tableOperations().create(userTableName);
+    String userTableId = connector.tableOperations().tableIdMap().get(userTableName);
+    TCredentials credentials = CredentialHelper.createSquelchError("root", new PasswordToken(""), instance.getInstanceID());
+    
+    MetadataServicer ms = MetadataServicer.forTableId(instance, credentials, RootTable.ID);
+    assertTrue(ms instanceof ServicerForRootTable);
+    assertFalse(ms instanceof TableMetadataServicer);
+    assertEquals(RootTable.ID, ms.getServicedTableId());
+    
+    ms = MetadataServicer.forTableId(instance, credentials, MetadataTable.ID);
+    assertTrue(ms instanceof ServicerForMetadataTable);
+    assertTrue(ms instanceof TableMetadataServicer);
+    assertEquals(RootTable.NAME, ((TableMetadataServicer) ms).getServicingTableName());
+    assertEquals(MetadataTable.ID, ms.getServicedTableId());
+    
+    ms = MetadataServicer.forTableId(instance, credentials, userTableId);
+    assertTrue(ms instanceof ServicerForUserTables);
+    assertTrue(ms instanceof TableMetadataServicer);
+    assertEquals(MetadataTable.NAME, ((TableMetadataServicer) ms).getServicingTableName());
+    assertEquals(userTableId, ms.getServicedTableId());
+    
+    ms = MetadataServicer.forTableName(instance, credentials, RootTable.NAME);
+    assertTrue(ms instanceof ServicerForRootTable);
+    assertFalse(ms instanceof TableMetadataServicer);
+    assertEquals(RootTable.ID, ms.getServicedTableId());
+    
+    ms = MetadataServicer.forTableName(instance, credentials, MetadataTable.NAME);
+    assertTrue(ms instanceof ServicerForMetadataTable);
+    assertTrue(ms instanceof TableMetadataServicer);
+    assertEquals(RootTable.NAME, ((TableMetadataServicer) ms).getServicingTableName());
+    assertEquals(MetadataTable.ID, ms.getServicedTableId());
+    
+    ms = MetadataServicer.forTableName(instance, credentials, userTableName);
+    assertTrue(ms instanceof ServicerForUserTables);
+    assertTrue(ms instanceof TableMetadataServicer);
+    assertEquals(MetadataTable.NAME, ((TableMetadataServicer) ms).getServicingTableName());
+    assertEquals(userTableId, ms.getServicedTableId());
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/core/src/test/java/org/apache/accumulo/core/util/MetadataTableTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/util/MetadataTableTest.java b/core/src/test/java/org/apache/accumulo/core/util/MetadataTableTest.java
deleted file mode 100644
index 7b942bf..0000000
--- a/core/src/test/java/org/apache/accumulo/core/util/MetadataTableTest.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.core.util;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.data.KeyExtent;
-import org.apache.hadoop.io.Text;
-import org.junit.Test;
-
-public class MetadataTableTest {
-  
-  @Test
-  public void checkSystemTableIdentifiers() {
-    assertNotEquals(RootTable.ID, MetadataTable.ID);
-    assertNotEquals(RootTable.NAME, MetadataTable.NAME);
-  }
-  
-  private KeyExtent createKeyExtent(String tname, String er, String per) {
-    return new KeyExtent(new Text(tname), er == null ? null : new Text(er), per == null ? null : new Text(per));
-  }
-  
-  private SortedSet<KeyExtent> createKeyExtents(String data[][]) {
-    
-    TreeSet<KeyExtent> extents = new TreeSet<KeyExtent>();
-    for (String[] exdata : data) {
-      extents.add(createKeyExtent(exdata[0], exdata[1], exdata[2]));
-    }
-    
-    return extents;
-  }
-  
-  private void runTest(String beginRange, String endRange) {
-    KeyExtent ke = createKeyExtent("foo", endRange, beginRange);
-    
-    SortedSet<KeyExtent> children = createKeyExtents(new String[][] {new String[] {"foo", endRange, beginRange}});
-    
-    assertTrue(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", endRange, "r1"}});
-    
-    assertTrue(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", endRange, "r2"}});
-    
-    assertFalse(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", (endRange == null ? "r2" : endRange + "Z"), "r1"}});
-    
-    assertFalse(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", (beginRange == null ? "r0" : "a" + beginRange)},
-        new String[] {"foo", endRange, "r1"}});
-    
-    assertFalse(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", "r2", "r1"}, new String[] {"foo", endRange, "r2"}});
-    
-    assertTrue(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", "r2", "r1"}, new String[] {"foo", endRange, "r1"}});
-    
-    assertFalse(MetadataTable.isContiguousRange(ke, children));
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", "r2", null}, new String[] {"foo", endRange, "r2"}});
-    
-    assertFalse(MetadataTable.isContiguousRange(ke, children));
-    
-    if (endRange == null) {
-      children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", null, "r1"},
-          new String[] {"foo", endRange, "r2"}});
-      
-      assertFalse(MetadataTable.isContiguousRange(ke, children));
-    }
-    
-    children = createKeyExtents(new String[][] {new String[] {"foo", "r1", beginRange}, new String[] {"foo", "r2", "r1"}, new String[] {"foo", "r3", "r2"},
-        new String[] {"foo", endRange, "r3"}});
-    
-    assertTrue(MetadataTable.isContiguousRange(ke, children));
-    
-  }
-  
-  @Test
-  public void testICR1() {
-    runTest(null, null);
-    runTest(null, "r4");
-    runTest("r0", null);
-    runTest("r0", "r4");
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/proxy/src/test/java/org/apache/accumulo/proxy/SimpleTest.java
----------------------------------------------------------------------
diff --git a/proxy/src/test/java/org/apache/accumulo/proxy/SimpleTest.java b/proxy/src/test/java/org/apache/accumulo/proxy/SimpleTest.java
index c70e110..b98cf31 100644
--- a/proxy/src/test/java/org/apache/accumulo/proxy/SimpleTest.java
+++ b/proxy/src/test/java/org/apache/accumulo/proxy/SimpleTest.java
@@ -50,7 +50,7 @@ import org.apache.accumulo.core.iterators.DevNull;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.iterators.user.SummingCombiner;
 import org.apache.accumulo.core.iterators.user.VersioningIterator;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.examples.simple.constraints.NumericValueConstraint;
 import org.apache.accumulo.minicluster.MiniAccumuloCluster;
@@ -147,7 +147,6 @@ public class SimpleTest {
     props.put("tokenClass", PasswordToken.class.getName());
     
     protocolClass = getRandomProtocol();
-    System.out.println(protocolClass.getName());
     
     proxyPort = PortUtils.getRandomFreePort();
     proxyServer = Proxy.createProxyServer(org.apache.accumulo.proxy.thrift.AccumuloProxy.class, org.apache.accumulo.proxy.ProxyServer.class, proxyPort,
@@ -1001,14 +1000,14 @@ public class SimpleTest {
     client.closeScanner(scanner);
     assertEquals(10, more.getResults().size());
     client.deleteTable(creds, "test2");
-
+    
     // don't know how to test this, call it just for fun
     client.clearLocatorCache(creds, TABLE_TEST);
-
+    
     // compact
     client.compactTable(creds, TABLE_TEST, null, null, null, true, true);
     assertEquals(1, countFiles(TABLE_TEST));
-
+    
     // get disk usage
     client.cloneTable(creds, TABLE_TEST, "test2", true, null, null);
     Set<String> tablesToScan = new HashSet<String>();
@@ -1028,7 +1027,7 @@ public class SimpleTest {
     assertEquals(1, diskUsage.get(2).getTables().size());
     client.deleteTable(creds, "foo");
     client.deleteTable(creds, "test2");
-
+    
     // export/import
     String dir = folder.getRoot() + "/test";
     String destDir = folder.getRoot() + "/test_dest";

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/ServerConstants.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/ServerConstants.java b/server/src/main/java/org/apache/accumulo/server/ServerConstants.java
index 699fdd8..3765cce 100644
--- a/server/src/main/java/org/apache/accumulo/server/ServerConstants.java
+++ b/server/src/main/java/org/apache/accumulo/server/ServerConstants.java
@@ -17,9 +17,9 @@
 package org.apache.accumulo.server;
 
 import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -28,8 +28,12 @@ public class ServerConstants {
   
   // versions should never be negative
   public static final Integer WIRE_VERSION = 2;
-  public static final int DATA_VERSION = 5;
-  public static final int PREV_DATA_VERSION = 4;
+  
+  /**
+   * current version reflects the addition of a separate root table (ACCUMULO-1481)
+   */
+  public static final int DATA_VERSION = 6;
+  public static final int PREV_DATA_VERSION = 5;
   
   // these are functions to delay loading the Accumulo configuration unless we must
   public static String[] getBaseDirs() {
@@ -92,6 +96,6 @@ public class ServerConstants {
   }
   
   public static String getRootTabletDir() {
-    return prefix(getRootTableDirs(), RootTable.ZROOT_TABLET)[0];
+    return prefix(getRootTableDirs(), RootTable.ROOT_TABLET_LOCATION)[0];
   }
 }
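
The DATA_VERSION bump above is what gates the on-disk upgrade for the separate root table. As a rough sketch (not from this commit): startup code typically compares a persisted version marker against these constants to decide whether an upgrade path applies; readPersistedDataVersion() and upgrade() below are hypothetical helpers.

  int persisted = readPersistedDataVersion(); // hypothetical: read the version marker from HDFS
  if (persisted == ServerConstants.PREV_DATA_VERSION) {
    // eligible for a one-step upgrade (here: splitting the root tablet out of !METADATA)
    upgrade(persisted); // hypothetical upgrade hook
  } else if (persisted != ServerConstants.DATA_VERSION) {
    throw new RuntimeException("Unsupported data version " + persisted);
  }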

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java b/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
index 0c647ea..b1bb894 100644
--- a/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
+++ b/server/src/main/java/org/apache/accumulo/server/client/BulkImporter.java
@@ -51,11 +51,11 @@ import org.apache.accumulo.core.data.thrift.TKeyExtent;
 import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.file.FileSKVIterator;
 import org.apache.accumulo.core.file.FileUtil;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.tabletserver.thrift.TabletClientService;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.LoggingRunnable;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.NamingThreadFactory;
 import org.apache.accumulo.core.util.StopWatch;
 import org.apache.accumulo.core.util.ThriftUtil;
@@ -132,7 +132,7 @@ public class BulkImporter {
     }
     
     ClientService.Client client = null;
-    final TabletLocator locator = TabletLocator.getInstance(instance, new Text(tableId));
+    final TabletLocator locator = TabletLocator.getLocator(instance, new Text(tableId));
     
     try {
       final Map<Path,List<TabletLocation>> assignments = Collections.synchronizedSortedMap(new TreeMap<Path,List<TabletLocation>>());

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java b/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
index 7a50dae..db5ece0 100644
--- a/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
+++ b/server/src/main/java/org/apache/accumulo/server/client/HdfsZooInstance.java
@@ -32,11 +32,11 @@ import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.util.ByteBufferUtil;
 import org.apache.accumulo.core.util.OpTimer;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.StringUtil;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.core.zookeeper.ZooUtil;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java b/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
index 03cbde3..ce5e5e4 100644
--- a/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
+++ b/server/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
@@ -28,9 +28,15 @@ import org.apache.accumulo.core.data.ColumnUpdate;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ChoppedColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ClonedColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ScanFileColumnFamily;
 import org.apache.accumulo.core.util.ColumnFQ;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.MetadataTable.DataFileValue;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
 import org.apache.accumulo.server.client.HdfsZooInstance;
@@ -55,14 +61,15 @@ public class MetadataConstraints implements Constraint {
     }
   }
   
-  private static final HashSet<ColumnFQ> validColumnQuals = new HashSet<ColumnFQ>(Arrays.asList(new ColumnFQ[] {MetadataTable.PREV_ROW_COLUMN,
-      MetadataTable.OLD_PREV_ROW_COLUMN, MetadataTable.DIRECTORY_COLUMN, MetadataTable.SPLIT_RATIO_COLUMN, MetadataTable.TIME_COLUMN,
-      MetadataTable.LOCK_COLUMN, MetadataTable.FLUSH_COLUMN, MetadataTable.COMPACT_COLUMN}));
+  private static final HashSet<ColumnFQ> validColumnQuals = new HashSet<ColumnFQ>(Arrays.asList(new ColumnFQ[] {
+      TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN, TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN,
+      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN, TabletsSection.ServerColumnFamily.TIME_COLUMN,
+      TabletsSection.ServerColumnFamily.LOCK_COLUMN, TabletsSection.ServerColumnFamily.FLUSH_COLUMN, TabletsSection.ServerColumnFamily.COMPACT_COLUMN}));
   
-  private static final HashSet<Text> validColumnFams = new HashSet<Text>(Arrays.asList(new Text[] {MetadataTable.BULKFILE_COLUMN_FAMILY,
-      MetadataTable.LOG_COLUMN_FAMILY, MetadataTable.SCANFILE_COLUMN_FAMILY, MetadataTable.DATAFILE_COLUMN_FAMILY,
-      MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, MetadataTable.LAST_LOCATION_COLUMN_FAMILY, MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY,
-      MetadataTable.CHOPPED_COLUMN_FAMILY, MetadataTable.CLONED_COLUMN_FAMILY}));
+  private static final HashSet<Text> validColumnFams = new HashSet<Text>(Arrays.asList(new Text[] {TabletsSection.BulkFileColumnFamily.NAME,
+      LogColumnFamily.NAME, ScanFileColumnFamily.NAME, DataFileColumnFamily.NAME,
+      TabletsSection.CurrentLocationColumnFamily.NAME, TabletsSection.LastLocationColumnFamily.NAME, TabletsSection.FutureLocationColumnFamily.NAME,
+      ChoppedColumnFamily.NAME, ClonedColumnFamily.NAME}));
   
   private static boolean isValidColumn(ColumnUpdate cu) {
     
@@ -78,19 +85,20 @@ public class MetadataConstraints implements Constraint {
   static private ArrayList<Short> addViolation(ArrayList<Short> lst, int violation) {
     if (lst == null)
       lst = new ArrayList<Short>();
-    lst.add((short)violation);
+    lst.add((short) violation);
     return lst;
   }
   
   static private ArrayList<Short> addIfNotPresent(ArrayList<Short> lst, int intViolation) {
     if (lst == null)
       return addViolation(lst, intViolation);
-    short violation = (short)intViolation;
+    short violation = (short) intViolation;
     if (!lst.contains(violation))
       return addViolation(lst, intViolation);
     return lst;
   }
   
+  @Override
   public List<Short> check(Environment env, Mutation mutation) {
     
     ArrayList<Short> violations = null;
@@ -144,7 +152,7 @@ public class MetadataConstraints implements Constraint {
     }
     
     boolean checkedBulk = false;
-
+    
     for (ColumnUpdate columnUpdate : colUpdates) {
       Text columnFamily = new Text(columnUpdate.getColumnFamily());
       
@@ -155,11 +163,11 @@ public class MetadataConstraints implements Constraint {
         continue;
       }
       
-      if (columnUpdate.getValue().length == 0 && !columnFamily.equals(MetadataTable.SCANFILE_COLUMN_FAMILY)) {
+      if (columnUpdate.getValue().length == 0 && !columnFamily.equals(ScanFileColumnFamily.NAME)) {
         violations = addViolation(violations, 6);
       }
       
-      if (columnFamily.equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
+      if (columnFamily.equals(DataFileColumnFamily.NAME)) {
         try {
           DataFileValue dfv = new DataFileValue(columnUpdate.getValue());
           
@@ -171,33 +179,33 @@ public class MetadataConstraints implements Constraint {
         } catch (ArrayIndexOutOfBoundsException aiooe) {
           violations = addViolation(violations, 1);
         }
-      } else if (columnFamily.equals(MetadataTable.SCANFILE_COLUMN_FAMILY)) {
+      } else if (columnFamily.equals(ScanFileColumnFamily.NAME)) {
         
-      } else if (columnFamily.equals(MetadataTable.BULKFILE_COLUMN_FAMILY)) {
+      } else if (columnFamily.equals(TabletsSection.BulkFileColumnFamily.NAME)) {
         if (!columnUpdate.isDeleted() && !checkedBulk) {
           // splits, which also write the time reference, are allowed to write this reference even when
           // the transaction is not running because the other half of the tablet is holding a reference
           // to the file.
           boolean isSplitMutation = false;
-          // When a tablet is assigned, it re-writes the metadata.  It should probably only update the location information, 
-          // but it writes everything.  We allow it to re-write the bulk information if it is setting the location. 
-          // See ACCUMULO-1230. 
+          // When a tablet is assigned, it re-writes the metadata. It should probably only update the location information,
+          // but it writes everything. We allow it to re-write the bulk information if it is setting the location.
+          // See ACCUMULO-1230.
           boolean isLocationMutation = false;
           
           HashSet<Text> dataFiles = new HashSet<Text>();
           HashSet<Text> loadedFiles = new HashSet<Text>();
-
+          
           String tidString = new String(columnUpdate.getValue());
           int otherTidCount = 0;
-
+          
           for (ColumnUpdate update : mutation.getUpdates()) {
-            if (new ColumnFQ(update).equals(MetadataTable.DIRECTORY_COLUMN)) {
+            if (new ColumnFQ(update).equals(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN)) {
               isSplitMutation = true;
-            } else if (new Text(update.getColumnFamily()).equals(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY)) {
+            } else if (new Text(update.getColumnFamily()).equals(TabletsSection.CurrentLocationColumnFamily.NAME)) {
               isLocationMutation = true;
-            } else if (new Text(update.getColumnFamily()).equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
+            } else if (new Text(update.getColumnFamily()).equals(DataFileColumnFamily.NAME)) {
               dataFiles.add(new Text(update.getColumnQualifier()));
-            } else if (new Text(update.getColumnFamily()).equals(MetadataTable.BULKFILE_COLUMN_FAMILY)) {
+            } else if (new Text(update.getColumnFamily()).equals(TabletsSection.BulkFileColumnFamily.NAME)) {
               loadedFiles.add(new Text(update.getColumnQualifier()));
               
               if (!new String(update.getValue()).equals(tidString)) {
@@ -223,7 +231,7 @@ public class MetadataConstraints implements Constraint {
       } else {
         if (!isValidColumn(columnUpdate)) {
           violations = addViolation(violations, 2);
-        } else if (new ColumnFQ(columnUpdate).equals(MetadataTable.PREV_ROW_COLUMN) && columnUpdate.getValue().length > 0
+        } else if (new ColumnFQ(columnUpdate).equals(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN) && columnUpdate.getValue().length > 0
             && (violations == null || !violations.contains((short) 4))) {
           KeyExtent ke = new KeyExtent(new Text(mutation.getRow()), (Text) null);
           
@@ -234,7 +242,7 @@ public class MetadataConstraints implements Constraint {
           if (!prevEndRowLessThanEndRow) {
             violations = addViolation(violations, 3);
           }
-        } else if (new ColumnFQ(columnUpdate).equals(MetadataTable.LOCK_COLUMN)) {
+        } else if (new ColumnFQ(columnUpdate).equals(TabletsSection.ServerColumnFamily.LOCK_COLUMN)) {
           if (zooCache == null) {
             zooCache = new ZooCache();
           }
@@ -263,7 +271,8 @@ public class MetadataConstraints implements Constraint {
     if (violations != null) {
       log.debug("violating metadata mutation : " + new String(mutation.getRow()));
       for (ColumnUpdate update : mutation.getUpdates()) {
-        log.debug(" update: " + new String(update.getColumnFamily()) + ":" + new String(update.getColumnQualifier()) + " value " + (update.isDeleted() ? "[delete]" : new String(update.getValue())));
+        log.debug(" update: " + new String(update.getColumnFamily()) + ":" + new String(update.getColumnQualifier()) + " value "
+            + (update.isDeleted() ? "[delete]" : new String(update.getValue())));
       }
     }
     
@@ -273,7 +282,8 @@ public class MetadataConstraints implements Constraint {
   protected Arbitrator getArbitrator() {
     return new ZooArbitrator();
   }
-
+  
+  @Override
   public String getViolationDescription(short violationCode) {
     switch (violationCode) {
       case 1:
@@ -296,6 +306,7 @@ public class MetadataConstraints implements Constraint {
     return null;
   }
   
+  @Override
   protected void finalize() {
     if (zooCache != null)
       zooCache.clear();
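
The refactoring above only renames column-family constants; the constraint pattern itself is unchanged: check() lazily accumulates short violation codes, and getViolationDescription() maps each code to text. A minimal standalone sketch of that pattern, using a hypothetical constraint class that is not part of this commit:

  import java.util.ArrayList;
  import java.util.List;

  import org.apache.accumulo.core.constraints.Constraint;
  import org.apache.accumulo.core.data.ColumnUpdate;
  import org.apache.accumulo.core.data.Mutation;

  public class NonEmptyValueConstraint implements Constraint {
    @Override
    public List<Short> check(Environment env, Mutation mutation) {
      ArrayList<Short> violations = null; // null means "no violations"; avoids allocating per mutation
      for (ColumnUpdate update : mutation.getUpdates()) {
        if (!update.isDeleted() && update.getValue().length == 0)
          violations = addViolation(violations, 1);
      }
      return violations;
    }

    private static ArrayList<Short> addViolation(ArrayList<Short> lst, int code) {
      if (lst == null)
        lst = new ArrayList<Short>();
      lst.add((short) code);
      return lst;
    }

    @Override
    public String getViolationDescription(short violationCode) {
      return violationCode == 1 ? "empty values are not allowed" : null;
    }
  }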

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java b/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java
index d88a85c..d50cff2 100644
--- a/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java
+++ b/server/src/main/java/org/apache/accumulo/server/gc/GarbageCollectWriteAheadLogs.java
@@ -42,8 +42,8 @@ import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.util.AddressUtil;
-import org.apache.accumulo.server.util.MetadataTable;
-import org.apache.accumulo.server.util.MetadataTable.LogEntry;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.accumulo.server.util.MetadataTableUtil.LogEntry;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.accumulo.trace.instrument.Span;
 import org.apache.accumulo.trace.instrument.Trace;
@@ -223,7 +223,7 @@ public class GarbageCollectWriteAheadLogs {
   private static int removeMetadataEntries(Map<Path,String> fileToServerMap, Set<Path> sortedWALogs, GCStatus status) throws IOException, KeeperException,
       InterruptedException {
     int count = 0;
-    Iterator<LogEntry> iterator = MetadataTable.getLogEntries(SecurityConstants.getSystemCredentials());
+    Iterator<LogEntry> iterator = MetadataTableUtil.getLogEntries(SecurityConstants.getSystemCredentials());
     while (iterator.hasNext()) {
       for (String filename : iterator.next().logSet) {
         Path path;


[41/50] [abbrv] git commit: ACCUMULO-1537 close batch scanners in tests

Posted by ct...@apache.org.
ACCUMULO-1537 close batch scanners in tests

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1502639 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/6c4bfc71
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/6c4bfc71
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/6c4bfc71

Branch: refs/heads/ACCUMULO-1496
Commit: 6c4bfc716dba6631cc6be623bf46dba7c7542420
Parents: 37d2fdb
Author: Eric C. Newton <ec...@apache.org>
Authored: Fri Jul 12 17:56:31 2013 +0000
Committer: Eric C. Newton <ec...@apache.org>
Committed: Fri Jul 12 17:56:31 2013 +0000

----------------------------------------------------------------------
 .../java/org/apache/accumulo/test/functional/ExamplesIT.java    | 5 ++++-
 .../org/apache/accumulo/test/functional/ServerSideErrorIT.java  | 4 +++-
 2 files changed, 7 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
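
Both diffs below apply the same rule: the code that creates a BatchScanner owns its cleanup, since a BatchScanner holds a thread pool that leaks if it is never closed. A minimal sketch of the defensive form used in ServerSideErrorIT, assuming a Connector named connector; the table name and per-entry handler are illustrative:

  BatchScanner bs = connector.createBatchScanner("mytable", Authorizations.EMPTY, 4);
  try {
    bs.setRanges(Collections.singletonList(new Range()));
    for (Entry<Key,Value> entry : bs) {
      process(entry); // hypothetical per-entry handler
    }
  } finally {
    bs.close(); // releases the scanner's worker threads even when iteration throws
  }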


http://git-wip-us.apache.org/repos/asf/accumulo/blob/6c4bfc71/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java
index 48300fb..6c846b4 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ExamplesIT.java
@@ -26,6 +26,7 @@ import java.util.List;
 import java.util.Map.Entry;
 
 import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.client.BatchScanner;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
 import org.apache.accumulo.core.client.Connector;
@@ -157,7 +158,9 @@ public class ExamplesIT extends MacTest {
     bw = c.createBatchWriter("shard", bwc);
     Index.index(30, new File(System.getProperty("user.dir") + "/src"), "\\W+", bw);
     bw.close();
-    List<String> found = Query.query(c.createBatchScanner("shard", Authorizations.EMPTY, 4), Arrays.asList("foo", "bar"));
+    BatchScanner bs = c.createBatchScanner("shard", Authorizations.EMPTY, 4);
+    List<String> found = Query.query(bs, Arrays.asList("foo", "bar"));
+    bs.close();
     // should find ourselves
     boolean thisFile = false;
     for (String file : found) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/6c4bfc71/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
index cb4e2d7..0293ae8 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
@@ -79,10 +79,12 @@ public class ServerSideErrorIT extends MacTest {
       for (Entry<Key,Value> entry : bs) {
         entry.getKey();
       }
-      bs.close();
     } catch (Exception e) {
       caught = true;
+    } finally {
+      bs.close();
     }
+    
     if (!caught)
       throw new Exception("batch scan did not fail");
     


[28/50] [abbrv] git commit: ACCUMULO-1537 fix race condition for scan timeouts

Posted by ct...@apache.org.
ACCUMULO-1537 fix race condition for scan timeouts

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1501884 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/0b6b7349
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/0b6b7349
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/0b6b7349

Branch: refs/heads/ACCUMULO-1496
Commit: 0b6b7349c865500d8cbc7b6ca21c5668b2bb8841
Parents: 22cff66
Author: Eric C. Newton <ec...@apache.org>
Authored: Wed Jul 10 18:00:21 2013 +0000
Committer: Eric C. Newton <ec...@apache.org>
Committed: Wed Jul 10 18:00:21 2013 +0000

----------------------------------------------------------------------
 .../java/org/apache/accumulo/test/functional/TimeoutIT.java | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
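
The race was between attaching the SlowIterator to the table and the scan that was supposed to time out: table configuration propagates to tablet servers asynchronously, so sleeping 250 ms after attachIterator was not always enough. The diff below sidesteps propagation entirely by attaching the iterator to the scanner itself and setting the timeout on the same object. Roughly, assuming a Connector named conn:

  BatchScanner bs = conn.createBatchScanner("timeout", Authorizations.EMPTY, 2);
  bs.setRanges(Collections.singletonList(new Range()));
  bs.setTimeout(5, TimeUnit.SECONDS);            // client-side scan timeout
  IteratorSetting slow = new IteratorSetting(100, SlowIterator.class);
  slow.addOption("sleepTime", "2000");           // server-side sleep (ms)
  bs.addScanIterator(slow);                      // scan-time only: no table config propagation
  try {
    for (Entry<Key,Value> entry : bs)
      entry.getKey();
    // reaching here means the scan never timed out, which the test treats as failure
  } catch (TimedOutException toe) {
    // expected: the server-side sleep exceeds the client timeout
  } finally {
    bs.close();
  }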


http://git-wip-us.apache.org/repos/asf/accumulo/blob/0b6b7349/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
index 6c7fa75..138584a 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
@@ -42,7 +42,7 @@ import org.junit.Test;
  */
 public class TimeoutIT extends MacTest {
   
-  @Test(timeout=30*1000)
+  @Test(timeout=60*1000)
   public void run() throws Exception {
     Connector conn = getConnector();
     testBatchWriterTimeout(conn);
@@ -84,11 +84,9 @@ public class TimeoutIT extends MacTest {
     m.put("cf1", "cq4", "v4");
     
     bw.addMutation(m);
-    
     bw.close();
     
     BatchScanner bs = getConnector().createBatchScanner("timeout", Authorizations.EMPTY, 2);
-    bs.setTimeout(1, TimeUnit.SECONDS);
     bs.setRanges(Collections.singletonList(new Range()));
     
     // should not timeout
@@ -96,10 +94,10 @@ public class TimeoutIT extends MacTest {
       entry.getKey();
     }
     
+    bs.setTimeout(5, TimeUnit.SECONDS);
     IteratorSetting iterSetting = new IteratorSetting(100, SlowIterator.class);
     iterSetting.addOption("sleepTime", 2000 + "");
-    getConnector().tableOperations().attachIterator("timeout", iterSetting);
-    UtilWaitThread.sleep(250);
+    bs.addScanIterator(iterSetting);
     
     try {
       for (Entry<Key,Value> entry : bs) {
@@ -109,7 +107,6 @@ public class TimeoutIT extends MacTest {
     } catch (TimedOutException toe) {
       // toe.printStackTrace();
     }
-    
     bs.close();
   }
   


[16/50] [abbrv] git commit: ACCUMULO-1537 started the stress tests, fixed some tests, made others more reliable

Posted by ct...@apache.org.
ACCUMULO-1537 started the stress tests, fixed some tests, made others more reliable

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1499562 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/390ca3f2
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/390ca3f2
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/390ca3f2

Branch: refs/heads/ACCUMULO-1496
Commit: 390ca3f2ebe75585ed8c97512e9985711f5d641d
Parents: 446a37a
Author: Eric C. Newton <ec...@apache.org>
Authored: Wed Jul 3 21:21:48 2013 +0000
Committer: Eric C. Newton <ec...@apache.org>
Committed: Wed Jul 3 21:21:48 2013 +0000

----------------------------------------------------------------------
 .../accumulo/test/TestMultiTableIngest.java     |   4 +-
 .../test/functional/CreateManyScannersTest.java |  51 ---------
 .../test/functional/BigRootTabletIT.java        |  63 +++++++++++
 .../accumulo/test/functional/BinaryIT.java      |   2 +-
 .../test/functional/BinaryStressIT.java         |  55 ++++++++++
 .../accumulo/test/functional/BloomFilterIT.java |  20 ++--
 .../test/functional/ChaoticBlancerIT.java       |  65 +++++++++++
 .../test/functional/CreateManyScannersIT.java   |  34 ++++++
 .../test/functional/FunctionalTestUtils.java    |   8 ++
 .../test/functional/MetadataMaxFiles.java       | 108 +++++++++++++++++++
 .../accumulo/test/functional/ReadWriteIT.java   |   2 +-
 .../accumulo/test/functional/SplitIT.java       |   8 +-
 .../accumulo/test/functional/TimeoutIT.java     |   2 +-
 .../accumulo/test/functional/WriteLotsIT.java   |  66 ++++++++++++
 test/system/auto/stress/batchWrite.py           |  77 -------------
 test/system/auto/stress/bigRootTablet.py        |  50 ---------
 test/system/auto/stress/binary.py               |  52 ---------
 test/system/auto/stress/manyScanners.py         |  31 ------
 test/system/auto/stress/metadataMaxFiles.py     |  66 ------------
 19 files changed, 419 insertions(+), 345 deletions(-)
----------------------------------------------------------------------
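
Most of the new integration tests below share one setup pattern: override configure(MiniAccumuloConfig) to tune site properties before the mini cluster starts. Distilled here for reference; the property values are illustrative, not prescriptive:

  @Override
  public void configure(MiniAccumuloConfig cfg) {
    Map<String,String> siteConfig = new HashMap<String,String>();
    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1"); // aggressive major compactions
    cfg.setSiteConfig(siteConfig);
  }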


http://git-wip-us.apache.org/repos/asf/accumulo/blob/390ca3f2/test/src/main/java/org/apache/accumulo/test/TestMultiTableIngest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/TestMultiTableIngest.java b/test/src/main/java/org/apache/accumulo/test/TestMultiTableIngest.java
index b051988..1458065 100644
--- a/test/src/main/java/org/apache/accumulo/test/TestMultiTableIngest.java
+++ b/test/src/main/java/org/apache/accumulo/test/TestMultiTableIngest.java
@@ -54,7 +54,7 @@ public class TestMultiTableIngest {
       scanner.setBatchSize(scanOpts.scanBatchSize);
       int count = i;
       for (Entry<Key,Value> elt : scanner) {
-        String expected = String.format("%05d", count);
+        String expected = String.format("%06d", count);
         if (!elt.getKey().getRow().toString().equals(expected))
           throw new RuntimeException("entry " + elt + " does not match expected " + expected + " in table " + table);
         count += tableNames.size();
@@ -96,7 +96,7 @@ public class TestMultiTableIngest {
       
       // populate
       for (int i = 0; i < opts.count; i++) {
-        Mutation m = new Mutation(new Text(String.format("%05d", i)));
+        Mutation m = new Mutation(new Text(String.format("%06d", i)));
         m.put(new Text("col" + Integer.toString((i % 3) + 1)), new Text("qual"), new Value("junk".getBytes()));
         b.getBatchWriter(tableNames.get(i % tableNames.size())).addMutation(m);
       }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/390ca3f2/test/src/main/java/org/apache/accumulo/test/functional/CreateManyScannersTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CreateManyScannersTest.java b/test/src/main/java/org/apache/accumulo/test/functional/CreateManyScannersTest.java
deleted file mode 100644
index c84fb9b..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/CreateManyScannersTest.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.security.Authorizations;
-
-public class CreateManyScannersTest extends FunctionalTest {
-  
-  @Override
-  public void cleanup() throws Exception {
-    
-  }
-  
-  @Override
-  public Map<String,String> getInitialConfig() {
-    return Collections.emptyMap();
-  }
-  
-  @Override
-  public List<TableSetup> getTablesToCreate() {
-    return Collections.singletonList(new TableSetup("mscant"));
-  }
-  
-  @Override
-  public void run() throws Exception {
-    Connector connector = getConnector();
-    for (int i = 0; i < 100000; i++) {
-      connector.createScanner("mscant", Authorizations.EMPTY);
-    }
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/390ca3f2/test/src/test/java/org/apache/accumulo/test/functional/BigRootTabletIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BigRootTabletIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BigRootTabletIT.java
new file mode 100644
index 0000000..308560f
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/BigRootTabletIT.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.*;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.util.RootTable;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.junit.Test;
+
+public class BigRootTabletIT extends MacTest {
+  // ACCUMULO-542: A large root tablet will fail to load if it doesn't fit in the tserver scan buffers
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    Map<String,String> siteConfig = new HashMap<String, String>();
+    siteConfig.put(Property.TABLE_SCAN_MAXMEM.getKey(), "1024");
+    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "60m");
+    cfg.setSiteConfig(siteConfig);
+  }
+
+  @Test(timeout=60*1000)
+  public void test() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().addSplits(MetadataTable.NAME, FunctionalTestUtils.splits("0 1 2 3 4 5 6 7 8 9 a".split(" ")));
+    for (int i = 0; i < 10; i++) {
+      c.tableOperations().create("" + i);
+      c.tableOperations().flush(MetadataTable.NAME, null, null, true);
+      c.tableOperations().flush(RootTable.NAME, null, null, true);
+    }
+    cluster.stop();
+    cluster.start();
+    int count = 0;
+    for (@SuppressWarnings("unused") Entry<Key,Value> entry : c.createScanner(RootTable.NAME, Authorizations.EMPTY))
+      count++;
+    assertTrue(count > 0);
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/390ca3f2/test/src/test/java/org/apache/accumulo/test/functional/BinaryIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BinaryIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BinaryIT.java
index 484c55c..fb28715 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/BinaryIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/BinaryIT.java
@@ -45,7 +45,7 @@ public class BinaryIT extends MacTest {
     runTest(c);
   }
   
-  void runTest(Connector c) throws Exception {
+  public static void runTest(Connector c) throws Exception {
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     ScannerOpts scanOpts = new ScannerOpts();
     TestBinaryRows.Opts opts = new TestBinaryRows.Opts();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/390ca3f2/test/src/test/java/org/apache/accumulo/test/functional/BinaryStressIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BinaryStressIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BinaryStressIT.java
new file mode 100644
index 0000000..8c3af67
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/BinaryStressIT.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.junit.Test;
+
+public class BinaryStressIT extends MacTest {
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    Map<String,String> siteConfig = new HashMap<String,String>();
+    siteConfig.put(Property.TSERV_MAXMEM.getKey(), "50K");
+    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "0");
+    cfg.setSiteConfig(siteConfig);
+  }
+
+  @Test(timeout=60*1000)
+  public void binaryStressTest() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("bt");
+    c.tableOperations().setProperty("bt", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
+    BinaryIT.runTest(c);
+    String id = c.tableOperations().tableIdMap().get("bt");
+    FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
+    FileStatus[] dir = fs.listStatus(new Path(cluster.getConfig().getDir() + "/accumulo/tables/" + id));
+    assertTrue(dir.length > 7);
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/390ca3f2/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
index 19153df..d222991 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
@@ -49,15 +49,15 @@ public class BloomFilterIT extends MacTest {
     cfg.setDefaultMemory(500, MemoryUnit.MEGABYTE);
   }
   
-  @Test(timeout=120*1000)
+  @Test(timeout=500*1000)
   public void test() throws Exception {
     Connector c = getConnector();
     for (String table : "bt1 bt2 bt3 bt4".split(" ")) {
       c.tableOperations().create(table);
     }
-    write(c, "bt1", 1, 0, 1000000000, 500);
-    write(c, "bt2", 2, 0, 1000000000, 500);
-    write(c, "bt3", 3, 0, 1000000000, 500);
+    write(c, "bt1", 1, 0, 1000000000, 100);
+    write(c, "bt2", 2, 0, 1000000000, 100);
+    write(c, "bt3", 3, 0, 1000000000, 100);
     
     // test inserting an empty key
     BatchWriter bw = c.createBatchWriter("bt4", new BatchWriterConfig());
@@ -80,9 +80,9 @@ public class BloomFilterIT extends MacTest {
     FunctionalTestUtils.checkRFiles(c, "bt4", 1, 1, 1, 1);
     
     // these queries should only run quickly if bloom filters are working, so lets get a base
-    long t1 = query(c, "bt1", 1, 0, 1000000000, 100000, 500);
-    long t2 = query(c, "bt2", 2, 0, 1000000000, 100000, 500);
-    long t3 = query(c, "bt3", 3, 0, 1000000000, 100000, 500);
+    long t1 = query(c, "bt1", 1, 0, 1000000000, 100000, 100);
+    long t2 = query(c, "bt2", 2, 0, 1000000000, 100000, 100);
+    long t3 = query(c, "bt3", 3, 0, 1000000000, 100000, 100);
     
     c.tableOperations().setProperty("bt1", Property.TABLE_BLOOM_ENABLED.getKey(), "true");
     c.tableOperations().setProperty("bt1", Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), RowFunctor.class.getName());
@@ -102,9 +102,9 @@ public class BloomFilterIT extends MacTest {
     
     // these queries should only run quickly if bloom
     // filters are working
-    long tb1 = query(c, "bt1", 1, 0, 1000000000, 100000, 500);
-    long tb2 = query(c, "bt2", 2, 0, 1000000000, 100000, 500);
-    long tb3 = query(c, "bt3", 3, 0, 1000000000, 100000, 500);
+    long tb1 = query(c, "bt1", 1, 0, 1000000000, 100000, 100);
+    long tb2 = query(c, "bt2", 2, 0, 1000000000, 100000, 100);
+    long tb3 = query(c, "bt3", 3, 0, 1000000000, 100000, 100);
     
     timeCheck(t1, tb1);
     timeCheck(t2, tb2);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/390ca3f2/test/src/test/java/org/apache/accumulo/test/functional/ChaoticBlancerIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ChaoticBlancerIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ChaoticBlancerIT.java
new file mode 100644
index 0000000..fb46f1e
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ChaoticBlancerIT.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.accumulo.server.master.balancer.ChaoticLoadBalancer;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class ChaoticBlancerIT extends MacTest {
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    Map<String,String> siteConfig = new HashMap<String, String>();
+    siteConfig.put(Property.TSERV_MAXMEM.getKey(), "10K");
+    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "0");
+    siteConfig.put(Property.TABLE_LOAD_BALANCER.getKey(), ChaoticLoadBalancer.class.getName());
+    cfg.setSiteConfig(siteConfig);
+  }
+
+  @Test(timeout=120*1000)
+  public void test() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
+    SortedSet<Text> splits = new TreeSet<Text>();
+    for (int i = 0; i < 200; i++) {
+      splits.add(new Text(String.format("%03d", i)));
+    }
+    c.tableOperations().create("unused");
+    c.tableOperations().addSplits("unused", splits);
+    TestIngest.Opts opts = new TestIngest.Opts();
+    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+    vopts.rows = opts.rows = 200000;
+    TestIngest.ingest(c, opts, new BatchWriterOpts());
+    c.tableOperations().flush("test_ingest", null, null, true);
+    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/390ca3f2/test/src/test/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java
new file mode 100644
index 0000000..c41fae1
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/CreateManyScannersIT.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.security.Authorizations;
+import org.junit.Test;
+
+public class CreateManyScannersIT extends MacTest {
+  
+  @Test(timeout=10*1000)
+  public void run() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("mscant");
+    for (int i = 0; i < 100000; i++) {
+      c.createScanner("mscant", Authorizations.EMPTY);
+    }
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/390ca3f2/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java b/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
index 4af2c96..8fd50f5 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
@@ -23,8 +23,10 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.SortedSet;
 import java.util.Map.Entry;
 import java.util.Set;
+import java.util.TreeSet;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
@@ -160,5 +162,11 @@ public class FunctionalTestUtils {
   static Mutation nm(String row, String cf, String cq, String value) {
     return nm(row, cf, cq, new Value(value.getBytes()));
   }
+  public static SortedSet<Text> splits(String [] splits) {
+    SortedSet<Text> result = new TreeSet<Text>();
+    for (String split : splits)
+      result.add(new Text(split));
+    return result;
+  }
   
 }
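
The new splits() helper wraps the String[]-to-SortedSet<Text> conversion that TableOperations.addSplits() expects. A minimal usage sketch (table name hypothetical, Connector c assumed):

    // Pre-split a table at three points, yielding four tablets.
    SortedSet<Text> points = FunctionalTestUtils.splits(new String[] {"f", "m", "t"});
    c.tableOperations().addSplits("mytable", points);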

http://git-wip-us.apache.org/repos/asf/accumulo/blob/390ca3f2/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFiles.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFiles.java b/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFiles.java
new file mode 100644
index 0000000..ae503de
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MetadataMaxFiles.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.*;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.impl.MasterClient;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.master.thrift.MasterClientService.Client;
+import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
+import org.apache.accumulo.core.master.thrift.TableInfo;
+import org.apache.accumulo.core.master.thrift.TabletServerStatus;
+import org.apache.accumulo.core.security.CredentialHelper;
+import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.util.RootTable;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.accumulo.server.util.Admin;
+import org.apache.accumulo.trace.instrument.Tracer;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class MetadataMaxFiles extends MacTest {
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    Map<String,String> siteConfig = new HashMap<String, String>();
+    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1");
+    siteConfig.put(Property.TSERV_SCAN_MAX_OPENFILES.getKey(), "10");
+    cfg.setSiteConfig(siteConfig);
+  }
+
+  @Test(timeout=240*1000)
+  public void test() throws Exception {
+    Connector c = getConnector();
+    SortedSet<Text> splits = new TreeSet<Text>();
+    for (int i = 0; i < 1000; i++) {
+      splits.add(new Text(String.format("%03d", i)));
+    }
+    c.tableOperations().setProperty(MetadataTable.NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "10000");
+    for (int i = 0; i < 5; i++) {
+      String tableName = "table" + i;
+      log.info("Creating " + tableName);
+      c.tableOperations().create(tableName);
+      log.info("adding splits");
+      c.tableOperations().addSplits(tableName, splits);
+      log.info("flushing");
+      c.tableOperations().flush(MetadataTable.NAME, null, null, true);
+      c.tableOperations().flush(RootTable.NAME, null, null, true);
+    }
+    UtilWaitThread.sleep(20*1000);
+    log.info("shutting down");
+    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+    cluster.stop();
+    log.info("starting up");
+    cluster.start();
+
+    UtilWaitThread.sleep(30*1000);
+
+    while (true) {
+      MasterMonitorInfo stats = null;
+      TCredentials creds = CredentialHelper.create("root", new PasswordToken(MacTest.PASSWORD), c.getInstance().getInstanceName());
+      Client client = null;
+      try {
+        client = MasterClient.getConnectionWithRetry(c.getInstance());
+        stats = client.getMasterStats(Tracer.traceInfo(), creds);
+      } finally {
+        if (client != null)
+          MasterClient.close(client);
+      }
+      int tablets = 0;
+      for (TabletServerStatus tserver : stats.tServerInfo) {
+        for (Entry<String,TableInfo> entry : tserver.tableMap.entrySet()) {
+          if (entry.getKey().startsWith("!"))
+            continue;
+          tablets += entry.getValue().onlineTablets;
+        }
+      }
+      if (tablets == 5005)
+        break;
+      UtilWaitThread.sleep(1000);
+    }
+  }
+  
+}
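
For reference, the magic number in the polling loop works out as follows: each of the 5 tables gets 1000 splits, hence 1001 tablets, and 5 x 1001 = 5005. The startsWith("!") check skips table IDs beginning with "!" (the metadata tablets), so only user tablets are counted; this is the same condition the deleted python version expressed as five "Tablets: 1001" lines.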

http://git-wip-us.apache.org/repos/asf/accumulo/blob/390ca3f2/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
index bc89e20..7c74326 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
@@ -57,7 +57,7 @@ import org.junit.Test;
 
 public class ReadWriteIT extends MacTest {
   
-  static final int ROWS = 20000;
+  static final int ROWS = 200000;
   static final int COLS = 1;
   static final String COLF = "colf";
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/390ca3f2/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
index 741e216..c4719e0 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
@@ -48,7 +48,7 @@ public class SplitIT extends MacTest {
   public void configure(MiniAccumuloConfig cfg) {
     Map<String,String> siteConfig = new HashMap<String,String>();
     siteConfig.put(Property.TSERV_MAXMEM.getKey(), "5K");
-    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1s");
+    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "100ms");
     cfg.setSiteConfig(siteConfig);
   }
   
@@ -92,8 +92,8 @@ public class SplitIT extends MacTest {
     c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
     c.tableOperations().setProperty("test_ingest", Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none");
     ReadWriteIT.interleaveTest(c);
-    UtilWaitThread.sleep(5 * 1000);
-    assertTrue(c.tableOperations().listSplits("test_ingest").size() > 10);
+    UtilWaitThread.sleep(5*1000);
+    assertTrue(c.tableOperations().listSplits("test_ingest").size() > 20);
   }
   
   @Test(timeout = 120 * 1000)
@@ -102,6 +102,8 @@ public class SplitIT extends MacTest {
     c.tableOperations().create("test_ingest");
     c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
     DeleteIT.deleteTest(c);
+    c.tableOperations().flush("test_ingest", null, null, true);
+    UtilWaitThread.sleep(5*1000);
     assertTrue(c.tableOperations().listSplits("test_ingest").size() > 30);
   }
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/390ca3f2/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
index 2dc6802..2c3c86d 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
@@ -42,7 +42,7 @@ import org.junit.Test;
  */
 public class TimeoutIT extends MacTest {
   
-  @Test
+  @Test(timeout=30*1000)
   public void run() throws Exception {
     Connector conn = getConnector();
     testBatchWriterTimeout(conn);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/390ca3f2/test/src/test/java/org/apache/accumulo/test/functional/WriteLotsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/WriteLotsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/WriteLotsIT.java
new file mode 100644
index 0000000..df8e656
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/WriteLotsIT.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.junit.Test;
+
+public class WriteLotsIT extends MacTest {
+  
+  @Test(timeout=20*1000)
+  public void writeLots() throws Exception {
+    final Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    final AtomicReference<Exception> ref = new AtomicReference<Exception>();
+    List<Thread> threads = new ArrayList<Thread>();
+    for (int i = 0; i < 10; i++) {
+      final int index = i;
+      Thread t = new Thread() {
+        public void run() {
+          try {
+            TestIngest.Opts opts = new TestIngest.Opts();
+            opts.startRow = index * 10000;
+            opts.rows = 10000;
+            TestIngest.ingest(c, opts, new BatchWriterOpts());
+          } catch (Exception ex) {
+            ref.set(ex);
+          }
+        }
+      };
+      t.start();
+      threads.add(t);
+    }
+    for (Thread thread : threads) {
+      thread.join();
+    }
+    if (ref.get() != null) {
+      throw ref.get();
+    }
+    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+    vopts.rows = 10000 * 10;
+    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+  }
+  
+}
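
The test above fans the ingest across ten raw threads and surfaces at most one failure through an AtomicReference. A hedged alternative sketch using an ExecutorService, where Future.get() rethrows any worker exception wrapped in an ExecutionException (this is not what the commit does, just the standard equivalent; the final Connector c and imports from java.util.concurrent are assumed):

    ExecutorService pool = Executors.newFixedThreadPool(10);
    List<Future<Void>> results = new ArrayList<Future<Void>>();
    for (int i = 0; i < 10; i++) {
      final int index = i;
      results.add(pool.submit(new Callable<Void>() {
        public Void call() throws Exception {
          TestIngest.Opts opts = new TestIngest.Opts();
          opts.startRow = index * 10000; // disjoint row range per writer
          opts.rows = 10000;
          TestIngest.ingest(c, opts, new BatchWriterOpts());
          return null;
        }
      }));
    }
    for (Future<Void> r : results)
      r.get();       // surfaces any worker failure, failing the test
    pool.shutdown();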

http://git-wip-us.apache.org/repos/asf/accumulo/blob/390ca3f2/test/system/auto/stress/batchWrite.py
----------------------------------------------------------------------
diff --git a/test/system/auto/stress/batchWrite.py b/test/system/auto/stress/batchWrite.py
deleted file mode 100755
index f2f83b0..0000000
--- a/test/system/auto/stress/batchWrite.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import logging
-import unittest
-import time
-
-from TestUtils import TestUtilsMixin, ROOT_PASSWORD
-
-log = logging.getLogger('test.auto')
-
-class WriteLots(unittest.TestCase, TestUtilsMixin):
-    """Start a clean accumulo, ingest some data using lots of clients at once,
-    and verify it was stored properly"""
-
-    order = 30
-    settings = TestUtilsMixin.settings.copy()
-    settings['instance.zookeeper.timeout'] = '40s'
-
-    def ingest(self, host, start, count, **kwargs):
-        klass = 'org.apache.accumulo.test.TestIngest'
-        args = '--user root --size 50 --random 56 --rows %d --start %d --cols 1 -p %s' % (count, start, ROOT_PASSWORD)
-        return self.runClassOn(host, klass, args.split(), **kwargs)
-
-    def setUp(self):
-        TestUtilsMixin.setUp(self);
-        
-        # initialize the database
-        self.createTable("test_ingest")
-
-    def tearDown(self):
-        TestUtilsMixin.tearDown(self)
-        self.pkill(self.masterHost(), 'TestIngest')
-
-    def runTest(self):
-        N = 10*len(self.hosts)
-        waitTime = 60 * N * self.options.rows / 200000 + 90
-
-        log.info("Starting %d clients", N)
-        handles = []
-        for i in range(N):
-            # start test ingestion
-            handle = self.ingest(self.hosts[i % len(self.hosts)],
-                                 i * self.options.rows,
-				 self.options.rows)
-            handles.append(handle)
-
-        end = time.time() + waitTime
-        for handle in handles:
-            waitTime = end - time.time()
-            log.debug("Waiting %s seconds", waitTime)
-            self.waitForStop(handle, waitTime)
-
-        log.info("Verifying Ingestion")
-        self.waitForStop(self.verify(self.masterHost(), self.options.rows * N),
-                         waitTime)
-        self.shutdown_accumulo()
-        
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(WriteLots())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/390ca3f2/test/system/auto/stress/bigRootTablet.py
----------------------------------------------------------------------
diff --git a/test/system/auto/stress/bigRootTablet.py b/test/system/auto/stress/bigRootTablet.py
deleted file mode 100755
index d69a79c..0000000
--- a/test/system/auto/stress/bigRootTablet.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import logging
-import unittest
-
-from TestUtils import TestUtilsMixin
-
-log = logging.getLogger('test.auto')
-
-class BigRootTablet(TestUtilsMixin, unittest.TestCase):
-    "ACCUMULO-542: A large root tablet will fail to load if it does't fit in the tserver scan buffers"
-
-    order = 80
-
-    settings = TestUtilsMixin.settings.copy()
-    settings['table.scan.max.memory'] = '1024'
-    settings['tserver.compaction.major.delay'] = '60m'
-
-    def setUp(self):
-        TestUtilsMixin.setUp(self);
-    
-    def tearDown(self):
-        TestUtilsMixin.tearDown(self);
-    
-    def runTest(self):
-	cmd = 'table !METADATA\naddsplits 0 1 2 3 4 5 6 7 8 9 a\n'
-        for i in range(10):
-	    cmd += 'createtable %s\nflush -t !METADATA\n' % i
-        self.shell(self.masterHost(), cmd)
-	self.stop_accumulo()
-	self.start_accumulo()
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(BigRootTablet())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/390ca3f2/test/system/auto/stress/binary.py
----------------------------------------------------------------------
diff --git a/test/system/auto/stress/binary.py b/test/system/auto/stress/binary.py
deleted file mode 100755
index 41f584d..0000000
--- a/test/system/auto/stress/binary.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-
-import os
-import logging
-
-from TestUtils import ACCUMULO_DIR
-from simple.binary import BinaryTest
-
-log = logging.getLogger('test.auto')
-
-class BinaryStressTest(BinaryTest) :
-    order = 80
-
-    tableSettings = BinaryTest.tableSettings.copy()
-    tableSettings['bt'] = { 
-    	'table.split.threshold': '10K',
-        }
-    settings = BinaryTest.settings.copy()
-    settings.update({
-        'tserver.memory.maps.max':'50K',
-        'tserver.compaction.major.delay': 0,
-        })
-
-    def runTest(self):
-        BinaryTest.runTest(self)
-        handle = self.runOn(self.masterHost(), [
-            'hadoop', 'fs', '-ls', os.path.join(ACCUMULO_DIR,'tables',self.getTableId('bt'))
-            ])
-        out, err = handle.communicate()
-        if len(out.split('\n')) < 8:
-            log.debug(out)
-        self.assert_(len(out.split('\n')) > 7)
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(BinaryStressTest())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/390ca3f2/test/system/auto/stress/manyScanners.py
----------------------------------------------------------------------
diff --git a/test/system/auto/stress/manyScanners.py b/test/system/auto/stress/manyScanners.py
deleted file mode 100755
index 229b931..0000000
--- a/test/system/auto/stress/manyScanners.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from JavaTest import JavaTest
-
-import unittest
-
-class CreateManyScannersTest(JavaTest):
-    "Test creating a lot of scanners"
-
-    order = 9999
-    testClass="org.apache.accumulo.test.functional.CreateManyScannersTest"
-    maxRuntime = 60
-
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(CreateManyScannersTest())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/390ca3f2/test/system/auto/stress/metadataMaxFiles.py
----------------------------------------------------------------------
diff --git a/test/system/auto/stress/metadataMaxFiles.py b/test/system/auto/stress/metadataMaxFiles.py
deleted file mode 100755
index 8a65b81..0000000
--- a/test/system/auto/stress/metadataMaxFiles.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-
-from TestUtils import TestUtilsMixin
-import os
-
-import logging
-log = logging.getLogger('test.auto')
-
-class MetadataMaxFiles(TestUtilsMixin, unittest.TestCase):
-    "open a large !METADATA with too few files"
-
-    order=75
-
-    settings = TestUtilsMixin.settings.copy()
-    settings['tserver.compaction.major.delay'] = 1
-
-    def runTest(self):
-        # Create a bunch of tables with splits to split the !METADATA table
-        self.splitfile = 'splits'
-        fp = open(self.splitfile, 'w')
-        for i in range(1000):
-            fp.write('%03d\n' % i)
-        fp.close()
-        self.splitfile = os.path.realpath(self.splitfile)
-        self.shell(self.masterHost(),
-                   'config -t !METADATA -s table.split.threshold=10000\n' + 
-                   ''.join(['createtable test%d -sf %s\nflush -t !METADATA\n' % (i, self.splitfile) for i in range(5)]))
-        self.shutdown_accumulo(150)
-        
-        # reconfigure accumulo to use a very small number of files
-        self.stop_accumulo()
-        self.settings['tserver.scan.files.open.max'] = 10
-        self.create_config_file(self.settings)
-
-        # make sure the master knows about all the tables we created
-        self.sleep(5)
-        self.start_accumulo()
-        self.sleep(60)
-        h = self.runOn(self.masterHost(),
-                       [self.accumulo_sh(), 'org.apache.accumulo.test.GetMasterStats'])
-        out, err = h.communicate()
-        self.assert_(len([x for x in out.split('\n') if x.find('  Tablets: 1001') == 0]) == 5)
-
-    def tearDown(self):
-        TestUtilsMixin.tearDown(self)
-        os.unlink(self.splitfile)
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(MetadataMaxFiles())
-    return result
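
(This deleted stress test is the direct ancestor of the MetadataMaxFiles IT added above: 'tserver.scan.files.open.max' = 10 maps to TSERV_SCAN_MAX_OPENFILES = "10", 'tserver.compaction.major.delay' = 1 maps to TSERV_MAJC_DELAY = "1", and the five 'Tablets: 1001' assertions map to the tablets == 5005 check.)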


[46/50] [abbrv] git commit: ACCUMULO-1496 fix sorted pom

Posted by ct...@apache.org.
ACCUMULO-1496 fix sorted pom


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/e01cf8dc
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/e01cf8dc
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/e01cf8dc

Branch: refs/heads/ACCUMULO-1496
Commit: e01cf8dc00e66539c0c4054957326bdd2a51f8d8
Parents: b17d980
Author: Christopher Tubbs <ct...@apache.org>
Authored: Tue Jul 16 15:14:54 2013 -0400
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Tue Jul 16 15:14:54 2013 -0400

----------------------------------------------------------------------
 pom.xml | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/e01cf8dc/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 39f6091..1955f08 100644
--- a/pom.xml
+++ b/pom.xml
@@ -206,11 +206,6 @@
         <version>1.2.16</version>
       </dependency>
       <dependency>
-        <groupId>org.scannotation</groupId>
-        <artifactId>scannotation</artifactId>
-        <version>1.0.3</version>
-      </dependency>
-      <dependency>
         <groupId>org.apache.accumulo</groupId>
         <artifactId>accumulo-core</artifactId>
         <version>${project.version}</version>
@@ -363,6 +358,11 @@
         <version>${powermock.version}</version>
       </dependency>
       <dependency>
+        <groupId>org.scannotation</groupId>
+        <artifactId>scannotation</artifactId>
+        <version>1.0.3</version>
+      </dependency>
+      <dependency>
         <groupId>org.slf4j</groupId>
         <artifactId>slf4j-api</artifactId>
         <version>${slf4j.version}</version>


[23/50] [abbrv] ACCUMULO-1537 completed the conversion of functional tests to IT; also converted ShellServerTest to an IT

Posted by ct...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/system/auto/simple/splitRecovery.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/splitRecovery.py b/test/system/auto/simple/splitRecovery.py
deleted file mode 100755
index c860f99..0000000
--- a/test/system/auto/simple/splitRecovery.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from JavaTest import JavaTest
-
-import unittest
-
-class SplitRecoveryTest(JavaTest):
-    "Test recovery of partial splits"
-
-    order = 20
-    testClass="org.apache.accumulo.test.functional.SplitRecoveryTest"
-
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(SplitRecoveryTest())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/system/auto/simple/wal.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/wal.py b/test/system/auto/simple/wal.py
deleted file mode 100755
index a3e4bef..0000000
--- a/test/system/auto/simple/wal.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import signal
-import os
-import unittest
-import logging
-from simple.readwrite import SunnyDayTest
-from TestUtils import ACCUMULO_HOME
-
-log = logging.getLogger('test.auto')
-
-class WriteAheadLog(SunnyDayTest):
-
-     order = 25
-
-     settings = SunnyDayTest.settings.copy()
-   
-     # roll the log at least once
-     settings['tserver.walog.max.size'] = '2M'
-     settings['gc.cycle.delay'] = 1
-     settings['gc.cycle.start'] = 1
-     settings['master.recovery.delay'] = 0
-
-     # compact frequently
-     settings['tserver.memory.maps.max'] = '200K'
-     settings['tserver.compaction.major.delay'] = 1
-
-     # split frequently
-     tableSettings = SunnyDayTest.tableSettings.copy()
-     tableSettings['test_ingest'] = { 
-         'table.split.threshold': '750K',
-         }
-
-     def runTest(self):
-          self.sleep(3)
-          waitTime = self.waitTime()
-          self.waitForStop(self.ingester, waitTime)
-          log.info("Stopping tablet servers hard")
-          self.stop_accumulo(signal.SIGKILL)
-          self.sleep(5)
-          self.start_accumulo()
-          h = self.runOn(self.masterHost(), [self.accumulo_sh(), "gc"])
-          self.sleep(3)
-          log.info("Verifying Ingestion")
-          self.waitForStop(self.verify(self.masterHost(),
-                                       self.options.rows,
-                                       size=self.options.size),
-                           waitTime)
-          self.shutdown_accumulo()
-
-def suite():
-     result = unittest.TestSuite()
-     result.addTest(WriteAheadLog())
-     return result
-

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/system/auto/simple/zoo.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/zoo.py b/test/system/auto/simple/zoo.py
deleted file mode 100755
index d84cbe9..0000000
--- a/test/system/auto/simple/zoo.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import logging
-import unittest
-import signal
-
-from readwrite import SunnyDayTest
-import TestUtils
-
-class SessionExpired(SunnyDayTest):
-
-    order = 25
-
-    def signal(self, which):
-        for host in self.hosts:
-            self.pkill(host, '=tserver', which)
-
-    def runTest(self):
-        # stop the tservers from talking to zookeeper
-        self.signal(signal.SIGSTOP)
-        
-        # timeout the session
-        self.sleep(40)
-        
-        # turn the tservers back on so that they see the expired session
-        self.signal(signal.SIGCONT)
-
-        # wait for the tservers to stop (master and monitor are first and last
-        # handles)
-        for h in self.accumuloHandles[1:-1]:
-            if 'tserver' in h.cmd:
-                self.waitForStop(h, 5)
-        self.cleanupAccumuloHandles()
-
-        
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(SessionExpired())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/system/auto/simple/zooCacheTest.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/zooCacheTest.py b/test/system/auto/simple/zooCacheTest.py
deleted file mode 100755
index fcbb3ab..0000000
--- a/test/system/auto/simple/zooCacheTest.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import shutil
-import unittest
-import time
-
-from TestUtils import TestUtilsMixin, ACCUMULO_HOME, SITE, ZOOKEEPERS
-
-class ZooCacheTest(TestUtilsMixin, unittest.TestCase):
-    "Zoo Cache Test"
-
-    order = 21
-    testClass=""
-
-    def setUp(self):
-        self.create_config_file(self.settings.copy())
-        
-    def runTest(self):
-        shutil.rmtree('/tmp/zcTest-42')
-        handleCC = self.runClassOn('localhost', 'org.apache.accumulo.test.functional.CacheTestClean', ['/zcTest-42','/tmp/zcTest-42'])
-        self.waitForStop(handleCC, 10)
-        handleR1 = self.runClassOn('localhost', 'org.apache.accumulo.test.functional.CacheTestReader', ['/zcTest-42','/tmp/zcTest-42', ZOOKEEPERS])
-        handleR2 = self.runClassOn('localhost', 'org.apache.accumulo.test.functional.CacheTestReader', ['/zcTest-42','/tmp/zcTest-42', ZOOKEEPERS])
-        handleR3 = self.runClassOn('localhost', 'org.apache.accumulo.test.functional.CacheTestReader', ['/zcTest-42','/tmp/zcTest-42', ZOOKEEPERS])
-        handleW = self.runClassOn('localhost', 'org.apache.accumulo.test.functional.CacheTestWriter', ['/zcTest-42','/tmp/zcTest-42','3','500'])
-        self.waitForStop(handleW, 200)
-        self.waitForStop(handleR1, 1)
-        self.waitForStop(handleR2, 1)
-        self.waitForStop(handleR3, 1)
-
-    def tearDown(self):
-        os.unlink(os.path.join(ACCUMULO_HOME, 'conf', SITE))
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(ZooCacheTest())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/system/auto/stress/halfDead.py
----------------------------------------------------------------------
diff --git a/test/system/auto/stress/halfDead.py b/test/system/auto/stress/halfDead.py
deleted file mode 100755
index fa5255a..0000000
--- a/test/system/auto/stress/halfDead.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import time
-import signal
-import unittest
-
-from simple.readwrite import SunnyDayTest
-from TestUtils import ACCUMULO_HOME
-
-import logging
-log = logging.getLogger('test.auto')
-
-class TabletServerHangs(SunnyDayTest):
-
-     order = 25
-   
-     # connections should timeout quickly for faster tests
-     settings = SunnyDayTest.settings.copy()
-     settings['general.rpc.timeout'] = '5s'
-     settings['instance.zookeeper.timeout'] = '15s'
-
-     def start_tserver(self, host):
-         log.info("Starting tserver we can pause with bad read/writes")
-         libpath = '%s/test/system/auto/fake_disk_failure.so' % ACCUMULO_HOME
-         os.environ['LD_PRELOAD'] = libpath
-         os.environ['DYLD_INSERT_LIBRARIES'] = libpath
-         os.environ['DYLD_FORCE_FLAT_NAMESPACE'] = 'true'
-         self.stop = self.runOn(self.masterHost(), [self.accumulo_sh(), 'tserver'])
-         del os.environ['LD_PRELOAD']
-         del os.environ['DYLD_FORCE_FLAT_NAMESPACE']
-         del os.environ['DYLD_INSERT_LIBRARIES']
-         self.flagFile = os.getenv("HOME") + "/HOLD_IO_%d" % self.stop.pid
-         log.debug("flag file is " + self.flagFile)
-         return self.stop
-          
-     def runTest(self):
-         waitTime = self.waitTime()
-         log.info("Waiting for ingest to stop")
-         self.waitForStop(self.ingester, waitTime)
-         MANY_ROWS = 500000
-
-         
-         self.ingester = self.ingest(self.masterHost(),
-                                     MANY_ROWS,
-                                     size=self.options.size)
-         # wait for the ingester to get going
-         for i in range(100):
-	     line = self.ingester.stdout.readline()
-	     if line == '' or line.find(' sent ') > 0:
-                break
-
-         log.info("Starting faking disk failure for tserver")
-         fp = open(self.flagFile, "w+")
-         fp.close()
-
-         self.sleep(10)
-         log.info("Ending faking disk failure for tserver")
-         os.unlink(self.flagFile)
-
-         # look for the log message that indicates a timeout
-         out, err = self.waitForStop(self.ingester, waitTime)
-         self.assert_(out.find('requeuing') >= 0)
-
-         log.info("Verifying Ingestion")
-         self.waitForStop(self.verify(self.masterHost(),
-                                      MANY_ROWS,
-                                      size=self.options.size),
-                          waitTime)
-         os.kill(self.stop.pid, signal.SIGHUP)
-
-         # look for the log message that indicates the tablet server stopped for a while
-         out, err = self.stop.communicate()
-         self.assert_(err.find('sleeping\nsleeping\nsleeping\n') >= 0)
-          
-
-     def tearDown(self):
-         SunnyDayTest.tearDown(self)
-         try:
-              os.unlink(self.flagFile)
-         except:
-              pass
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(TabletServerHangs())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/system/auto/stress/migrations.py
----------------------------------------------------------------------
diff --git a/test/system/auto/stress/migrations.py b/test/system/auto/stress/migrations.py
deleted file mode 100755
index d07d7a8..0000000
--- a/test/system/auto/stress/migrations.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import logging
-import unittest
-import time
-import sleep
-
-from TestUtils import TestUtilsMixin, ACCUMULO_DIR
-
-log = logging.getLogger('test.auto')
-
-from simple.readwrite import SunnyDayTest, Interleaved
-from simple.delete import DeleteTest
-
-class ChaoticBalancerIntegrity(SunnyDayTest):
-    """Start a new table, create many splits, and attempt ingest while running a crazy load balancer"""
-
-    order = 90
-
-    settings = TestUtilsMixin.settings.copy()
-    settings.update({
-      'tserver.memory.maps.max':'10K',
-      'tserver.compaction.major.delay': 0,
-      'table.balancer':'org.apache.accumulo.server.master.balancer.ChaoticLoadBalancer',
-      })
-
-    tableSettings = SunnyDayTest.tableSettings.copy()
-    tableSettings['test_ingest'] = { 
-    	'table.split.threshold': '10K',
-        }
-    def setUp(self):
-        # ensure we have two servers
-        if len(self.options.hosts) == 1:
-            self.options.hosts.append('localhost')
-        self.options.hosts = self.options.hosts[:2]
-        
-        TestUtilsMixin.setUp(self);
-
-        # create a table with 200 splits
-        import tempfile
-        fileno, filename = tempfile.mkstemp()
-        fp = os.fdopen(fileno, "wb")
-        try:
-            for i in range(200):
-                fp.write("%08x\n" % (i * 1000))
-        finally:
-            fp.close()
-        self.createTable('unused', filename)
-
-        # create an empty table
-        self.createTable('test_ingest')
-
-    def runTest(self):
-
-        # start test ingestion
-        log.info("Starting Test Ingester")
-        self.ingester = self.ingest(self.masterHost(),
-                                    200000,
-                                    size=self.options.size)
-        self.waitForStop(self.ingester, 120)
-        self.shell(self.masterHost(), 'flush -t test_ingest')
-        self.waitForStop(self.verify(self.masterHost(), self.options.rows), 60)
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(ChaoticBalancerIntegrity())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/system/auto/stress/msplit.py
----------------------------------------------------------------------
diff --git a/test/system/auto/stress/msplit.py b/test/system/auto/stress/msplit.py
deleted file mode 100755
index 1d639e5..0000000
--- a/test/system/auto/stress/msplit.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import logging
-import unittest
-import time
-
-from TestUtils import ACCUMULO_DIR
-
-from simple.split import TabletShouldSplit
-
-log = logging.getLogger('test.auto')
-
-class MetaSplitTest(TabletShouldSplit):
-
-    order = TabletShouldSplit.order + 1
-    
-    tableSettings = TabletShouldSplit.tableSettings.copy()
-    tableSettings['!METADATA'] = { 
-    	'table.split.threshold': 500,
-        }
-    tableSettings['test_ingest'] = { 
-    	'table.split.threshold': '200K',
-    	'table.file.compress.blocksize': '10K',
-        }
-
-    def runTest(self):
-        TabletShouldSplit.runTest(self)
-        handle = self.runOn(self.masterHost(), [
-            'hadoop', 'fs', '-ls', os.path.join(ACCUMULO_DIR,'tables',self.getTableId('!METADATA'))
-            ])
-        out, err = handle.communicate()
-        lst = [line for line in out.split('\n') if line.find('tables') >= 0]
-        self.assert_(len(lst) > 3)
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(MetaSplitTest())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/system/auto/stress/restart.py
----------------------------------------------------------------------
diff --git a/test/system/auto/stress/restart.py b/test/system/auto/stress/restart.py
deleted file mode 100755
index 4e7f66c..0000000
--- a/test/system/auto/stress/restart.py
+++ /dev/null
@@ -1,199 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from simple.readwrite import SunnyDayTest
-
-import unittest
-import logging
-import os
-import signal
-from TestUtils import TestUtilsMixin, ACCUMULO_HOME
-from subprocess import PIPE
-
-log = logging.getLogger('test.auto')
-
-class RestartTest(SunnyDayTest):
-    order = 80
-
-class RestartMasterTest(RestartTest):
-
-    def runTest(self):
-
-        self.sleep(3)
-        log.info("Stopping master server")
-        self.stop_master(self.masterHost())
-        self.sleep(1)
-        log.info("Starting master server")
-        self.start_master(self.masterHost())
-
-        self.waitForStop(self.ingester, 30)
-        self.waitForStop(self.verify(self.masterHost(), self.options.rows), 60)
-
-
-class RestartMasterRecovery(RestartTest):
-
-    settings = RestartTest.settings.copy()
-    settings['instance.zookeeper.timeout'] = 5
-
-    def runTest(self):
-        self.waitForStop(self.ingester, 30)
-
-        # start a recovery
-        self.stop_accumulo()
-        self.start_accumulo_procs()
-
-        self.sleep(5)
-
-        self.stop_master(self.masterHost())
-        self.start_master(self.masterHost())
-
-        self.waitForStop(self.verify(self.masterHost(), self.options.rows), 100)
-
-
-class RestartMasterSplitterTest(RestartMasterTest):
-    tableSettings = RestartMasterTest.tableSettings.copy()
-    tableSettings['test_ingest'] = { 
-            'table.split.threshold': '5K',
-        }
-
-
-class KilledTabletServerTest(RestartTest):
-
-    def startRead(self):
-        return self.verify(self.masterHost(), self.options.rows)
-
-    def stopRead(self, handle, timeout):
-        self.waitForStop(handle, timeout)
-
-    def readRows(self):
-        self.stopRead(self.startRead(), 400)
-
-    def runTest(self):
-
-        self.waitForStop(self.ingester, 120)
-        log.info("Ingester stopped")
-        log.info("starting scan")
-        self.readRows()
-        for host in self.hosts:
-            log.info("Restarting Tablet server on %s", host)
-            self.stop_tserver(host, signal.SIGKILL)
-            self.start_tserver(host)
-            log.info("Tablet server on %s started", host)
-            log.info("starting scan")
-            self.readRows()
-
-class KilledTabletServerSplitTest(KilledTabletServerTest):
-    tableSettings = KilledTabletServerTest.tableSettings.copy()
-    tableSettings['test_ingest'] = { 
-            'table.split.threshold': '5K',
-        }
-
-    settings = TestUtilsMixin.settings.copy()
-    settings.update({
-        'tserver.memory.maps.max':'5K',
-        'tserver.compaction.major.delay': 1,
-        'tserver.walog.max.size': '50K',
-        })
-
-    def ingest(self, host, count, start=0, timestamp=None, size=50, colf=None, **kwargs):
-        return KilledTabletServerTest.ingest(self, host, count*10, start, timestamp, size, colf)
-
-    def verify(self, host, count, start=0, size=50, timestamp=None, colf='colf'):
-        return KilledTabletServerTest.verify(self, host, count*10, start, size, timestamp, colf)
-            
-    def runTest(self):
-
-        for i in range(5):
-            self.sleep(20)
-            self.stop_tserver(self.masterHost(), signal.SIGKILL)
-            self.start_tserver(self.masterHost())
-
-        self.waitForStop(self.ingester, 600)
-        log.info("Ingester stopped")
-        log.info("starting scan")
-        self.readRows()
-        for host in self.hosts:
-            log.info("Restarting Tablet server on %s", host)
-            self.stop_tserver(host, signal.SIGKILL)
-            self.start_tserver(host)
-            log.info("Tablet server on %s started", host)
-            log.info("starting scan")
-            self.readRows()
-
-
-class KilledTabletDuringScan(KilledTabletServerTest):
-    "Kill a tablet server while we are scanning a table"
-
-    def runTest(self):
-
-        self.waitForStop(self.ingester, 90)
-        log.info("Ingester stopped")
-        handle = self.startRead()
-
-        for host in self.hosts:
-            log.info("Restarting Tablet server on %s", host)
-            self.stop_tserver(host, signal.SIGKILL)
-            self.start_tserver(host)
-            log.info("Tablet server on %s started", host)
-            log.info("starting scan")
-            self.stopRead(handle, 500)
-            if host != self.hosts[-1]:
-                handle = self.startRead()
-
-class KilledTabletDuringShutdown(KilledTabletServerTest):
-
-    def runTest(self):
-        self.waitForStop(self.ingester, 90)
-        log.info("Ingester stopped")
-        self.stop_tserver(self.hosts[0], signal.SIGKILL)
-        log.info("This can take a couple minutes")
-        self.shutdown_accumulo()
-
-
-from simple.split import TabletShouldSplit
-
-class ShutdownSplitter(TabletShouldSplit):
-    "Shutdown while compacting, splitting, and migrating"
-
-    tableSettings = TabletShouldSplit.tableSettings.copy()
-    tableSettings['!METADATA'] = { 
-            'table.split.threshold': '10K',
-        }
-    tableSettings['test_ingest'] = { 
-            'table.split.threshold': '5K',
-        }
-
-    def runTest(self):
-        self.sleep(1)
-        self.shutdown_accumulo()
-
-        # look for any exceptions
-        self.wait(
-            self.runOn(self.masterHost(),
-                       ['grep', '-r', '-q', '" at org.apache.accumulo.core"\\\\\\|" at org.apache.accumulo.server"',
-                        os.path.join(ACCUMULO_HOME,'logs') ])
-            )
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(ShutdownSplitter())
-    result.addTest(KilledTabletDuringShutdown())
-    result.addTest(RestartMasterRecovery())
-    result.addTest(KilledTabletDuringScan())
-    result.addTest(RestartMasterTest())
-    result.addTest(RestartMasterSplitterTest())
-    result.addTest(KilledTabletServerTest())
-    result.addTest(KilledTabletServerSplitTest())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/system/auto/stress/table.py
----------------------------------------------------------------------
diff --git a/test/system/auto/stress/table.py b/test/system/auto/stress/table.py
deleted file mode 100755
index 7c3e4c5..0000000
--- a/test/system/auto/stress/table.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-
-import os
-from TestUtils import TestUtilsMixin, ROOT, ROOT_PASSWORD, ACCUMULO_DIR
-
-class Table(TestUtilsMixin, unittest.TestCase):
-    "create and destroy the same table repeatedly, sometimes ingesting into it"
-
-    order = 85
-
-    def deleteTable(self, table):
-        out, err, code = self.rootShell(self.masterHost(),
-                                        "deletetable %s\n" % table)
-        self.processResult(out, err, code)
-        self.sleep(0.5)
-        
-    def tables(self):
-        code, out, err = self.shell(self.masterHost(), "tables\n")
-        self.processResult(out, err, code)
-        return out
-
-    def runTest(self):
-        import tempfile
-        fileno, self.filename = tempfile.mkstemp()
-        fp = os.fdopen(fileno, "wb")
-        try:
-            for i in range(0, 200, 10):
-                fp.write("%08x\n" % (i * 1000))
-            fp.close()
-
-            for i in range(5):
-                self.createTable('test_ingest')
-                self.deleteTable('test_ingest')
-            self.createTable('test_ingest')
-            for i in range(5):
-                self.wait(self.ingest(self.masterHost(), 10, start=i*10))
-                self.wait(self.verify(self.masterHost(), 10, start=i*10))
-                self.deleteTable('test_ingest')
-                self.createTable('test_ingest', self.filename)
-        finally:
-            os.unlink(self.filename)
-        
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(Table())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/system/auto/stress/weird.py
----------------------------------------------------------------------
diff --git a/test/system/auto/stress/weird.py b/test/system/auto/stress/weird.py
deleted file mode 100755
index d7e0697..0000000
--- a/test/system/auto/stress/weird.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import logging
-import unittest
-
-from TestUtils import TestUtilsMixin
-
-log = logging.getLogger('test.auto')
-
-class LateLastContact(TestUtilsMixin, unittest.TestCase):
-    """Fake the "tablet stops talking but holds its lock" problem we see when hard drives and NFS fail.
-       Start a ZombieTServer, and see that master stops it.
-    """
-
-    order = 80
-
-    settings = TestUtilsMixin.settings.copy()
-    settings['general.rpc.timeout'] = '2s'
-
-    def setUp(self):
-        TestUtilsMixin.setUp(self);
-    
-    def tearDown(self):
-        TestUtilsMixin.tearDown(self);
-
-    def runTest(self):
-        handle = self.runClassOn(self.masterHost(), 'org.apache.accumulo.test.functional.ZombieTServer', [])
-        out, err = handle.communicate()
-        assert handle.returncode == 0
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(LateLastContact())
-    return result


[09/50] [abbrv] ACCUMULO-1481 : Add tests for splitting/merging root table; refactor to consolidate metadata constants and structures in an organized way; begin consolidating metadata ops into a servicer interface to abstract the code that actually does

Posted by ct...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java
----------------------------------------------------------------------
diff --git a/server/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java b/server/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java
index 79f8ec4..eb98e0c 100644
--- a/server/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java
+++ b/server/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java
@@ -21,8 +21,6 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.TreeMap;
 
-import org.junit.Assert;
-
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Key;
@@ -32,10 +30,12 @@ import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.iterators.SortedMapIterator;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.util.ColumnFQ;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
 import org.apache.hadoop.io.Text;
+import org.junit.Assert;
 import org.junit.Test;
 
 /**
@@ -83,24 +83,24 @@ public class MetadataBulkLoadFilterTest {
     TreeMap<Key,Value> expected = new TreeMap<Key,Value>();
     
     // following should not be deleted by filter
-    put(tm1, "2;m", MetadataTable.DIRECTORY_COLUMN, "/t1");
-    put(tm1, "2;m", MetadataTable.DATAFILE_COLUMN_FAMILY, "/t1/file1", "1,1");
-    put(tm1, "2;m", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t1/file1", "5");
-    put(tm1, "2;m", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t1/file3", "7");
-    put(tm1, "2;m", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t1/file4", "9");
-    put(tm1, "2<", MetadataTable.DIRECTORY_COLUMN, "/t2");
-    put(tm1, "2<", MetadataTable.DATAFILE_COLUMN_FAMILY, "/t2/file2", "1,1");
-    put(tm1, "2<", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t2/file6", "5");
-    put(tm1, "2<", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t2/file7", "7");
-    put(tm1, "2<", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t2/file8", "9");
-    put(tm1, "2<", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t2/fileC", null);
+    put(tm1, "2;m", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, "/t1");
+    put(tm1, "2;m", DataFileColumnFamily.NAME, "/t1/file1", "1,1");
+    put(tm1, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file1", "5");
+    put(tm1, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file3", "7");
+    put(tm1, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file4", "9");
+    put(tm1, "2<", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, "/t2");
+    put(tm1, "2<", DataFileColumnFamily.NAME, "/t2/file2", "1,1");
+    put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file6", "5");
+    put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file7", "7");
+    put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file8", "9");
+    put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/fileC", null);
     
     expected.putAll(tm1);
-
+    
     // the following should be deleted by filter
-    put(tm1, "2;m", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t1/file5", "8");
-    put(tm1, "2<", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t2/file9", "8");
-    put(tm1, "2<", MetadataTable.BULKFILE_COLUMN_FAMILY, "/t2/fileA", "2");
+    put(tm1, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file5", "8");
+    put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file9", "8");
+    put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/fileA", "2");
     
     TestMetadataBulkLoadFilter iter = new TestMetadataBulkLoadFilter();
     iter.init(new SortedMapIterator(tm1), new HashMap<String,String>(), new IteratorEnvironment() {
@@ -111,8 +111,7 @@ public class MetadataBulkLoadFilterTest {
       }
       
       @Override
-      public void registerSideChannel(SortedKeyValueIterator<Key,Value> iter) {
-      }
+      public void registerSideChannel(SortedKeyValueIterator<Key,Value> iter) {}
       
       @Override
       public boolean isFullMajorCompaction() {
@@ -129,7 +128,7 @@ public class MetadataBulkLoadFilterTest {
         return null;
       }
     });
-
+    
     iter.seek(new Range(), new ArrayList<ByteSequence>(), false);
     
     TreeMap<Key,Value> actual = new TreeMap<Key,Value>();

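For context on what this filter test exercises: each bulk-loaded file leaves a marker in its tablet's row under the bulk-file column family, with the bulk transaction id as the value, and the filter drops markers whose transaction the arbitrator no longer considers active. A sketch of writing such a marker with the relocated constants (row and file path copied from the test rows above; assumes an open BatchWriter bw on the metadata table):

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.MutationsRejectedException;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
    import org.apache.hadoop.io.Text;

    public class BulkMarkerSketch {
      // "2<" is the default tablet of table id 2; the qualifier is the loaded
      // file, the value is the bulk transaction id, as in the test data above.
      static void writeMarker(BatchWriter bw) throws MutationsRejectedException {
        Mutation m = new Mutation(new Text("2<"));
        m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/t2/file7"), new Value("7".getBytes()));
        bw.addMutation(m);
      }
    }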
http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/test/java/org/apache/accumulo/server/master/TestMergeState.java
----------------------------------------------------------------------
diff --git a/server/src/test/java/org/apache/accumulo/server/master/TestMergeState.java b/server/src/test/java/org/apache/accumulo/server/master/TestMergeState.java
index b2f84fa..32c3a33 100644
--- a/server/src/test/java/org/apache/accumulo/server/master/TestMergeState.java
+++ b/server/src/test/java/org/apache/accumulo/server/master/TestMergeState.java
@@ -34,10 +34,12 @@ import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ChoppedColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.master.state.Assignment;
 import org.apache.accumulo.server.master.state.CurrentState;
 import org.apache.accumulo.server.master.state.MergeInfo;
@@ -101,14 +103,14 @@ public class TestMergeState {
     for (String s : splits) {
       Text split = new Text(s);
       Mutation prevRow = KeyExtent.getPrevRowUpdateMutation(new KeyExtent(tableId, split, pr));
-      prevRow.put(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, new Text("123456"), new Value("127.0.0.1:1234".getBytes()));
-      MetadataTable.CHOPPED_COLUMN.put(prevRow, new Value("junk".getBytes()));
+      prevRow.put(TabletsSection.CurrentLocationColumnFamily.NAME, new Text("123456"), new Value("127.0.0.1:1234".getBytes()));
+      ChoppedColumnFamily.CHOPPED_COLUMN.put(prevRow, new Value("junk".getBytes()));
       bw.addMutation(prevRow);
       pr = split;
     }
     // Add the default tablet
     Mutation defaultTablet = KeyExtent.getPrevRowUpdateMutation(new KeyExtent(tableId, null, pr));
-    defaultTablet.put(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, new Text("123456"), new Value("127.0.0.1:1234".getBytes()));
+    defaultTablet.put(TabletsSection.CurrentLocationColumnFamily.NAME, new Text("123456"), new Value("127.0.0.1:1234".getBytes()));
     bw.addMutation(defaultTablet);
     bw.close();
     
@@ -128,8 +130,8 @@ public class TestMergeState {
     // Create the hole
     // Split the tablet at one end of the range
     Mutation m = new KeyExtent(tableId, new Text("t"), new Text("p")).getPrevRowUpdateMutation();
-    MetadataTable.SPLIT_RATIO_COLUMN.put(m, new Value("0.5".getBytes()));
-    MetadataTable.OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(new Text("o")));
+    TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value("0.5".getBytes()));
+    TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(new Text("o")));
     update(connector, m);
     
     // do the state check
@@ -139,7 +141,7 @@ public class TestMergeState {
     
     // unassign the tablets
     BatchDeleter deleter = connector.createBatchDeleter("!METADATA", Authorizations.EMPTY, 1000, new BatchWriterConfig());
-    deleter.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
+    deleter.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
     deleter.setRanges(Collections.singletonList(new Range()));
     deleter.delete();
     
@@ -150,7 +152,7 @@ public class TestMergeState {
     // finish the split
     KeyExtent tablet = new KeyExtent(tableId, new Text("p"), new Text("o"));
     m = tablet.getPrevRowUpdateMutation();
-    MetadataTable.SPLIT_RATIO_COLUMN.put(m, new Value("0.5".getBytes()));
+    TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value("0.5".getBytes()));
     update(connector, m);
     metaDataStateStore.setLocations(Collections.singletonList(new Assignment(tablet, state.someTServer)));
     
@@ -160,7 +162,7 @@ public class TestMergeState {
     
     // chop it
     m = tablet.getPrevRowUpdateMutation();
-    MetadataTable.CHOPPED_COLUMN.put(m, new Value("junk".getBytes()));
+    ChoppedColumnFamily.CHOPPED_COLUMN.put(m, new Value("junk".getBytes()));
     update(connector, m);
     
     stats = scan(state, metaDataStateStore);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/test/java/org/apache/accumulo/server/master/state/RootTabletStateStoreTest.java
----------------------------------------------------------------------
diff --git a/server/src/test/java/org/apache/accumulo/server/master/state/RootTabletStateStoreTest.java b/server/src/test/java/org/apache/accumulo/server/master/state/RootTabletStateStoreTest.java
index b0a9528..a5613ea 100644
--- a/server/src/test/java/org/apache/accumulo/server/master/state/RootTabletStateStoreTest.java
+++ b/server/src/test/java/org/apache/accumulo/server/master/state/RootTabletStateStoreTest.java
@@ -28,8 +28,8 @@ import java.util.HashSet;
 import java.util.List;
 
 import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.util.AddressUtil;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.server.master.state.TabletLocationState.BadLocationStateException;
 import org.apache.hadoop.io.Text;
 import org.junit.Assert;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/test/java/org/apache/accumulo/server/tabletserver/CheckTabletMetadataTest.java
----------------------------------------------------------------------
diff --git a/server/src/test/java/org/apache/accumulo/server/tabletserver/CheckTabletMetadataTest.java b/server/src/test/java/org/apache/accumulo/server/tabletserver/CheckTabletMetadataTest.java
index f475c44..8fe1ea0 100644
--- a/server/src/test/java/org/apache/accumulo/server/tabletserver/CheckTabletMetadataTest.java
+++ b/server/src/test/java/org/apache/accumulo/server/tabletserver/CheckTabletMetadataTest.java
@@ -22,14 +22,13 @@ import java.util.TreeMap;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.util.ColumnFQ;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.hadoop.io.Text;
 import org.junit.Assert;
 import org.junit.Test;
 
-
 public class CheckTabletMetadataTest {
   
   private static Key nk(String row, ColumnFQ cfq) {
@@ -39,7 +38,7 @@ public class CheckTabletMetadataTest {
   private static Key nk(String row, Text cf, String cq) {
     return new Key(row, cf.toString(), cq);
   }
-
+  
   private static void put(TreeMap<Key,Value> tabletMeta, String row, ColumnFQ cfq, byte[] val) {
     Key k = new Key(new Text(row), cfq.getColumnFamily(), cfq.getColumnQualifier());
     tabletMeta.put(k, new Value(val));
@@ -49,25 +48,25 @@ public class CheckTabletMetadataTest {
     Key k = new Key(new Text(row), cf, new Text(cq));
     tabletMeta.put(k, new Value(val.getBytes()));
   }
-
+  
   private static void assertFail(TreeMap<Key,Value> tabletMeta, KeyExtent ke, TServerInstance tsi) {
     try {
       Assert.assertNull(TabletServer.checkTabletMetadata(ke, tsi, tabletMeta, ke.getMetadataEntry()));
     } catch (Exception e) {
-
+      
     }
   }
-
+  
   private static void assertFail(TreeMap<Key,Value> tabletMeta, KeyExtent ke, TServerInstance tsi, Key keyToDelete) {
     TreeMap<Key,Value> copy = new TreeMap<Key,Value>(tabletMeta);
     Assert.assertNotNull(copy.remove(keyToDelete));
     try {
       Assert.assertNull(TabletServer.checkTabletMetadata(ke, tsi, copy, ke.getMetadataEntry()));
     } catch (Exception e) {
-
+      
     }
   }
-
+  
   @Test
   public void testBadTabletMetadata() throws Exception {
     
@@ -75,10 +74,10 @@ public class CheckTabletMetadataTest {
     
     TreeMap<Key,Value> tabletMeta = new TreeMap<Key,Value>();
     
-    put(tabletMeta, "1<", MetadataTable.PREV_ROW_COLUMN, KeyExtent.encodePrevEndRow(null).get());
-    put(tabletMeta, "1<", MetadataTable.DIRECTORY_COLUMN, "/t1".getBytes());
-    put(tabletMeta, "1<", MetadataTable.TIME_COLUMN, "M0".getBytes());
-    put(tabletMeta, "1<", MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, "4", "127.0.0.1:9997");
+    put(tabletMeta, "1<", TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN, KeyExtent.encodePrevEndRow(null).get());
+    put(tabletMeta, "1<", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, "/t1".getBytes());
+    put(tabletMeta, "1<", TabletsSection.ServerColumnFamily.TIME_COLUMN, "M0".getBytes());
+    put(tabletMeta, "1<", TabletsSection.FutureLocationColumnFamily.NAME, "4", "127.0.0.1:9997");
     
     TServerInstance tsi = new TServerInstance("127.0.0.1:9997", 4);
     
@@ -94,30 +93,30 @@ public class CheckTabletMetadataTest {
     
     assertFail(tabletMeta, new KeyExtent(new Text("1"), new Text("r"), new Text("m")), tsi);
     
-    assertFail(tabletMeta, ke, tsi, nk("1<", MetadataTable.PREV_ROW_COLUMN));
-
-    assertFail(tabletMeta, ke, tsi, nk("1<", MetadataTable.DIRECTORY_COLUMN));
+    assertFail(tabletMeta, ke, tsi, nk("1<", TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN));
     
-    assertFail(tabletMeta, ke, tsi, nk("1<", MetadataTable.TIME_COLUMN));
+    assertFail(tabletMeta, ke, tsi, nk("1<", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN));
     
-    assertFail(tabletMeta, ke, tsi, nk("1<", MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, "4"));
+    assertFail(tabletMeta, ke, tsi, nk("1<", TabletsSection.ServerColumnFamily.TIME_COLUMN));
+    
+    assertFail(tabletMeta, ke, tsi, nk("1<", TabletsSection.FutureLocationColumnFamily.NAME, "4"));
     
     TreeMap<Key,Value> copy = new TreeMap<Key,Value>(tabletMeta);
-    put(copy, "1<", MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, "4", "127.0.0.1:9997");
+    put(copy, "1<", TabletsSection.CurrentLocationColumnFamily.NAME, "4", "127.0.0.1:9997");
     assertFail(copy, ke, tsi);
-    assertFail(copy, ke, tsi, nk("1<", MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, "4"));
+    assertFail(copy, ke, tsi, nk("1<", TabletsSection.FutureLocationColumnFamily.NAME, "4"));
     
     copy = new TreeMap<Key,Value>(tabletMeta);
-    put(copy, "1<", MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, "5", "127.0.0.1:9998");
+    put(copy, "1<", TabletsSection.CurrentLocationColumnFamily.NAME, "5", "127.0.0.1:9998");
     assertFail(copy, ke, tsi);
-    put(copy, "1<", MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, "6", "127.0.0.1:9999");
+    put(copy, "1<", TabletsSection.CurrentLocationColumnFamily.NAME, "6", "127.0.0.1:9999");
     assertFail(copy, ke, tsi);
     
     copy = new TreeMap<Key,Value>(tabletMeta);
-    put(copy, "1<", MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, "5", "127.0.0.1:9998");
+    put(copy, "1<", TabletsSection.FutureLocationColumnFamily.NAME, "5", "127.0.0.1:9998");
     assertFail(copy, ke, tsi);
     
     assertFail(new TreeMap<Key,Value>(), ke, tsi);
-
+    
   }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/test/java/org/apache/accumulo/server/util/CloneTest.java
----------------------------------------------------------------------
diff --git a/server/src/test/java/org/apache/accumulo/server/util/CloneTest.java b/server/src/test/java/org/apache/accumulo/server/util/CloneTest.java
index 93f7b51..e2d1ecb 100644
--- a/server/src/test/java/org/apache/accumulo/server/util/CloneTest.java
+++ b/server/src/test/java/org/apache/accumulo/server/util/CloneTest.java
@@ -31,6 +31,9 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.hadoop.io.Text;
 
@@ -43,8 +46,8 @@ public class CloneTest extends TestCase {
     KeyExtent ke = new KeyExtent(new Text("0"), null, null);
     Mutation mut = ke.getPrevRowUpdateMutation();
     
-    MetadataTable.TIME_COLUMN.put(mut, new Value("M0".getBytes()));
-    MetadataTable.DIRECTORY_COLUMN.put(mut, new Value("/default_tablet".getBytes()));
+    TabletsSection.ServerColumnFamily.TIME_COLUMN.put(mut, new Value("M0".getBytes()));
+    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut, new Value("/default_tablet".getBytes()));
     
     BatchWriter bw1 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
@@ -54,9 +57,9 @@ public class CloneTest extends TestCase {
     
     BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
-    MetadataTable.initializeClone("0", "1", conn, bw2);
+    MetadataTableUtil.initializeClone("0", "1", conn, bw2);
     
-    int rc = MetadataTable.checkClone("0", "1", conn, bw2);
+    int rc = MetadataTableUtil.checkClone("0", "1", conn, bw2);
     
     assertEquals(0, rc);
     
@@ -71,9 +74,9 @@ public class CloneTest extends TestCase {
     KeyExtent ke = new KeyExtent(new Text("0"), null, null);
     Mutation mut = ke.getPrevRowUpdateMutation();
     
-    MetadataTable.TIME_COLUMN.put(mut, new Value("M0".getBytes()));
-    MetadataTable.DIRECTORY_COLUMN.put(mut, new Value("/default_tablet".getBytes()));
-    mut.put(MetadataTable.DATAFILE_COLUMN_FAMILY.toString(), "/default_tablet/0_0.rf", "1,200");
+    TabletsSection.ServerColumnFamily.TIME_COLUMN.put(mut, new Value("M0".getBytes()));
+    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut, new Value("/default_tablet".getBytes()));
+    mut.put(DataFileColumnFamily.NAME.toString(), "/default_tablet/0_0.rf", "1,200");
     
     BatchWriter bw1 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
@@ -83,20 +86,20 @@ public class CloneTest extends TestCase {
     
     BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
-    MetadataTable.initializeClone("0", "1", conn, bw2);
+    MetadataTableUtil.initializeClone("0", "1", conn, bw2);
     
     Mutation mut2 = new Mutation(ke.getMetadataEntry());
-    mut2.putDelete(MetadataTable.DATAFILE_COLUMN_FAMILY.toString(), "/default_tablet/0_0.rf");
-    mut2.put(MetadataTable.DATAFILE_COLUMN_FAMILY.toString(), "/default_tablet/1_0.rf", "2,300");
+    mut2.putDelete(DataFileColumnFamily.NAME.toString(), "/default_tablet/0_0.rf");
+    mut2.put(DataFileColumnFamily.NAME.toString(), "/default_tablet/1_0.rf", "2,300");
     
     bw1.addMutation(mut2);
     bw1.flush();
     
-    int rc = MetadataTable.checkClone("0", "1", conn, bw2);
+    int rc = MetadataTableUtil.checkClone("0", "1", conn, bw2);
     
     assertEquals(1, rc);
     
-    rc = MetadataTable.checkClone("0", "1", conn, bw2);
+    rc = MetadataTableUtil.checkClone("0", "1", conn, bw2);
     
     assertEquals(0, rc);
     
@@ -106,7 +109,7 @@ public class CloneTest extends TestCase {
     HashSet<String> files = new HashSet<String>();
     
     for (Entry<Key,Value> entry : scanner) {
-      if (entry.getKey().getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY))
+      if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME))
         files.add(entry.getKey().getColumnQualifier().toString());
     }
     
@@ -128,14 +131,14 @@ public class CloneTest extends TestCase {
     
     BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
-    MetadataTable.initializeClone("0", "1", conn, bw2);
+    MetadataTableUtil.initializeClone("0", "1", conn, bw2);
     
     bw1.addMutation(createTablet("0", "m", null, "/default_tablet", "/default_tablet/0_0.rf"));
     bw1.addMutation(createTablet("0", null, "m", "/t-1", "/default_tablet/0_0.rf"));
     
     bw1.flush();
     
-    int rc = MetadataTable.checkClone("0", "1", conn, bw2);
+    int rc = MetadataTableUtil.checkClone("0", "1", conn, bw2);
     
     assertEquals(0, rc);
     
@@ -146,7 +149,7 @@ public class CloneTest extends TestCase {
     
     int count = 0;
     for (Entry<Key,Value> entry : scanner) {
-      if (entry.getKey().getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
+      if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
         files.add(entry.getKey().getColumnQualifier().toString());
         count++;
       }
@@ -170,20 +173,20 @@ public class CloneTest extends TestCase {
     
     BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
-    MetadataTable.initializeClone("0", "1", conn, bw2);
+    MetadataTableUtil.initializeClone("0", "1", conn, bw2);
     
     bw1.addMutation(createTablet("0", "m", null, "/default_tablet", "/default_tablet/1_0.rf"));
     Mutation mut3 = createTablet("0", null, "m", "/t-1", "/default_tablet/1_0.rf");
-    mut3.putDelete(MetadataTable.DATAFILE_COLUMN_FAMILY.toString(), "/default_tablet/0_0.rf");
+    mut3.putDelete(DataFileColumnFamily.NAME.toString(), "/default_tablet/0_0.rf");
     bw1.addMutation(mut3);
     
     bw1.flush();
     
-    int rc = MetadataTable.checkClone("0", "1", conn, bw2);
+    int rc = MetadataTableUtil.checkClone("0", "1", conn, bw2);
     
     assertEquals(1, rc);
     
-    rc = MetadataTable.checkClone("0", "1", conn, bw2);
+    rc = MetadataTableUtil.checkClone("0", "1", conn, bw2);
     
     assertEquals(0, rc);
     
@@ -195,7 +198,7 @@ public class CloneTest extends TestCase {
     int count = 0;
     
     for (Entry<Key,Value> entry : scanner) {
-      if (entry.getKey().getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
+      if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
         files.add(entry.getKey().getColumnQualifier().toString());
         count++;
       }
@@ -209,10 +212,10 @@ public class CloneTest extends TestCase {
   private static Mutation deleteTablet(String tid, String endRow, String prevRow, String dir, String file) throws Exception {
     KeyExtent ke = new KeyExtent(new Text(tid), endRow == null ? null : new Text(endRow), prevRow == null ? null : new Text(prevRow));
     Mutation mut = new Mutation(ke.getMetadataEntry());
-    MetadataTable.PREV_ROW_COLUMN.putDelete(mut);
-    MetadataTable.TIME_COLUMN.putDelete(mut);
-    MetadataTable.DIRECTORY_COLUMN.putDelete(mut);
-    mut.putDelete(MetadataTable.DATAFILE_COLUMN_FAMILY.toString(), file);
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.putDelete(mut);
+    TabletsSection.ServerColumnFamily.TIME_COLUMN.putDelete(mut);
+    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.putDelete(mut);
+    mut.putDelete(DataFileColumnFamily.NAME.toString(), file);
     
     return mut;
   }
@@ -221,9 +224,9 @@ public class CloneTest extends TestCase {
     KeyExtent ke = new KeyExtent(new Text(tid), endRow == null ? null : new Text(endRow), prevRow == null ? null : new Text(prevRow));
     Mutation mut = ke.getPrevRowUpdateMutation();
     
-    MetadataTable.TIME_COLUMN.put(mut, new Value("M0".getBytes()));
-    MetadataTable.DIRECTORY_COLUMN.put(mut, new Value(dir.getBytes()));
-    mut.put(MetadataTable.DATAFILE_COLUMN_FAMILY.toString(), file, "10,200");
+    TabletsSection.ServerColumnFamily.TIME_COLUMN.put(mut, new Value("M0".getBytes()));
+    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut, new Value(dir.getBytes()));
+    mut.put(DataFileColumnFamily.NAME.toString(), file, "10,200");
     
     return mut;
   }
@@ -242,7 +245,7 @@ public class CloneTest extends TestCase {
     
     BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
-    MetadataTable.initializeClone("0", "1", conn, bw2);
+    MetadataTableUtil.initializeClone("0", "1", conn, bw2);
     
     bw1.addMutation(createTablet("0", "f", null, "/d1", "/d1/file3"));
     bw1.addMutation(createTablet("0", "m", "f", "/d3", "/d1/file1"));
@@ -251,7 +254,7 @@ public class CloneTest extends TestCase {
     
     bw1.flush();
     
-    int rc = MetadataTable.checkClone("0", "1", conn, bw2);
+    int rc = MetadataTableUtil.checkClone("0", "1", conn, bw2);
     
     assertEquals(0, rc);
     
@@ -262,7 +265,7 @@ public class CloneTest extends TestCase {
     
     int count = 0;
     for (Entry<Key,Value> entry : scanner) {
-      if (entry.getKey().getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
+      if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
         files.add(entry.getKey().getColumnQualifier().toString());
         count++;
       }
@@ -289,7 +292,7 @@ public class CloneTest extends TestCase {
     
     BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
-    MetadataTable.initializeClone("0", "1", conn, bw2);
+    MetadataTableUtil.initializeClone("0", "1", conn, bw2);
     
     bw1.addMutation(deleteTablet("0", "m", null, "/d1", "/d1/file1"));
     bw1.addMutation(deleteTablet("0", null, "m", "/d2", "/d2/file2"));
@@ -303,7 +306,7 @@ public class CloneTest extends TestCase {
     
     bw1.flush();
     
-    int rc = MetadataTable.checkClone("0", "1", conn, bw2);
+    int rc = MetadataTableUtil.checkClone("0", "1", conn, bw2);
     
     assertEquals(1, rc);
     
@@ -315,7 +318,7 @@ public class CloneTest extends TestCase {
     
     bw1.flush();
     
-    rc = MetadataTable.checkClone("0", "1", conn, bw2);
+    rc = MetadataTableUtil.checkClone("0", "1", conn, bw2);
     
     assertEquals(0, rc);
     
@@ -326,7 +329,7 @@ public class CloneTest extends TestCase {
     
     int count = 0;
     for (Entry<Key,Value> entry : scanner) {
-      if (entry.getKey().getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
+      if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
         files.add(entry.getKey().getColumnQualifier().toString());
         count++;
       }
@@ -353,17 +356,17 @@ public class CloneTest extends TestCase {
     
     BatchWriter bw2 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     
-    MetadataTable.initializeClone("0", "1", conn, bw2);
+    MetadataTableUtil.initializeClone("0", "1", conn, bw2);
     
     bw1.addMutation(deleteTablet("0", "m", null, "/d1", "/d1/file1"));
     Mutation mut = createTablet("0", null, null, "/d2", "/d2/file2");
-    mut.put(MetadataTable.DATAFILE_COLUMN_FAMILY.toString(), "/d1/file1", "10,200");
+    mut.put(DataFileColumnFamily.NAME.toString(), "/d1/file1", "10,200");
     bw1.addMutation(mut);
     
     bw1.flush();
     
     try {
-      MetadataTable.checkClone("0", "1", conn, bw2);
+      MetadataTableUtil.checkClone("0", "1", conn, bw2);
       assertTrue(false);
     } catch (TabletIterator.TabletDeletedException tde) {}
     

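Aside from the MetadataTable to MetadataTableUtil rename, the shape of the clone protocol these tests drive is: initializeClone copies the source table's tablet metadata, then checkClone is retried until it returns 0, a nonzero return meaning tablets changed mid-copy and had to be reworked. A sketch of that loop (assumes a live Connector; table ids "0" and "1" are the test's values):

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.metadata.MetadataTable;
    import org.apache.accumulo.server.util.MetadataTableUtil;

    public class CloneSketch {
      static void cloneMetadata(Connector conn) throws Exception {
        BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
        MetadataTableUtil.initializeClone("0", "1", conn, bw);
        // Retry until source and clone agree; checkClone may throw
        // TabletDeletedException if a source tablet disappears entirely,
        // as the last test above demonstrates.
        while (MetadataTableUtil.checkClone("0", "1", conn, bw) != 0)
          ;
        bw.close();
      }
    }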
http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java
----------------------------------------------------------------------
diff --git a/server/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java b/server/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java
index 122b493..72ce334 100644
--- a/server/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java
+++ b/server/src/test/java/org/apache/accumulo/server/util/TabletIteratorTest.java
@@ -31,8 +31,10 @@ import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.util.TabletIterator.TabletDeletedException;
 import org.apache.hadoop.io.Text;
 
@@ -43,10 +45,11 @@ public class TabletIteratorTest extends TestCase {
     private Connector conn;
     
     public TestTabletIterator(Connector conn) throws Exception {
-      super(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY), MetadataTable.KEYSPACE, true, true);
+      super(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY), MetadataSchema.TabletsSection.getRange(), true, true);
       this.conn = conn;
     }
     
+    @Override
     protected void resetScanner() {
       try {
         Scanner ds = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
@@ -81,11 +84,11 @@ public class TabletIteratorTest extends TestCase {
     
     KeyExtent ke1 = new KeyExtent(new Text("0"), new Text("m"), null);
     Mutation mut1 = ke1.getPrevRowUpdateMutation();
-    MetadataTable.DIRECTORY_COLUMN.put(mut1, new Value("/d1".getBytes()));
+    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut1, new Value("/d1".getBytes()));
     
     KeyExtent ke2 = new KeyExtent(new Text("0"), null, null);
     Mutation mut2 = ke2.getPrevRowUpdateMutation();
-    MetadataTable.DIRECTORY_COLUMN.put(mut2, new Value("/d2".getBytes()));
+    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut2, new Value("/d2".getBytes()));
     
     BatchWriter bw1 = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
     bw1.addMutation(mut1);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java b/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java
index b125176..45cc557 100644
--- a/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java
+++ b/test/src/main/java/org/apache/accumulo/test/QueryMetadataTable.java
@@ -35,8 +35,9 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.hadoop.io.Text;
@@ -111,7 +112,7 @@ public class QueryMetadataTable {
       if (count % 72 == 0) {
         System.out.printf(" %,d%n", count);
       }
-      if (entry.getKey().compareRow(mdrow) == 0 && entry.getKey().getColumnFamily().compareTo(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY) == 0) {
+      if (entry.getKey().compareRow(mdrow) == 0 && entry.getKey().getColumnFamily().compareTo(TabletsSection.CurrentLocationColumnFamily.NAME) == 0) {
         System.out.println(entry.getKey() + " " + entry.getValue());
         location = entry.getValue().toString();
       }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/main/java/org/apache/accumulo/test/TestMultiTableIngest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/TestMultiTableIngest.java b/test/src/main/java/org/apache/accumulo/test/TestMultiTableIngest.java
index 8edcd92..b051988 100644
--- a/test/src/main/java/org/apache/accumulo/test/TestMultiTableIngest.java
+++ b/test/src/main/java/org/apache/accumulo/test/TestMultiTableIngest.java
@@ -20,8 +20,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map.Entry;
 
-import javax.crypto.Mac;
-
 import org.apache.accumulo.core.cli.BatchWriterOpts;
 import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.AccumuloException;
@@ -34,7 +32,6 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.server.cli.ClientOpts;
-import org.apache.accumulo.test.functional.MacTest;
 import org.apache.hadoop.io.Text;
 
 import com.beust.jcommander.Parameter;
@@ -42,11 +39,11 @@ import com.beust.jcommander.Parameter;
 public class TestMultiTableIngest {
   
   static class Opts extends ClientOpts {
-    @Parameter(names="--readonly", description="read only")
+    @Parameter(names = "--readonly", description = "read only")
     boolean readonly = false;
-    @Parameter(names="--tables", description="number of tables to create")
+    @Parameter(names = "--tables", description = "number of tables to create")
     int tables = 5;
-    @Parameter(names="--count", description="number of entries to create")
+    @Parameter(names = "--count", description = "number of entries to create")
     int count = 10000;
   }
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousStatsCollector.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousStatsCollector.java b/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousStatsCollector.java
index 8a80050..ea677da 100644
--- a/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousStatsCollector.java
+++ b/test/src/main/java/org/apache/accumulo/test/continuous/ContinuousStatsCollector.java
@@ -36,8 +36,9 @@ import org.apache.accumulo.core.master.thrift.MasterClientService;
 import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
 import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.Stat;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.cli.ClientOnRequiredTable;
@@ -91,7 +92,7 @@ public class ContinuousStatsCollector {
       Connector conn = opts.getConnector();
       Scanner scanner = conn.createScanner(MetadataTable.NAME, opts.auths);
       scanner.setBatchSize(scanBatchSize);
-      scanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
+      scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
       scanner.addScanIterator(new IteratorSetting(1000, "cfc", ColumnFamilyCounter.class.getName()));
       scanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
       

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/main/java/org/apache/accumulo/test/functional/FunctionalTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/FunctionalTest.java b/test/src/main/java/org/apache/accumulo/test/functional/FunctionalTest.java
index 0d0cfc3..d169596 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/FunctionalTest.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/FunctionalTest.java
@@ -40,9 +40,11 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;
@@ -193,8 +195,8 @@ public abstract class FunctionalTest {
     Scanner scanner = getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     String tableId = Tables.getNameToIdMap(getInstance()).get(tableName);
     scanner.setRange(new Range(new Text(tableId + ";"), true, new Text(tableId + "<"), true));
-    scanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
-    MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
+    scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
     
     HashMap<Text,Integer> tabletFileCounts = new HashMap<Text,Integer>();
     
@@ -205,7 +207,7 @@ public abstract class FunctionalTest {
       Integer count = tabletFileCounts.get(row);
       if (count == null)
         count = 0;
-      if (entry.getKey().getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
+      if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
         count = count + 1;
       }
       

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryTest.java b/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryTest.java
index ce31323..8cb79c3 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryTest.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryTest.java
@@ -36,9 +36,12 @@ import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.rfile.RFile;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.ColumnFQ;
-import org.apache.accumulo.core.util.MetadataTable.DataFileValue;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooLock.LockLossReason;
@@ -53,7 +56,7 @@ import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.tabletserver.TabletServer;
 import org.apache.accumulo.server.tabletserver.TabletTime;
 import org.apache.accumulo.server.util.FileUtil;
-import org.apache.accumulo.server.util.MetadataTable;
+import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.zookeeper.TransactionWatcher;
 import org.apache.accumulo.server.zookeeper.ZooLock;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
@@ -137,7 +140,7 @@ public class SplitRecoveryTest extends FunctionalTest {
       KeyExtent extent = extents[i];
       
       String tdir = ServerConstants.getTablesDirs()[0] + "/" + extent.getTableId().toString() + "/dir_" + i;
-      MetadataTable.addTablet(extent, tdir, SecurityConstants.getSystemCredentials(), TabletTime.LOGICAL_TIME_ID, zl);
+      MetadataTableUtil.addTablet(extent, tdir, SecurityConstants.getSystemCredentials(), TabletTime.LOGICAL_TIME_ID, zl);
       SortedMap<FileRef,DataFileValue> mapFiles = new TreeMap<FileRef,DataFileValue>();
       mapFiles.put(new FileRef(tdir + "/" + RFile.EXTENSION + "_000_000"), new DataFileValue(1000017 + i, 10000 + i));
       
@@ -146,7 +149,7 @@ public class SplitRecoveryTest extends FunctionalTest {
       }
       int tid = 0;
       TransactionWatcher.ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
-      MetadataTable.updateTabletDataFile(tid, extent, mapFiles, "L0", SecurityConstants.getSystemCredentials(), zl);
+      MetadataTableUtil.updateTabletDataFile(tid, extent, mapFiles, "L0", SecurityConstants.getSystemCredentials(), zl);
     }
     
     KeyExtent extent = extents[extentToSplit];
@@ -164,24 +167,24 @@ public class SplitRecoveryTest extends FunctionalTest {
     SortedMap<FileRef,DataFileValue> highDatafileSizes = new TreeMap<FileRef,DataFileValue>();
     List<FileRef> highDatafilesToRemove = new ArrayList<FileRef>();
     
-    MetadataTable.splitDatafiles(extent.getTableId(), midRow, splitRatio, new HashMap<FileRef,FileUtil.FileInfo>(), mapFiles, lowDatafileSizes,
+    MetadataTableUtil.splitDatafiles(extent.getTableId(), midRow, splitRatio, new HashMap<FileRef,FileUtil.FileInfo>(), mapFiles, lowDatafileSizes,
         highDatafileSizes, highDatafilesToRemove);
     
-    MetadataTable.splitTablet(high, extent.getPrevEndRow(), splitRatio, SecurityConstants.getSystemCredentials(), zl);
+    MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, SecurityConstants.getSystemCredentials(), zl);
     TServerInstance instance = new TServerInstance(location, zl.getSessionId());
     Writer writer = new Writer(HdfsZooInstance.getInstance(), SecurityConstants.getSystemCredentials(), MetadataTable.ID);
     Assignment assignment = new Assignment(high, instance);
     Mutation m = new Mutation(assignment.tablet.getMetadataEntry());
-    m.put(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, assignment.server.asColumnQualifier(), assignment.server.asMutationValue());
+    m.put(TabletsSection.FutureLocationColumnFamily.NAME, assignment.server.asColumnQualifier(), assignment.server.asMutationValue());
     writer.update(m);
     
     if (steps >= 1) {
-      Map<FileRef,Long> bulkFiles = MetadataTable.getBulkFilesLoaded(SecurityConstants.getSystemCredentials(), extent);
-      MetadataTable.addNewTablet(low, "/lowDir", instance, lowDatafileSizes, bulkFiles, SecurityConstants.getSystemCredentials(), TabletTime.LOGICAL_TIME_ID
-          + "0", -1l, -1l, zl);
+      Map<FileRef,Long> bulkFiles = MetadataTableUtil.getBulkFilesLoaded(SecurityConstants.getSystemCredentials(), extent);
+      MetadataTableUtil.addNewTablet(low, "/lowDir", instance, lowDatafileSizes, bulkFiles, SecurityConstants.getSystemCredentials(),
+          TabletTime.LOGICAL_TIME_ID + "0", -1l, -1l, zl);
     }
     if (steps >= 2)
-      MetadataTable.finishSplit(high, highDatafileSizes, highDatafilesToRemove, SecurityConstants.getSystemCredentials(), zl);
+      MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, SecurityConstants.getSystemCredentials(), zl);
     
     TabletServer.verifyTabletInformation(high, instance, null, "127.0.0.1:0", zl);
     
@@ -189,8 +192,8 @@ public class SplitRecoveryTest extends FunctionalTest {
       ensureTabletHasNoUnexpectedMetadataEntries(low, lowDatafileSizes);
       ensureTabletHasNoUnexpectedMetadataEntries(high, highDatafileSizes);
       
-      Map<FileRef,Long> lowBulkFiles = MetadataTable.getBulkFilesLoaded(SecurityConstants.getSystemCredentials(), low);
-      Map<FileRef,Long> highBulkFiles = MetadataTable.getBulkFilesLoaded(SecurityConstants.getSystemCredentials(), high);
+      Map<FileRef,Long> lowBulkFiles = MetadataTableUtil.getBulkFilesLoaded(SecurityConstants.getSystemCredentials(), low);
+      Map<FileRef,Long> highBulkFiles = MetadataTableUtil.getBulkFilesLoaded(SecurityConstants.getSystemCredentials(), high);
       
       if (!lowBulkFiles.equals(highBulkFiles)) {
         throw new Exception(" " + lowBulkFiles + " != " + highBulkFiles + " " + low + " " + high);
@@ -209,17 +212,17 @@ public class SplitRecoveryTest extends FunctionalTest {
     scanner.setRange(extent.toMetadataRange());
     
     HashSet<ColumnFQ> expectedColumns = new HashSet<ColumnFQ>();
-    expectedColumns.add(MetadataTable.DIRECTORY_COLUMN);
-    expectedColumns.add(MetadataTable.PREV_ROW_COLUMN);
-    expectedColumns.add(MetadataTable.TIME_COLUMN);
-    expectedColumns.add(MetadataTable.LOCK_COLUMN);
+    expectedColumns.add(TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN);
+    expectedColumns.add(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN);
+    expectedColumns.add(TabletsSection.ServerColumnFamily.TIME_COLUMN);
+    expectedColumns.add(TabletsSection.ServerColumnFamily.LOCK_COLUMN);
     
     HashSet<Text> expectedColumnFamilies = new HashSet<Text>();
-    expectedColumnFamilies.add(MetadataTable.DATAFILE_COLUMN_FAMILY);
-    expectedColumnFamilies.add(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY);
-    expectedColumnFamilies.add(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
-    expectedColumnFamilies.add(MetadataTable.LAST_LOCATION_COLUMN_FAMILY);
-    expectedColumnFamilies.add(MetadataTable.BULKFILE_COLUMN_FAMILY);
+    expectedColumnFamilies.add(DataFileColumnFamily.NAME);
+    expectedColumnFamilies.add(TabletsSection.FutureLocationColumnFamily.NAME);
+    expectedColumnFamilies.add(TabletsSection.CurrentLocationColumnFamily.NAME);
+    expectedColumnFamilies.add(TabletsSection.LastLocationColumnFamily.NAME);
+    expectedColumnFamilies.add(TabletsSection.BulkFileColumnFamily.NAME);
     
     Iterator<Entry<Key,Value>> iter = scanner.iterator();
     while (iter.hasNext()) {
@@ -244,7 +247,7 @@ public class SplitRecoveryTest extends FunctionalTest {
       throw new Exception("Not all expected columns seen " + extent + " " + expectedColumns);
     }
     
-    SortedMap<FileRef,DataFileValue> fixedMapFiles = MetadataTable.getDataFileSizes(extent, SecurityConstants.getSystemCredentials());
+    SortedMap<FileRef,DataFileValue> fixedMapFiles = MetadataTableUtil.getDataFileSizes(extent, SecurityConstants.getSystemCredentials());
     verifySame(expectedMapFiles, fixedMapFiles);
   }
   

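This diff also documents the order of the metadata operations in a split: splitDatafiles partitions the file sizes, splitTablet writes the split ratio onto the original (high) tablet, and recovery may then still need to add the new low tablet and finish the high one. A sketch of that recovery tail with the renamed utility, argument lists as used above (all state is passed in, so this compiles against the server module but is not a standalone program):

    import java.util.List;
    import java.util.Map;
    import java.util.SortedMap;

    import org.apache.accumulo.core.data.KeyExtent;
    import org.apache.accumulo.core.metadata.schema.DataFileValue;
    import org.apache.accumulo.core.security.thrift.TCredentials;
    import org.apache.accumulo.server.fs.FileRef;
    import org.apache.accumulo.server.master.state.TServerInstance;
    import org.apache.accumulo.server.util.MetadataTableUtil;
    import org.apache.accumulo.server.zookeeper.ZooLock;

    public class SplitResumeSketch {
      // Create the low tablet's metadata, then clear the high tablet's
      // split markers: steps 1 and 2 of the test's recovery ladder.
      static void resume(KeyExtent low, KeyExtent high, String lowDir, String time,
          TServerInstance instance, SortedMap<FileRef,DataFileValue> lowSizes,
          Map<FileRef,Long> bulkFiles, SortedMap<FileRef,DataFileValue> highSizes,
          List<FileRef> highRemove, TCredentials creds, ZooLock zl) throws Exception {
        MetadataTableUtil.addNewTablet(low, lowDir, instance, lowSizes, bulkFiles, creds, time, -1l, -1l, zl);
        MetadataTableUtil.finishSplit(high, highSizes, highRemove, creds, zl);
      }
    }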
http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/main/java/org/apache/accumulo/test/performance/metadata/MetadataBatchScanTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/metadata/MetadataBatchScanTest.java b/test/src/main/java/org/apache/accumulo/test/performance/metadata/MetadataBatchScanTest.java
index 3675870..5602f14 100644
--- a/test/src/main/java/org/apache/accumulo/test/performance/metadata/MetadataBatchScanTest.java
+++ b/test/src/main/java/org/apache/accumulo/test/performance/metadata/MetadataBatchScanTest.java
@@ -35,9 +35,11 @@ import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.AddressUtil;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.Stat;
 import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.security.SecurityConstants;
@@ -99,10 +101,10 @@ public class MetadataBatchScanTest {
         
         String dir = "/t-" + UUID.randomUUID();
         
-        MetadataTable.DIRECTORY_COLUMN.put(mut, new Value(dir.getBytes()));
+        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mut, new Value(dir.getBytes()));
         
         for (int i = 0; i < 5; i++) {
-          mut.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text(dir + "/00000_0000" + i + ".map"), new Value("10000,1000000".getBytes()));
+          mut.put(DataFileColumnFamily.NAME, new Text(dir + "/00000_0000" + i + ".map"), new Value("10000,1000000".getBytes()));
         }
         
         bw.addMutation(mut);
@@ -165,8 +167,8 @@ public class MetadataBatchScanTest {
     Scanner scanner = null;
     
     BatchScanner bs = connector.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1);
-    bs.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
-    MetadataTable.PREV_ROW_COLUMN.fetch(bs);
+    bs.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(bs);
     
     bs.setRanges(ranges);
     

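The timed scans here fetch just enough to resolve tablet locations: the current-location family plus the prev-row column. The same lookup with the relocated constants, over arbitrary metadata ranges (assumes a Connector and precomputed ranges; the wrapper is illustrative):

    import java.util.List;
    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.BatchScanner;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.MetadataTable;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
    import org.apache.accumulo.core.security.Authorizations;

    public class LocationScanSketch {
      static void printLocations(Connector connector, List<Range> ranges) throws Exception {
        BatchScanner bs = connector.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1);
        bs.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(bs);
        bs.setRanges(ranges);
        for (Entry<Key,Value> entry : bs) // value is the location, e.g. host:port
          System.out.println(entry.getKey().getRow() + " " + entry.getValue());
        bs.close();
      }
    }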
http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java b/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java
index 692f415..08430e5 100644
--- a/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java
+++ b/test/src/main/java/org/apache/accumulo/test/performance/scan/CollectTabletStats.java
@@ -27,9 +27,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Random;
-import java.util.SortedSet;
+import java.util.SortedMap;
 import java.util.TreeMap;
-import java.util.TreeSet;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -59,6 +58,7 @@ import org.apache.accumulo.core.iterators.system.ColumnQualifierFilter;
 import org.apache.accumulo.core.iterators.system.DeletingIterator;
 import org.apache.accumulo.core.iterators.system.MultiIterator;
 import org.apache.accumulo.core.iterators.system.VisibilityFilter;
+import org.apache.accumulo.core.metadata.MetadataServicer;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
@@ -70,7 +70,7 @@ import org.apache.accumulo.server.conf.TableConfiguration;
 import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
-import org.apache.accumulo.server.util.MetadataTable;
+import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -84,16 +84,16 @@ public class CollectTabletStats {
   private static final Logger log = Logger.getLogger(CollectTabletStats.class);
   
   static class CollectOptions extends ClientOnRequiredTable {
-    @Parameter(names="--iterations", description="number of iterations")
+    @Parameter(names = "--iterations", description = "number of iterations")
     int iterations = 3;
-    @Parameter(names="-t", description="number of threads")
+    @Parameter(names = "-t", description = "number of threads")
     int numThreads = 1;
-    @Parameter(names="-f", description="select far tablets, default is to use local tablets")
+    @Parameter(names = "-f", description = "select far tablets, default is to use local tablets")
     boolean selectFarTablets = false;
-    @Parameter(names="-c", description="comma separated list of columns")
+    @Parameter(names = "-c", description = "comma separated list of columns")
     String columns;
   }
-
+  
   public static void main(String[] args) throws Exception {
     
     final CollectOptions opts = new CollectOptions();
@@ -106,7 +106,7 @@ public class CollectTabletStats {
     final String columns[] = columnsTmp;
     
     final VolumeManager fs = VolumeManagerImpl.get();
-
+    
     Instance instance = opts.getInstance();
     final ServerConfiguration sconf = new ServerConfiguration(instance);
     
@@ -116,8 +116,9 @@ public class CollectTabletStats {
       System.exit(-1);
     }
     
-    Map<KeyExtent,String> locations = new HashMap<KeyExtent,String>();
-    List<KeyExtent> candidates = findTablets(!opts.selectFarTablets, CredentialHelper.create(opts.principal, opts.getToken(), opts.instance), opts.tableName, instance, locations);
+    TreeMap<KeyExtent,String> tabletLocations = new TreeMap<KeyExtent,String>();
+    List<KeyExtent> candidates = findTablets(!opts.selectFarTablets, CredentialHelper.create(opts.principal, opts.getToken(), opts.instance), opts.tableName,
+        instance, tabletLocations);
     
     if (candidates.size() < opts.numThreads) {
       System.err.println("ERROR : Unable to find " + opts.numThreads + " " + (opts.selectFarTablets ? "far" : "local") + " tablets");
@@ -142,7 +143,7 @@ public class CollectTabletStats {
     for (KeyExtent ke : tabletsToTest) {
       System.out.println("\t *** Information about tablet " + ke.getUUID() + " *** ");
       System.out.println("\t\t# files in tablet : " + tabletFiles.get(ke).size());
-      System.out.println("\t\ttablet location   : " + locations.get(ke));
+      System.out.println("\t\ttablet location   : " + tabletLocations.get(ke));
       reportHdfsBlockLocations(tabletFiles.get(ke));
     }
     
@@ -157,6 +158,7 @@ public class CollectTabletStats {
       for (final KeyExtent ke : tabletsToTest) {
         final List<FileRef> files = tabletFiles.get(ke);
         Test test = new Test(ke) {
+          @Override
           public int runTest() throws Exception {
             return readFiles(fs, sconf.getConfiguration(), files, ke, columns);
           }
@@ -176,6 +178,7 @@ public class CollectTabletStats {
       for (final KeyExtent ke : tabletsToTest) {
         final List<FileRef> files = tabletFiles.get(ke);
         Test test = new Test(ke) {
+          @Override
           public int runTest() throws Exception {
             return readFilesUsingIterStack(fs, sconf, files, opts.auths, ke, columns, false);
           }
@@ -193,6 +196,7 @@ public class CollectTabletStats {
       for (final KeyExtent ke : tabletsToTest) {
         final List<FileRef> files = tabletFiles.get(ke);
         Test test = new Test(ke) {
+          @Override
           public int runTest() throws Exception {
             return readFilesUsingIterStack(fs, sconf, files, opts.auths, ke, columns, true);
           }
@@ -212,6 +216,7 @@ public class CollectTabletStats {
       
       for (final KeyExtent ke : tabletsToTest) {
         Test test = new Test(ke) {
+          @Override
           public int runTest() throws Exception {
             return scanTablet(conn, opts.tableName, opts.auths, scanOpts.scanBatchSize, ke.getPrevEndRow(), ke.getEndRow(), columns);
           }
@@ -227,6 +232,7 @@ public class CollectTabletStats {
       final Connector conn = opts.getConnector();
       
       threadPool.submit(new Runnable() {
+        @Override
         public void run() {
           try {
             calcTabletStats(conn, opts.tableName, opts.auths, scanOpts.scanBatchSize, ke, columns);
@@ -259,6 +265,7 @@ public class CollectTabletStats {
       this.finishCdl = fcdl;
     }
     
+    @Override
     public void run() {
       
       try {
@@ -337,23 +344,26 @@ public class CollectTabletStats {
     
   }
   
-  private static List<KeyExtent> findTablets(boolean selectLocalTablets, TCredentials credentials, String table, Instance zki,
-      Map<KeyExtent,String> locations) throws Exception {
-    SortedSet<KeyExtent> tablets = new TreeSet<KeyExtent>();
+  private static List<KeyExtent> findTablets(boolean selectLocalTablets, TCredentials credentials, String tableName, Instance zki,
+      SortedMap<KeyExtent,String> tabletLocations) throws Exception {
     
-    MetadataTable.getEntries(zki, credentials, table, false, locations, tablets);
+    String tableId = Tables.getNameToIdMap(zki).get(tableName);
+    MetadataServicer.forTableId(zki, credentials, tableId).getTabletLocations(tabletLocations);
     
     InetAddress localaddress = InetAddress.getLocalHost();
     
     List<KeyExtent> candidates = new ArrayList<KeyExtent>();
     
-    for (Entry<KeyExtent,String> entry : locations.entrySet()) {
-      boolean isLocal = AddressUtil.parseAddress(entry.getValue(), 4).getAddress().equals(localaddress);
-      
-      if (selectLocalTablets && isLocal) {
-        candidates.add(entry.getKey());
-      } else if (!selectLocalTablets && !isLocal) {
-        candidates.add(entry.getKey());
+    for (Entry<KeyExtent,String> entry : tabletLocations.entrySet()) {
+      String loc = entry.getValue();
+      if (loc != null) {
+        boolean isLocal = AddressUtil.parseAddress(entry.getValue(), 4).getAddress().equals(localaddress);
+        
+        if (selectLocalTablets && isLocal) {
+          candidates.add(entry.getKey());
+        } else if (!selectLocalTablets && !isLocal) {
+          candidates.add(entry.getKey());
+        }
       }
     }
     return candidates;
@@ -373,7 +383,7 @@ public class CollectTabletStats {
   }
   
   private static List<FileRef> getTabletFiles(TCredentials token, Instance zki, String tableId, KeyExtent ke) throws IOException {
-    return new ArrayList<FileRef>(MetadataTable.getDataFileSizes(ke, token).keySet());
+    return new ArrayList<FileRef>(MetadataTableUtil.getDataFileSizes(ke, token).keySet());
   }
   
   private static void reportHdfsBlockLocations(List<FileRef> files) throws Exception {
@@ -456,9 +466,8 @@ public class CollectTabletStats {
     return columnSet;
   }
   
-  private static int readFilesUsingIterStack(VolumeManager fs, ServerConfiguration aconf, List<FileRef> files, Authorizations auths, KeyExtent ke, String[] columns,
-      boolean useTableIterators)
-      throws Exception {
+  private static int readFilesUsingIterStack(VolumeManager fs, ServerConfiguration aconf, List<FileRef> files, Authorizations auths, KeyExtent ke,
+      String[] columns, boolean useTableIterators) throws Exception {
     
     SortedKeyValueIterator<Key,Value> reader;
     
@@ -472,7 +481,7 @@ public class CollectTabletStats {
     List<IterInfo> emptyIterinfo = Collections.emptyList();
     Map<String,Map<String,String>> emptySsio = Collections.emptyMap();
     TableConfiguration tconf = aconf.getTableConfiguration(ke.getTableId().toString());
-    reader = createScanIterator(ke, readers,auths, new byte[] {}, new HashSet<Column>(), emptyIterinfo, emptySsio, useTableIterators, tconf);
+    reader = createScanIterator(ke, readers, auths, new byte[] {}, new HashSet<Column>(), emptyIterinfo, emptySsio, useTableIterators, tconf);
     
     HashSet<ByteSequence> columnSet = createColumnBSS(columns);
     
@@ -489,7 +498,8 @@ public class CollectTabletStats {
     
   }
   
-  private static int scanTablet(Connector conn, String table, Authorizations auths, int batchSize, Text prevEndRow, Text endRow, String[] columns) throws Exception {
+  private static int scanTablet(Connector conn, String table, Authorizations auths, int batchSize, Text prevEndRow, Text endRow, String[] columns)
+      throws Exception {
     
     Scanner scanner = conn.createScanner(table, auths);
     scanner.setBatchSize(batchSize);
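
For context on the findTablets rewrite above: the new MetadataServicer.forTableId(...).getTabletLocations(...) call can report tablets that currently have no assigned location, so the loop now skips null locations before testing host locality. A minimal sketch of just that selection step, assuming the 1.5-era KeyExtent and AddressUtil APIs shown in the diff (the class and method names are illustrative):

    import java.net.InetAddress;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map.Entry;
    import java.util.SortedMap;

    import org.apache.accumulo.core.data.KeyExtent;
    import org.apache.accumulo.core.util.AddressUtil;

    public class LocalitySketch {
      // Keep tablets whose current location does (or does not) match this host.
      // Unassigned tablets carry a null location and are skipped entirely.
      static List<KeyExtent> selectByLocality(SortedMap<KeyExtent,String> tabletLocations, boolean selectLocalTablets) throws Exception {
        InetAddress localaddress = InetAddress.getLocalHost();
        List<KeyExtent> candidates = new ArrayList<KeyExtent>();
        for (Entry<KeyExtent,String> entry : tabletLocations.entrySet()) {
          String loc = entry.getValue();
          if (loc == null)
            continue; // tablet is not hosted anywhere right now
          boolean isLocal = AddressUtil.parseAddress(loc, 4).getAddress().equals(localaddress);
          if (isLocal == selectLocalTablets)
            candidates.add(entry.getKey());
        }
        return candidates;
      }
    }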

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/AddSplits.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/AddSplits.java b/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/AddSplits.java
index c7a82eb..aa976f7 100644
--- a/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/AddSplits.java
+++ b/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/AddSplits.java
@@ -25,7 +25,7 @@ import java.util.TreeSet;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.TableOfflineException;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.test.randomwalk.State;
 import org.apache.accumulo.test.randomwalk.Test;
 import org.apache.hadoop.io.Text;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/CheckBalance.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/CheckBalance.java b/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/CheckBalance.java
index 7a77b46..f0a93ac 100644
--- a/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/CheckBalance.java
+++ b/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/CheckBalance.java
@@ -24,8 +24,9 @@ import java.util.Properties;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.test.randomwalk.State;
 import org.apache.accumulo.test.randomwalk.Test;
 
@@ -42,7 +43,7 @@ public class CheckBalance extends Test {
     log.debug("checking balance");
     Map<String,Long> counts = new HashMap<String,Long>();
     Scanner scanner = state.getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    scanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
+    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
     for (Entry<Key,Value> entry : scanner) {
       String location = entry.getKey().getColumnQualifier().toString();
       Long count = counts.get(location);
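
The import churn in this and the surrounding diffs comes from a single refactor that moved the !METADATA schema constants out of org.apache.accumulo.core.util and into org.apache.accumulo.core.metadata and its schema subpackage. Based only on the substitutions visible in this patch series, the mapping is:

    org.apache.accumulo.core.util.MetadataTable  -> org.apache.accumulo.core.metadata.MetadataTable
    org.apache.accumulo.core.util.RootTable      -> org.apache.accumulo.core.metadata.RootTable
    MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY -> TabletsSection.CurrentLocationColumnFamily.NAME
    MetadataTable.DATAFILE_COLUMN_FAMILY         -> TabletsSection.DataFileColumnFamily.NAME
    MetadataTable.TABLET_COLUMN_FAMILY           -> TabletsSection.TabletColumnFamily.NAME
    MetadataTable.PREV_ROW_COLUMN                -> TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN
    MetadataTable.SPLIT_RATIO_COLUMN             -> TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN
    MetadataTable.OLD_PREV_ROW_COLUMN            -> TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN
    MetadataTable.DIRECTORY_COLUMN               -> TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN
    MetadataTable.TIME_COLUMN                    -> TabletsSection.ServerColumnFamily.TIME_COLUMN
    MetadataTable.DELETED_RANGE                  -> MetadataSchema.DeletesSection.getRange()

where TabletsSection is org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.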

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/Merge.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/Merge.java b/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/Merge.java
index b0d7ae1..8fcfab5 100644
--- a/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/Merge.java
+++ b/test/src/main/java/org/apache/accumulo/test/randomwalk/concurrent/Merge.java
@@ -24,7 +24,7 @@ import java.util.Random;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.TableOfflineException;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.test.randomwalk.State;
 import org.apache.accumulo.test.randomwalk.Test;
 import org.apache.hadoop.io.Text;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/test/java/org/apache/accumulo/test/MetaSplitTest.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/MetaSplitTest.java b/test/src/test/java/org/apache/accumulo/test/MetaSplitTest.java
index 4a9c3f5..d3ebd0f 100644
--- a/test/src/test/java/org/apache/accumulo/test/MetaSplitTest.java
+++ b/test/src/test/java/org/apache/accumulo/test/MetaSplitTest.java
@@ -21,9 +21,11 @@ import static org.junit.Assert.assertEquals;
 import java.util.SortedSet;
 import java.util.TreeSet;
 
+import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.minicluster.MiniAccumuloCluster;
 import org.apache.accumulo.minicluster.MiniAccumuloConfig;
 import org.apache.hadoop.io.Text;
@@ -44,7 +46,6 @@ public class MetaSplitTest {
     MiniAccumuloConfig cfg = new MiniAccumuloConfig(folder.newFolder("miniAccumulo"), secret);
     cluster = new MiniAccumuloCluster(cfg);
     cluster.start();
-    
   }
   
   @AfterClass
@@ -53,6 +54,22 @@ public class MetaSplitTest {
     folder.delete();
   }
   
+  @Test(expected = AccumuloException.class)
+  public void testRootTableSplit() throws Exception {
+    Connector connector = cluster.getConnector("root", secret);
+    TableOperations opts = connector.tableOperations();
+    SortedSet<Text> splits = new TreeSet<Text>();
+    splits.add(new Text("5"));
+    opts.addSplits(RootTable.NAME, splits);
+  }
+  
+  @Test
+  public void testRootTableMerge() throws Exception {
+    Connector connector = cluster.getConnector("root", secret);
+    TableOperations opts = connector.tableOperations();
+    opts.merge(RootTable.NAME, null, null);
+  }
+  
   private void addSplits(TableOperations opts, String... points) throws Exception {
     SortedSet<Text> splits = new TreeSet<Text>();
     for (String point : points) {
@@ -62,7 +79,7 @@ public class MetaSplitTest {
   }
   
   @Test(timeout = 60000)
-  public void testMetaSplit() throws Exception {
+  public void testMetadataTableSplit() throws Exception {
     Connector connector = cluster.getConnector("root", secret);
     TableOperations opts = connector.tableOperations();
     for (int i = 1; i <= 10; i++) {
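
The two new tests above pin down the client-facing rules for the root table: addSplits on it throws AccumuloException, while a merge across its whole range is accepted. A hedged sketch of exercising both rules from client code (the class and method names are illustrative):

    import java.util.SortedSet;
    import java.util.TreeSet;

    import org.apache.accumulo.core.client.AccumuloException;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.admin.TableOperations;
    import org.apache.accumulo.core.metadata.RootTable;
    import org.apache.hadoop.io.Text;

    public class RootTableLimitsSketch {
      static void demonstrate(Connector connector) throws Exception {
        TableOperations ops = connector.tableOperations();
        SortedSet<Text> splits = new TreeSet<Text>();
        splits.add(new Text("5"));
        try {
          ops.addSplits(RootTable.NAME, splits); // rejected, per testRootTableSplit
        } catch (AccumuloException expected) {
          // the root table cannot be split
        }
        ops.merge(RootTable.NAME, null, null); // allowed, per testRootTableMerge
      }
    }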

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/test/java/org/apache/accumulo/test/TestAccumuloSplitRecovery.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/TestAccumuloSplitRecovery.java b/test/src/test/java/org/apache/accumulo/test/TestAccumuloSplitRecovery.java
index 31eaf41..c481f91 100644
--- a/test/src/test/java/org/apache/accumulo/test/TestAccumuloSplitRecovery.java
+++ b/test/src/test/java/org/apache/accumulo/test/TestAccumuloSplitRecovery.java
@@ -30,9 +30,11 @@ import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.minicluster.MiniAccumuloCluster;
 import org.apache.hadoop.io.Text;
@@ -71,7 +73,7 @@ public class TestAccumuloSplitRecovery {
     String tableId = connector.tableOperations().tableIdMap().get(tablename);
     Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     scanner.setRange(new Range(new Text(tableId + ";"), new Text(tableId + "<")));
-    scanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
+    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
     for (@SuppressWarnings("unused")
     Entry<Key,Value> entry : scanner) {
       return false;
@@ -104,8 +106,8 @@ public class TestAccumuloSplitRecovery {
       KeyExtent extent = new KeyExtent(new Text(tableId), null, new Text("b"));
       Mutation m = extent.getPrevRowUpdateMutation();
       
-      MetadataTable.SPLIT_RATIO_COLUMN.put(m, new Value(Double.toString(0.5).getBytes()));
-      MetadataTable.OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(null));
+      TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value(Double.toString(0.5).getBytes()));
+      TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(null));
       bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
       bw.addMutation(m);
       
@@ -115,15 +117,15 @@ public class TestAccumuloSplitRecovery {
         
         Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
         scanner.setRange(extent.toMetadataRange());
-        scanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
+        scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
         
         KeyExtent extent2 = new KeyExtent(new Text(tableId), new Text("b"), null);
         m = extent2.getPrevRowUpdateMutation();
-        MetadataTable.DIRECTORY_COLUMN.put(m, new Value("/t2".getBytes()));
-        MetadataTable.TIME_COLUMN.put(m, new Value("M0".getBytes()));
+        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t2".getBytes()));
+        TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value("M0".getBytes()));
         
         for (Entry<Key,Value> entry : scanner) {
-          m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, entry.getKey().getColumnQualifier(), entry.getValue());
+          m.put(DataFileColumnFamily.NAME, entry.getKey().getColumnQualifier(), entry.getValue());
         }
         
         bw.addMutation(m);
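
The helper being patched above answers "does this table have any hosted tablets?" by scanning the table's slice of the metadata table for current-location entries. Consolidated under the new schema names, a self-contained sketch looks like:

    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.MetadataTable;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.hadoop.io.Text;

    public class AssignmentCheckSketch {
      // True if no tablet of the given table currently has a location.
      static boolean isNotAssigned(Connector connector, String tablename) throws Exception {
        String tableId = connector.tableOperations().tableIdMap().get(tablename);
        Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
        scanner.setRange(new Range(new Text(tableId + ";"), new Text(tableId + "<")));
        scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
        for (@SuppressWarnings("unused")
        Entry<Key,Value> entry : scanner) {
          return false; // found a hosted tablet
        }
        return true;
      }
    }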

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/test/java/org/apache/accumulo/test/VolumeTest.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/VolumeTest.java b/test/src/test/java/org/apache/accumulo/test/VolumeTest.java
index 896f387..18b0c9a 100644
--- a/test/src/test/java/org/apache/accumulo/test/VolumeTest.java
+++ b/test/src/test/java/org/apache/accumulo/test/VolumeTest.java
@@ -33,8 +33,8 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.minicluster.MiniAccumuloCluster;
 import org.apache.accumulo.minicluster.MiniAccumuloConfig;
 import org.apache.hadoop.fs.Path;
@@ -104,7 +104,7 @@ public class VolumeTest {
     // verify the new files are written to the different volumes
     scanner = connector.createScanner("!METADATA", Authorizations.EMPTY);
     scanner.setRange(new Range("1", "1<"));
-    scanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
+    scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
     int fileCount = 0;
     for (Entry<Key,Value> entry : scanner) {
       boolean inV1 = entry.getKey().getColumnQualifier().toString().contains(v1.toString());

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/test/java/org/apache/accumulo/test/functional/CompactionIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CompactionIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CompactionIT.java
index 0a55566..5b1a83f 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/CompactionIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/CompactionIT.java
@@ -33,9 +33,10 @@ import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.minicluster.MiniAccumuloConfig;
 import org.apache.accumulo.server.util.Admin;
 import org.apache.accumulo.test.VerifyIngest;
@@ -44,17 +45,16 @@ import org.junit.Test;
 
 public class CompactionIT extends MacTest {
   
-  
   @Override
   public void configure(MiniAccumuloConfig cfg) {
-    Map<String, String> map = new HashMap<String, String>();
+    Map<String,String> map = new HashMap<String,String>();
     map.put(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey(), "4");
     map.put(Property.TSERV_MAJC_DELAY.getKey(), "1");
     map.put(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "1");
     cfg.setSiteConfig(map);
   }
   
-  @Test(timeout=120*1000)
+  @Test(timeout = 120 * 1000)
   public void test() throws Exception {
     final Connector c = getConnector();
     c.tableOperations().create("test_ingest");
@@ -68,9 +68,10 @@ public class CompactionIT extends MacTest {
     for (int count = 0; count < 5; count++) {
       List<Thread> threads = new ArrayList<Thread>();
       final int span = 500000 / 59;
-      for (int i = 0; i < 500000; i += 500000/59 ) {
+      for (int i = 0; i < 500000; i += 500000 / 59) {
         final int finalI = i;
         Thread t = new Thread() {
+          @Override
           public void run() {
             try {
               VerifyIngest.Opts opts = new VerifyIngest.Opts();
@@ -97,13 +98,14 @@ public class CompactionIT extends MacTest {
     assertTrue(finalCount < beforeCount);
     assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
   }
-
+  
   private int countFiles(Connector c) throws Exception {
     Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    s.fetchColumnFamily(MetadataTable.TABLET_COLUMN_FAMILY);
-    s.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
+    s.fetchColumnFamily(MetadataSchema.TabletsSection.TabletColumnFamily.NAME);
+    s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
     int i = 0;
-    for (@SuppressWarnings("unused") Entry<Key,Value> entry : s)
+    for (@SuppressWarnings("unused")
+    Entry<Key,Value> entry : s)
       i++;
     return i;
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
index 96e425c..6e4e1a7 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
@@ -44,7 +44,7 @@ public class DynamicThreadPoolsIT extends MacTest {
     cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_MAJC_DELAY.getKey(), "1"));
   }
   
-  @Test(timeout=90*1000)
+  @Test(timeout = 90 * 1000)
   public void test() throws Exception {
     Connector c = getConnector();
     TestIngest.Opts opts = new TestIngest.Opts();
@@ -74,7 +74,7 @@ public class DynamicThreadPoolsIT extends MacTest {
         if (client != null)
           MasterClient.close(client);
       }
-      for (TabletServerStatus server: stats.tServerInfo) {
+      for (TabletServerStatus server : stats.tServerInfo) {
         for (TableInfo table : server.tableMap.values()) {
           count += table.majors.running;
         }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java b/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
index 1466153..4af2c96 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
@@ -37,8 +37,9 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.minicluster.MiniAccumuloCluster;
 import org.apache.accumulo.minicluster.MiniAccumuloCluster.LogWriter;
 import org.apache.accumulo.test.TestIngest;
@@ -51,8 +52,8 @@ public class FunctionalTestUtils {
     Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     String tableId = c.tableOperations().tableIdMap().get(tableName);
     scanner.setRange(new Range(new Text(tableId + ";"), true, new Text(tableId + "<"), true));
-    scanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
-    MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
+    scanner.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
+    MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
     
     HashMap<Text,Integer> tabletFileCounts = new HashMap<Text,Integer>();
     
@@ -63,7 +64,7 @@ public class FunctionalTestUtils {
       Integer count = tabletFileCounts.get(row);
       if (count == null)
         count = 0;
-      if (entry.getKey().getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
+      if (entry.getKey().getColumnFamily().equals(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME)) {
         count = count + 1;
       }
       
@@ -88,7 +89,7 @@ public class FunctionalTestUtils {
     fs.delete(failPath, true);
     fs.mkdirs(failPath);
     
-   c.tableOperations().importDirectory(table, dir, failDir, false);
+    c.tableOperations().importDirectory(table, dir, failDir, false);
     
     if (fs.listStatus(failPath).length > 0) {
       throw new Exception("Some files failed to bulk import");
@@ -159,6 +160,5 @@ public class FunctionalTestUtils {
   static Mutation nm(String row, String cf, String cq, String value) {
     return nm(row, cf, cq, new Value(value.getBytes()));
   }
-
   
 }
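
The whitespace fix above sits inside the utility's bulk-import helper, whose pattern is worth spelling out: clear the failure directory, import, then treat any file left in the failure directory as an error. A condensed sketch using the same API calls as the diff:

    import org.apache.accumulo.core.client.Connector;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class BulkImportSketch {
      static void bulkImport(FileSystem fs, Connector c, String table, String dir, String failDir) throws Exception {
        Path failPath = new Path(failDir);
        fs.delete(failPath, true); // start with an empty failure directory
        fs.mkdirs(failPath);
        c.tableOperations().importDirectory(table, dir, failDir, false);
        // tservers move any file they could not load into failDir
        if (fs.listStatus(failPath).length > 0)
          throw new Exception("Some files failed to bulk import");
      }
    }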

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java b/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
index eb5c0fa..3d1e787 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
@@ -28,9 +28,10 @@ import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.minicluster.MemoryUnit;
 import org.apache.accumulo.minicluster.MiniAccumuloConfig;
@@ -47,15 +48,15 @@ public class GarbageCollectorIT extends MacTest {
   
   @Override
   public void configure(MiniAccumuloConfig cfg) {
-    Map<String, String> settings = new HashMap<String, String>();
+    Map<String,String> settings = new HashMap<String,String>();
     settings.put(Property.GC_CYCLE_START.getKey(), "1");
     settings.put(Property.GC_CYCLE_DELAY.getKey(), "1");
     settings.put(Property.TSERV_MAXMEM.getKey(), "5K");
     settings.put(Property.TSERV_MAJC_DELAY.getKey(), "1");
     cfg.setSiteConfig(settings);
   }
-
-  @Test(timeout=60*1000)
+  
+  @Test(timeout = 60 * 1000)
   public void gcTest() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");
@@ -75,31 +76,32 @@ public class GarbageCollectorIT extends MacTest {
       before = more;
     }
     Process gc = cluster.exec(SimpleGarbageCollector.class);
-    UtilWaitThread.sleep(5*1000);
+    UtilWaitThread.sleep(5 * 1000);
     int after = countFiles();
     VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
     assertTrue(after < before);
     gc.destroy();
   }
   
-  @Test(timeout=60*1000)
+  @Test(timeout = 60 * 1000)
   public void gcLotsOfCandidatesIT() throws Exception {
     log.info("Filling !METADATA table with bogus delete flags");
     Connector c = getConnector();
     addEntries(c, new BatchWriterOpts());
     cluster.getConfig().setDefaultMemory(10, MemoryUnit.MEGABYTE);
     Process gc = cluster.exec(SimpleGarbageCollector.class);
-    UtilWaitThread.sleep(10*1000);
+    UtilWaitThread.sleep(10 * 1000);
     String output = FunctionalTestUtils.readAll(cluster, SimpleGarbageCollector.class, gc);
     gc.destroy();
     assertTrue(output.contains("delete candidates has exceeded"));
   }
-
+  
   private int countFiles() throws Exception {
     FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
     int result = 0;
-    Path path = new Path(cluster.getConfig().getDir()+"/accumulo/tables/1/*/*.rf");
-    for (@SuppressWarnings("unused") FileStatus entry : fs.globStatus(path)) {
+    Path path = new Path(cluster.getConfig().getDir() + "/accumulo/tables/1/*/*.rf");
+    for (@SuppressWarnings("unused")
+    FileStatus entry : fs.globStatus(path)) {
       result++;
     }
     return result;
@@ -111,7 +113,7 @@ public class GarbageCollectorIT extends MacTest {
     
     for (int i = 0; i < 100000; ++i) {
       final Text emptyText = new Text("");
-      Text row = new Text(String.format("%s%s%020d%s", MetadataTable.DELETED_RANGE.getStartKey().getRow().toString(), "/", i,
+      Text row = new Text(String.format("%s%s%020d%s", MetadataSchema.DeletesSection.getRowPrefix(), "/", i,
           "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjj"));
       Mutation delFlag = new Mutation(row);
       delFlag.put(emptyText, emptyText, new Value(new byte[] {}));
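
Delete candidates live in their own section of the metadata table, and the change above swaps the old MetadataTable.DELETED_RANGE constant for MetadataSchema.DeletesSection. A small sketch of writing one bogus delete flag and then counting the section's entries, distilled from this test and MergeMetaIT below (the file path in the row is illustrative):

    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.MetadataTable;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema;
    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.hadoop.io.Text;

    public class DeleteFlagSketch {
      static int writeAndCount(Connector c) throws Exception {
        Text empty = new Text("");
        Mutation delFlag = new Mutation(new Text(MetadataSchema.DeletesSection.getRowPrefix() + "/tables/1/t-000/F000.rf"));
        delFlag.put(empty, empty, new Value(new byte[] {}));
        BatchWriter bw = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
        bw.addMutation(delFlag);
        bw.close();

        Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
        s.setRange(MetadataSchema.DeletesSection.getRange());
        int count = 0;
        for (@SuppressWarnings("unused")
        Entry<Key,Value> e : s) {
          count++;
        }
        return count;
      }
    }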

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java
index bbfdcbe..2041232 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java
@@ -16,7 +16,8 @@
  */
 package org.apache.accumulo.test.functional;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.util.Map.Entry;
 import java.util.SortedSet;
@@ -26,16 +27,17 @@ import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
 
 public class MergeMetaIT extends MacTest {
   
-  @Test(timeout=30*1000)
+  @Test(timeout = 30 * 1000)
   public void mergeMeta() throws Exception {
     Connector c = getConnector();
     SortedSet<Text> splits = new TreeSet<Text>();
@@ -47,11 +49,12 @@ public class MergeMetaIT extends MacTest {
       c.tableOperations().create(tableName);
     }
     c.tableOperations().merge(MetadataTable.NAME, null, null);
-    UtilWaitThread.sleep(2*1000);
+    UtilWaitThread.sleep(2 * 1000);
     Scanner s = c.createScanner(RootTable.NAME, Authorizations.EMPTY);
-    s.setRange(MetadataTable.DELETED_RANGE);
+    s.setRange(MetadataSchema.DeletesSection.getRange());
     int count = 0;
-    for (@SuppressWarnings("unused") Entry<Key,Value> e : s) {
+    for (@SuppressWarnings("unused")
+    Entry<Key,Value> e : s) {
       count++;
     }
     assertTrue(count > 0);


[40/50] [abbrv] git commit: ACCUMULO-1567 ignore recently split tablets when complaining about overlapping assignments

Posted by ct...@apache.org.
ACCUMULO-1567 ignore recently split tablets when complaining about overlapping assignments

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1502587 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/37d2fdb7
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/37d2fdb7
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/37d2fdb7

Branch: refs/heads/ACCUMULO-1496
Commit: 37d2fdb7590ea7e9be220c5d0544f367107ca4eb
Parents: a904c3a
Author: Eric C. Newton <ec...@apache.org>
Authored: Fri Jul 12 15:29:44 2013 +0000
Committer: Eric C. Newton <ec...@apache.org>
Committed: Fri Jul 12 15:29:44 2013 +0000

----------------------------------------------------------------------
 .../accumulo/server/tabletserver/TabletServer.java       | 11 +++++++++++
 1 file changed, 11 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/37d2fdb7/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
index 52548bf..9d50f07 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
@@ -228,6 +228,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
   private static long gcTimeIncreasedCount;
   
   private static final long MAX_TIME_TO_WAIT_FOR_SCAN_RESULT_MILLIS = 1000;
+  private static final long RECENTLY_SPLIT_MILLIES = 60*1000;
   
   private TabletServerLogger logger;
   
@@ -1833,6 +1834,16 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
             Set<KeyExtent> unopenedOverlapping = KeyExtent.findOverlapping(extent, unopenedTablets);
             Set<KeyExtent> openingOverlapping = KeyExtent.findOverlapping(extent, openingTablets);
             Set<KeyExtent> onlineOverlapping = KeyExtent.findOverlapping(extent, onlineTablets);
+            
+            // ignore any tablets that have recently split
+            Iterator<KeyExtent> each = onlineOverlapping.iterator();
+            while (each.hasNext()) {
+              Tablet tablet = onlineTablets.get(each.next());
+              if (System.currentTimeMillis() - tablet.getSplitCreationTime() < RECENTLY_SPLIT_MILLIES) {
+                each.remove();
+              }
+            }
+            
             Set<KeyExtent> all = new HashSet<KeyExtent>();
             all.addAll(unopenedOverlapping);
             all.addAll(openingOverlapping);
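
The fix above is a grace period: a tablet that split within the last minute legitimately overlaps its children while the metadata catches up, so it is removed from the overlap set before the server logs a complaint. The filter pattern in isolation (the committed constant is spelled RECENTLY_SPLIT_MILLIES; this sketch uses the conventional spelling, and the Tablet interface stands in for the tserver-internal class):

    import java.util.Iterator;
    import java.util.Map;
    import java.util.Set;

    import org.apache.accumulo.core.data.KeyExtent;

    public class RecentSplitFilterSketch {
      static final long RECENTLY_SPLIT_MILLIS = 60 * 1000;

      // Placeholder for the tserver-internal Tablet class from the diff.
      interface Tablet {
        long getSplitCreationTime();
      }

      // Drop extents whose online tablet split less than a minute ago; they
      // overlap their children only transiently and are not a real problem.
      static void dropRecentlySplit(Set<KeyExtent> overlapping, Map<KeyExtent,Tablet> online) {
        Iterator<KeyExtent> each = overlapping.iterator();
        while (each.hasNext()) {
          Tablet tablet = online.get(each.next());
          if (tablet != null && System.currentTimeMillis() - tablet.getSplitCreationTime() < RECENTLY_SPLIT_MILLIS)
            each.remove();
        }
      }
    }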


[47/50] [abbrv] git commit: Merge remote branch 'asf/ACCUMULO-1496' into ACCUMULO-1496

Posted by ct...@apache.org.
Merge remote branch 'asf/ACCUMULO-1496' into ACCUMULO-1496

Conflicts:
	assemble/pom.xml
	assemble/src/main/assemblies/component.xml
	pom.xml
	start/pom.xml


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/fbe05ff9
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/fbe05ff9
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/fbe05ff9

Branch: refs/heads/ACCUMULO-1496
Commit: fbe05ff942e5afce3c052894df49d1f577019880
Parents: e01cf8d 5750b90
Author: Christopher Tubbs <ct...@apache.org>
Authored: Tue Jul 16 15:34:42 2013 -0400
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Tue Jul 16 15:34:42 2013 -0400

----------------------------------------------------------------------
 assemble/pom.xml | 4 ++++
 pom.xml          | 5 +++++
 2 files changed, 9 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/fbe05ff9/assemble/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/accumulo/blob/fbe05ff9/pom.xml
----------------------------------------------------------------------


[48/50] [abbrv] git commit: ACCUMULO-1496 remove unused artifact

Posted by ct...@apache.org.
ACCUMULO-1496 remove unused artifact


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/f8b1c648
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/f8b1c648
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/f8b1c648

Branch: refs/heads/ACCUMULO-1496
Commit: f8b1c6481abafed35739262751fd26633a5c5ada
Parents: fbe05ff
Author: Christopher Tubbs <ct...@apache.org>
Authored: Tue Jul 16 15:42:34 2013 -0400
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Tue Jul 16 15:42:34 2013 -0400

----------------------------------------------------------------------
 assemble/pom.xml | 4 ----
 1 file changed, 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/f8b1c648/assemble/pom.xml
----------------------------------------------------------------------
diff --git a/assemble/pom.xml b/assemble/pom.xml
index 3dbe6ae..e7daa0b 100644
--- a/assemble/pom.xml
+++ b/assemble/pom.xml
@@ -47,10 +47,6 @@
       <artifactId>jline</artifactId>
     </dependency>
     <dependency>
-      <groupId>net.sf.scannotation</groupId>
-      <artifactId>scannotation</artifactId>
-    </dependency>
-    <dependency>
       <groupId>org.apache.accumulo</groupId>
       <artifactId>accumulo-core</artifactId>
     </dependency>


[34/50] [abbrv] git commit: ACCUMULO-998 applying Michael Allen's updated patch for at-rest encryption

Posted by ct...@apache.org.
ACCUMULO-998 applying Michael Allen's updated patch for at-rest encryption

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1502316 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/65b5a3a3
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/65b5a3a3
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/65b5a3a3

Branch: refs/heads/ACCUMULO-1496
Commit: 65b5a3a3708dc96e09e95c00a0fbe72e60edf3ad
Parents: 3b624e1
Author: Eric C. Newton <ec...@apache.org>
Authored: Thu Jul 11 18:17:04 2013 +0000
Committer: Eric C. Newton <ec...@apache.org>
Committed: Thu Jul 11 18:17:04 2013 +0000

----------------------------------------------------------------------
 conf/examples/crypto/accumulo-site.xml          | 160 +++++
 conf/examples/crypto/readme.txt                 |   5 +
 .../org/apache/accumulo/core/conf/Property.java |  84 +--
 .../accumulo/core/conf/SiteConfiguration.java   |  11 +
 .../file/blockfile/impl/CachableBlockFile.java  |   2 +-
 .../accumulo/core/file/rfile/bcfile/BCFile.java | 383 ++++++++++-
 .../core/file/rfile/bcfile/Compression.java     |   4 +-
 .../core/security/crypto/CryptoModule.java      | 110 ++--
 .../security/crypto/CryptoModuleFactory.java    | 184 +++---
 .../security/crypto/CryptoModuleParameters.java | 629 ++++++++++++++++++
 .../security/crypto/DefaultCryptoModule.java    | 483 +++++++++-----
 .../crypto/DefaultCryptoModuleUtils.java        |   1 -
 .../DefaultSecretKeyEncryptionStrategy.java     | 326 ++++-----
 .../crypto/DiscardCloseOutputStream.java        |  39 ++
 .../crypto/SecretKeyEncryptionStrategy.java     |   7 +-
 .../accumulo/core/file/rfile/RFileTest.java     | 658 ++++++++++++++++++-
 .../core/security/crypto/CryptoTest.java        | 390 +++++++++++
 .../test/resources/crypto-off-accumulo-site.xml | 111 ++++
 .../test/resources/crypto-on-accumulo-site.xml  | 164 +++++
 ...rypto-on-no-key-encryption-accumulo-site.xml | 144 ++++
 pom.xml                                         |   2 +-
 .../server/tabletserver/log/DfsLogger.java      |  24 +-
 .../server/tabletserver/log/LogSorter.java      |  61 +-
 23 files changed, 3317 insertions(+), 665 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/conf/examples/crypto/accumulo-site.xml
----------------------------------------------------------------------
diff --git a/conf/examples/crypto/accumulo-site.xml b/conf/examples/crypto/accumulo-site.xml
new file mode 100644
index 0000000..77bf8db
--- /dev/null
+++ b/conf/examples/crypto/accumulo-site.xml
@@ -0,0 +1,160 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+	<!--
+  Put your site-specific accumulo configurations here.
+
+  The available configuration values along with their defaults
+  are documented in docs/config.html
+
+  Unless you are simply testing at your workstation, you will most
+  definitely need to change the three entries below.
+	-->
+
+    <property>
+      <name>instance.zookeeper.host</name>
+      <value>localhost:2181</value>
+      <description>comma separated list of zookeeper servers</description>
+    </property>
+
+    <property>
+      <name>logger.dir.walog</name>
+      <value>walogs</value>
+      <description>The directory used to store write-ahead logs on the local filesystem. It is possible to specify a comma-separated list of directories.</description>
+    </property>
+
+    <property>
+      <name>instance.secret</name>
+      <value>DEFAULT</value>
+      <description>A secret unique to a given instance that all servers must know in order to communicate with one another.
+                   Change it before initialization. To change it later use ./bin/accumulo org.apache.accumulo.server.util.ChangeSecret [oldpasswd] [newpasswd],
+                   and then update this file.
+      </description>
+    </property>
+
+    <property>
+      <name>tserver.memory.maps.max</name>
+      <value>80M</value>
+    </property>
+
+    <property>
+      <name>tserver.cache.data.size</name>
+      <value>7M</value>
+    </property>
+
+    <property>
+      <name>tserver.cache.index.size</name>
+      <value>20M</value>
+    </property>
+
+    <property>
+      <name>trace.password</name>
+      <!--
+        change this to the root user's password, and/or change the user below
+       -->
+      <value>password</value>
+    </property>
+
+    <property>
+      <name>trace.user</name>
+      <value>root</value>
+    </property>
+
+    <property>
+      <name>tserver.sort.buffer.size</name>
+      <value>50M</value>
+    </property>
+
+    <property>
+      <name>tserver.walog.max.size</name>
+      <value>100M</value>
+    </property>
+
+    <property>
+      <name>general.classpaths</name>
+      <value>
+    $ACCUMULO_HOME/server/target/classes/,
+    $ACCUMULO_HOME/core/target/classes/,
+    $ACCUMULO_HOME/start/target/classes/,
+    $ACCUMULO_HOME/fate/target/classes/,
+    $ACCUMULO_HOME/proxy/target/classes/,
+    $ACCUMULO_HOME/examples/target/classes/,
+	$ACCUMULO_HOME/lib/[^.].$ACCUMULO_VERSION.jar,
+	$ACCUMULO_HOME/lib/[^.].*.jar,
+	$ZOOKEEPER_HOME/zookeeper[^.].*.jar,
+	$HADOOP_CONF_DIR,
+	$HADOOP_PREFIX/[^.].*.jar,
+	$HADOOP_PREFIX/lib/[^.].*.jar,
+      </value>
+      <description>Classpaths that accumulo checks for updates and class files.
+      When using the Security Manager, please remove the ".../target/classes/" values.
+      </description>
+    </property>
+
+    <property>
+      <name>crypto.module.class</name>
+      <value>org.apache.accumulo.core.security.crypto.DefaultCryptoModule</value>
+    </property>
+    <property>
+      <name>crypto.cipher.suite</name>
+      <value>AES/CFB/PKCS5Padding</value>
+    </property>
+    <property>
+      <name>crypto.cipher.algorithm.name</name>
+      <value>AES</value>
+    </property>
+    <property>
+      <name>crypto.cipher.key.length</name>
+      <value>128</value>
+    </property>
+    <property>
+      <name>crypto.secure.rng</name>
+      <value>SHA1PRNG</value>
+    </property>
+    <property>
+      <name>crypto.secure.rng.provider</name>
+      <value>SUN</value>
+    </property>
+
+    <property>
+      <name>crypto.secret.key.encryption.strategy.class</name>
+      <value>org.apache.accumulo.core.security.crypto.DefaultSecretKeyEncryptionStrategy</value>
+    </property>
+
+    <property>
+        <name>crypto.default.key.strategy.cipher.suite</name>
+        <value>AES/ECB/NoPadding</value>
+    </property>
+<!--
+     These properties can be useful for situations where you are conducting unit tests without HDFS
+    running.
+-->
+
+<!--
+    <property>
+      <name>crypto.default.key.strategy.hdfs.uri</name>
+      <value>file:///</value>
+    </property>
+    <property>
+      <name>crypto.default.key.strategy.key.location</name>
+      <value>/tmp/test.secret.key</value>
+    </property>
+-->
+</configuration>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/conf/examples/crypto/readme.txt
----------------------------------------------------------------------
diff --git a/conf/examples/crypto/readme.txt b/conf/examples/crypto/readme.txt
new file mode 100644
index 0000000..c638b6e
--- /dev/null
+++ b/conf/examples/crypto/readme.txt
@@ -0,0 +1,5 @@
+This accumulo-site.xml file demonstrates how to configure the basic encryption at rest feature.  
+
+The default configuration shown here is not entirely secure, as the master key for all encryption keys
+is stored alongside the encrypted files.  Placing that master key somewhere secure is an exercise
+left to the reader.

http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/core/src/main/java/org/apache/accumulo/core/conf/Property.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/Property.java b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
index 8572a2e..ca86c9a 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
@@ -59,11 +59,20 @@ public enum Property {
       "The class Accumulo should use for its key encryption strategy."),
   @Experimental
   CRYPTO_DEFAULT_KEY_STRATEGY_HDFS_URI("crypto.default.key.strategy.hdfs.uri", "", PropertyType.STRING,
-      "The URL Accumulo should use to connect to DFS. If this is blank, Accumulo will obtain this information from the Hadoop configuration"),
+      "The path relative to the top level instance directory (instance.dfs.dir) where to store the key encryption key within HDFS."),
   @Experimental
-  CRYPTO_DEFAULT_KEY_STRATEGY_KEY_LOCATION("crypto.default.key.strategy.key.location", "/accumulo/crypto/secret/keyEncryptionKey", PropertyType.ABSOLUTEPATH,
-      "The absolute path of where to store the key encryption key within HDFS."),
-  
+  CRYPTO_DEFAULT_KEY_STRATEGY_KEY_LOCATION("crypto.default.key.strategy.key.location", "/crypto/secret/keyEncryptionKey", PropertyType.ABSOLUTEPATH,
+      "The path relative to the top level instance directory (instance.dfs.dir) where to store the key encryption key within HDFS."),
+  @Experimental
+  CRYPTO_DEFAULT_KEY_STRATEGY_CIPHER_SUITE("crypto.default.key.strategy.cipher.suite", "NullCipher", PropertyType.STRING,
+      "The cipher suite to use when encrypting session keys with a key encryption key.  This should be set to match the overall encryption algorithm  " +
+      "but with ECB mode and no padding unless you really know what you're doing and are sure you won't break internal file formats"),
+  @Experimental
+  CRYPTO_OVERRIDE_KEY_STRATEGY_WITH_CONFIGURED_STRATEGY("crypto.override.key.strategy.with.configured.strategy", "false", PropertyType.BOOLEAN,
+          "The default behavior is to record the key encryption strategy with the encrypted file, and continue to use that strategy for the life " +
+          "of that file.  Sometimes, you change your strategy and want to use the new strategy, not the old one.  (Most commonly, this will be " +
+          "because you have moved key material from one spot to another.)  If you want to override the recorded key strategy with the one in " +
+          "the configuration file, set this property to true."),
   // instance properties (must be the same for every node in an instance)
   INSTANCE_PREFIX("instance.", null, PropertyType.PREFIX,
       "Properties in this category must be consistent throughout a cloud. This is enforced and servers won't be able to communicate if these differ."),
@@ -86,7 +95,7 @@ public enum Property {
       "The authorizor class that accumulo will use to determine what labels a user has privilege to see"),
   INSTANCE_SECURITY_PERMISSION_HANDLER("instance.security.permissionHandler", "org.apache.accumulo.server.security.handler.ZKPermHandler",
       PropertyType.CLASSNAME, "The permission handler class that accumulo will use to determine if a user has privilege to perform an action"),
-  
+
   // general properties
   GENERAL_PREFIX("general.", null, PropertyType.PREFIX,
       "Properties in this category affect the behavior of accumulo overall, but do not have to be consistent throughout a cloud."),
@@ -103,7 +112,6 @@ public enum Property {
   GENERAL_MAX_MESSAGE_SIZE("tserver.server.message.size.max", "1G", PropertyType.MEMORY, "The maximum size of a message that can be sent to a tablet server."),
   GENERAL_VOLUME_CHOOSER("general.volume.chooser", "org.apache.accumulo.server.fs.RandomVolumeChooser", PropertyType.CLASSNAME, "The class that will be used to select which volume will be used to create new files."),
 
-  
   // properties that are specific to master server behavior
   MASTER_PREFIX("master.", null, PropertyType.PREFIX, "Properties in this category affect the behavior of the master server"),
   MASTER_CLIENTPORT("master.port.client", "9999", PropertyType.PORT, "The port used for handling client connections on the master"),
@@ -124,7 +132,7 @@ public enum Property {
       "A class that implements a mechansim to steal write access to a file"),
   MASTER_FATE_THREADPOOL_SIZE("master.fate.threadpool.size", "4", PropertyType.COUNT,
       "The number of threads used to run FAult-Tolerant Executions.  These are primarily table operations like merge."),
-  
+
   // properties that are specific to tablet server behavior
   TSERV_PREFIX("tserver.", null, PropertyType.PREFIX, "Properties in this category affect the behavior of the tablet servers"),
   TSERV_CLIENT_TIMEOUT("tserver.client.timeout", "3s", PropertyType.TIMEDURATION, "Time to wait for clients to continue scans before closing a session."),
@@ -212,14 +220,14 @@ public enum Property {
       "The number of threads for the distributed workq.  These threads are used for copying failed bulk files."),
   TSERV_WAL_SYNC("tserver.wal.sync", "true", PropertyType.BOOLEAN,
       "Use the SYNC_BLOCK create flag to sync WAL writes to disk. Prevents problems recovering from sudden system resets."),
-  
+
   // properties that are specific to logger server behavior
   LOGGER_PREFIX("logger.", null, PropertyType.PREFIX, "Properties in this category affect the behavior of the write-ahead logger servers"),
   LOGGER_DIR("logger.dir.walog", "walogs", PropertyType.PATH,
       "The property only needs to be set if upgrading from 1.4 which used to store write-ahead logs on the local filesystem. In 1.5 write-ahead logs are "
           + "stored in DFS.  When 1.5 is started for the first time it will copy any 1.4 write ahead logs into DFS.  It is possible to specify a "
           + "comma-separated list of directories."),
-  
+
   // accumulo garbage collector properties
   GC_PREFIX("gc.", null, PropertyType.PREFIX, "Properties in this category affect the behavior of the accumulo garbage collector."),
   GC_CYCLE_START("gc.cycle.start", "30s", PropertyType.TIMEDURATION, "Time to wait before attempting to garbage collect any old files."),
@@ -228,7 +236,7 @@ public enum Property {
   GC_PORT("gc.port.client", "50091", PropertyType.PORT, "The listening port for the garbage collector's monitor service"),
   GC_DELETE_THREADS("gc.threads.delete", "16", PropertyType.COUNT, "The number of threads used to delete files"),
   GC_TRASH_IGNORE("gc.trash.ignore", "false", PropertyType.BOOLEAN, "Do not use the Trash, even if it is configured"),
-  
+
   // properties that are specific to the monitor server behavior
   MONITOR_PREFIX("monitor.", null, PropertyType.PREFIX, "Properties in this category affect the behavior of the monitor web server."),
   MONITOR_PORT("monitor.port.client", "50095", PropertyType.PORT, "The listening port for the monitor's http service"),
@@ -237,6 +245,7 @@ public enum Property {
   MONITOR_BANNER_COLOR("monitor.banner.color", "#c4c4c4", PropertyType.STRING, "The color of the banner text displayed on the monitor page."),
   MONITOR_BANNER_BACKGROUND("monitor.banner.background", "#304065", PropertyType.STRING,
       "The background color of the banner text displayed on the monitor page."),
+
   @Experimental
   MONITOR_SSL_KEYSTORE("monitor.ssl.keyStore", "", PropertyType.PATH, "The keystore for enabling monitor SSL."),
   @Experimental
@@ -258,7 +267,7 @@ public enum Property {
   TRACE_TOKEN_PROPERTY_PREFIX("trace.token.property", null, PropertyType.PREFIX,
       "The prefix used to create a token for storing distributed traces.  For each propetry required by trace.token.type, place this prefix in front of it."),
   TRACE_TOKEN_TYPE("trace.token.type", PasswordToken.class.getName(), PropertyType.CLASSNAME, "An AuthenticationToken type supported by the authorizer"),
-  
+
   // per table properties
   TABLE_PREFIX("table.", null, PropertyType.PREFIX, "Properties in this category affect tablet server treatment of tablets, but can be configured "
       + "on a per-table basis. Setting these properties in the site file will override the default globally "
@@ -353,7 +362,7 @@ public enum Property {
   TABLE_INTERPRETER_CLASS("table.interepreter", DefaultScanInterpreter.class.getName(), PropertyType.STRING,
       "The ScanInterpreter class to apply on scan arguments in the shell"),
   TABLE_CLASSPATH("table.classpath.context", "", PropertyType.STRING, "Per table classpath context"),
-  
+
   // VFS ClassLoader properties
   VFS_CLASSLOADER_SYSTEM_CLASSPATH_PROPERTY(AccumuloVFSClassLoader.VFS_CLASSLOADER_SYSTEM_CLASSPATH_PROPERTY, "", PropertyType.STRING,
       "Configuration for a system level vfs classloader.  Accumulo jar can be configured here and loaded out of HDFS."),
@@ -376,6 +385,7 @@ public enum Property {
   
   ;
   
+
   private String key, defaultValue, description;
   private PropertyType type;
   static Logger log = Logger.getLogger(Property.class);
@@ -386,20 +396,20 @@ public enum Property {
     this.description = description;
     this.type = type;
   }
-  
+
   @Override
   public String toString() {
     return this.key;
   }
-  
+
   public String getKey() {
     return this.key;
   }
-  
+
   public String getRawDefaultValue() {
     return this.defaultValue;
   }
-  
+
   public String getDefaultValue() {
     if (isInterpolated()) {
       PropertiesConfiguration pconf = new PropertiesConfiguration();
@@ -414,19 +424,19 @@ public enum Property {
       return getRawDefaultValue();
     }
   }
-  
+
   public PropertyType getType() {
     return this.type;
   }
-  
+
   public String getDescription() {
     return this.description;
   }
-  
+
   private boolean isInterpolated() {
     return hasAnnotation(Interpolated.class) || hasPrefixWithAnnotation(getKey(), Interpolated.class);
   }
-  
+
   public boolean isExperimental() {
     return hasAnnotation(Experimental.class) || hasPrefixWithAnnotation(getKey(), Experimental.class);
   }
@@ -472,25 +482,25 @@ public enum Property {
     }
     return false;
   }
-  
+
   private static HashSet<String> validTableProperties = null;
   private static HashSet<String> validProperties = null;
   private static HashSet<String> validPrefixes = null;
-  
+
   private static boolean isKeyValidlyPrefixed(String key) {
     for (String prefix : validPrefixes) {
       if (key.startsWith(prefix))
         return true;
     }
-    
+
     return false;
   }
-  
+
   public synchronized static boolean isValidPropertyKey(String key) {
     if (validProperties == null) {
       validProperties = new HashSet<String>();
       validPrefixes = new HashSet<String>();
-      
+
       for (Property p : Property.values()) {
         if (p.getType().equals(PropertyType.PREFIX)) {
           validPrefixes.add(p.getKey());
@@ -499,10 +509,10 @@ public enum Property {
         }
       }
     }
-    
+
     return validProperties.contains(key) || isKeyValidlyPrefixed(key);
   }
-  
+
   public synchronized static boolean isValidTablePropertyKey(String key) {
     if (validTableProperties == null) {
       validTableProperties = new HashSet<String>();
@@ -512,36 +522,36 @@ public enum Property {
         }
       }
     }
-    
+
     return validTableProperties.contains(key) || key.startsWith(Property.TABLE_CONSTRAINT_PREFIX.getKey())
         || key.startsWith(Property.TABLE_ITERATOR_PREFIX.getKey()) || key.startsWith(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey());
   }
-  
+
   private static final EnumSet<Property> fixedProperties = EnumSet.of(Property.TSERV_CLIENTPORT, Property.TSERV_NATIVEMAP_ENABLED,
       Property.TSERV_SCAN_MAX_OPENFILES, Property.MASTER_CLIENTPORT, Property.GC_PORT);
-  
+
   public static boolean isFixedZooPropertyKey(Property key) {
     return fixedProperties.contains(key);
   }
-  
+
   public static Set<Property> getFixedProperties() {
     return fixedProperties;
   }
-  
+
   public static boolean isValidZooPropertyKey(String key) {
     // white list prefixes
     return key.startsWith(Property.TABLE_PREFIX.getKey()) || key.startsWith(Property.TSERV_PREFIX.getKey()) || key.startsWith(Property.LOGGER_PREFIX.getKey())
         || key.startsWith(Property.MASTER_PREFIX.getKey()) || key.startsWith(Property.GC_PREFIX.getKey())
         || key.startsWith(Property.MONITOR_PREFIX.getKey() + "banner.") || key.startsWith(VFS_CONTEXT_CLASSPATH_PROPERTY.getKey());
   }
-  
+
   public static Property getPropertyByKey(String key) {
     for (Property prop : Property.values())
       if (prop.getKey().equals(key))
         return prop;
     return null;
   }
-  
+
   /**
    * @return true if this is a property whose value is expected to be a java class
    */
@@ -550,11 +560,11 @@ public enum Property {
         || (key.startsWith(Property.TABLE_ITERATOR_PREFIX.getKey()) && key.substring(Property.TABLE_ITERATOR_PREFIX.getKey().length()).split("\\.").length == 2)
         || key.equals(Property.TABLE_LOAD_BALANCER.getKey());
   }
-  
+
   public static <T> T createInstanceFromPropertyName(AccumuloConfiguration conf, Property property, Class<T> base, T defaultInstance) {
     String clazzName = conf.get(property);
     T instance = null;
-    
+
     try {
       Class<? extends T> clazz = AccumuloVFSClassLoader.loadClass(clazzName, base);
       instance = clazz.newInstance();
@@ -562,7 +572,7 @@ public enum Property {
     } catch (Exception e) {
       log.warn("Failed to load class ", e);
     }
-    
+
     if (instance == null) {
       log.info("Using " + defaultInstance.getClass().getName());
       instance = defaultInstance;
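
Among the properties added above, crypto.override.key.strategy.with.configured.strategy changes how previously written files are decrypted, and the example accumulo-site.xml earlier in this commit leaves it at its default of false. A hedged snippet of opting in, in the same site-config format (per the property's own description, only do this after deliberately relocating key material):

    <property>
      <name>crypto.override.key.strategy.with.configured.strategy</name>
      <value>true</value>
    </property>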

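A minimal sketch of how the validation helpers above behave; the property keys shown are illustrative examples, not part of the commit:

import org.apache.accumulo.core.conf.Property;

class PropertyValidationSketch {
  public static void main(String[] args) {
    // A fixed, registered key validates directly against validProperties.
    System.out.println(Property.isValidPropertyKey("table.split.threshold"));

    // Prefixed keys (iterators, constraints, locality groups) validate
    // through their registered prefixes via isKeyValidlyPrefixed.
    System.out.println(Property.isValidTablePropertyKey(
        Property.TABLE_ITERATOR_PREFIX.getKey() + "scan.vers"));
  }
}
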
http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java b/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java
index b8e1337..a1e2572 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java
@@ -90,6 +90,17 @@ public class SiteConfiguration extends AccumuloConfiguration {
     getXmlConfig().clear();
   }
   
+  
+  /**
+   * method here to support testing, do not call
+   */
+  public synchronized void clearAndNull() {
+    if (xmlConfig != null) {
+      xmlConfig.clear();
+      xmlConfig = null;
+    }
+  }
+  
   /**
    * method here to support testing, do not call
    */

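A brief sketch of how a test might use the new clearAndNull() helper to force the cached site configuration to be rebuilt; the surrounding test scaffolding is assumed:

import org.apache.accumulo.core.conf.SiteConfiguration;

class SiteConfigResetSketch {
  // Drops the cached XML-backed state so the next accessor call
  // re-reads the site configuration from scratch.
  static void resetSiteConfig(SiteConfiguration siteConf) {
    siteConf.clearAndNull();
  }
}
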
http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/CachableBlockFile.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/CachableBlockFile.java b/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/CachableBlockFile.java
index d788f39..5e46215 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/CachableBlockFile.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/blockfile/impl/CachableBlockFile.java
@@ -130,7 +130,7 @@ public class CachableBlockFile {
     @Override
     public long getStartPos() throws IOException {
       return _ba.getStartPos();
-    }
+    }    
     
   }
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/BCFile.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/BCFile.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/BCFile.java
index 7277c65..35cd82e 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/BCFile.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/BCFile.java
@@ -28,9 +28,12 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.Map;
 import java.util.TreeMap;
 
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile;
 import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile.BlockRead;
 import org.apache.accumulo.core.file.rfile.bcfile.CompareUtils.Scalar;
@@ -38,6 +41,10 @@ import org.apache.accumulo.core.file.rfile.bcfile.CompareUtils.ScalarComparator;
 import org.apache.accumulo.core.file.rfile.bcfile.CompareUtils.ScalarLong;
 import org.apache.accumulo.core.file.rfile.bcfile.Compression.Algorithm;
 import org.apache.accumulo.core.file.rfile.bcfile.Utils.Version;
+import org.apache.accumulo.core.security.crypto.CryptoModule;
+import org.apache.accumulo.core.security.crypto.CryptoModuleFactory;
+import org.apache.accumulo.core.security.crypto.CryptoModuleParameters;
+import org.apache.accumulo.core.security.crypto.SecretKeyEncryptionStrategy;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -54,7 +61,8 @@ import org.apache.hadoop.io.compress.Decompressor;
 public final class BCFile {
   // the current version of the BCFile impl; increment the major or minor
   // version when enough changes have been made
-  static final Version API_VERSION = new Version((short) 1, (short) 0);
+  static final Version API_VERSION = new Version((short) 2, (short) 0);
+  static final Version API_VERSION_1 = new Version((short) 1, (short) 0);
   static final Log LOG = LogFactory.getLog(BCFile.class);
   
   /**
@@ -70,6 +78,10 @@ public final class BCFile {
   static public class Writer implements Closeable {
     private final FSDataOutputStream out;
     private final Configuration conf;
+    private final CryptoModule cryptoModule;
+    private final Map<String,String> cryptoConf;
+    private BCFileCryptoModuleParameters cryptoParams;
+    private SecretKeyEncryptionStrategy secretKeyEncryptionStrategy;
     // the single meta block containing index of compressed data blocks
     final DataIndex dataIndex;
     // index for meta blocks
@@ -80,6 +92,7 @@ public final class BCFile {
     long errorCount = 0;
     // reusable buffers.
     private BytesWritable fsOutputBuffer;
+
     
     /**
      * Call-back interface to register a block after a block is closed.
@@ -106,6 +119,7 @@ public final class BCFile {
       private Compressor compressor; // !null only if using native
       // Hadoop compression
       private final FSDataOutputStream fsOut;
+      private final OutputStream cipherOut;
       private final long posStart;
       private final SimpleBufferedOutputStream fsBufferedOutput;
       private OutputStream out;
@@ -113,20 +127,60 @@ public final class BCFile {
       /**
        * @param compressionAlgo
        *          The compression algorithm to be used to for compression.
+       * @param cryptoModule the module to use to obtain cryptographic streams
+       * @param cryptoParams the parameters that configure the cipher for this block
        * @throws IOException
        */
-      public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut, BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
+      public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut, BytesWritable fsOutputBuffer, Configuration conf, CryptoModule cryptoModule, CryptoModuleParameters cryptoParams) throws IOException {
         this.compressAlgo = compressionAlgo;
         this.fsOut = fsOut;
         this.posStart = fsOut.getPos();
         
         fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));
-        
+
         this.fsBufferedOutput = new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
+        
+        // *This* is very important.  Without this, when the crypto stream is closed (in order to flush its last bytes),
+        // the underlying RFile stream will *also* be closed, and that's undesirable as the cipher stream is closed for 
+        // every block written.
+        cryptoParams.setCloseUnderylingStreamAfterCryptoStreamClose(false);
+        
+        // *This* is also very important.  We don't want the underlying stream messed with.
+        cryptoParams.setRecordParametersToStream(false);
+        
+        // It is also important to make sure we get a new initialization vector on every call in here,
+        // so set any existing one to null, in case we're reusing a parameters object for its RNG or other bits
+        cryptoParams.setInitializationVector(null);
+        
+        // Initialize the cipher including generating a new IV
+        cryptoParams = cryptoModule.initializeCipher(cryptoParams);
+        
+        // Write the init vector in plain text, uncompressed, to the output stream.  Due to the way the streams work out, there's no good way to write this compressed, but it's pretty small.
+        DataOutputStream tempDataOutputStream = new DataOutputStream(fsBufferedOutput);
+
+        // Init vector might be null if the underlying cipher does not require one (NullCipher being a good example)
+        if (cryptoParams.getInitializationVector() != null) {
+          tempDataOutputStream.writeInt(cryptoParams.getInitializationVector().length);
+          tempDataOutputStream.write(cryptoParams.getInitializationVector());
+        }
+       
+        // Wrap the stream with the encrypting output stream
+        cryptoParams.setPlaintextOutputStream(tempDataOutputStream);
+        cryptoParams = cryptoModule.getEncryptingOutputStream(cryptoParams);
+        
+        if (cryptoParams.getEncryptedOutputStream() == tempDataOutputStream) {
+          this.cipherOut = fsBufferedOutput;
+        } else {        
+          this.cipherOut = cryptoParams.getEncryptedOutputStream();
+        }
+        
+        
         this.compressor = compressAlgo.getCompressor();
         
         try {
-          this.out = compressionAlgo.createCompressionStream(fsBufferedOutput, compressor, 0);
+          this.out = compressionAlgo.createCompressionStream(cipherOut, compressor, 0);
         } catch (IOException e) {
           compressAlgo.returnCompressor(compressor);
           throw e;
@@ -173,6 +227,18 @@ public final class BCFile {
         try {
           if (out != null) {
             out.flush();
+            
+            // If the cipherOut stream is different from the fsBufferedOutput stream, then we likely have
+            // an actual encrypted output stream that needs to be closed in order for it 
+            // to flush the final bytes to the output stream.  We should have set the flag to
+            // make sure that this close does *not* close the underlying stream, so calling
+            // close here should do the right thing.
+            
+            if (fsBufferedOutput != cipherOut) {
+              // Close the cipherOutputStream
+              cipherOut.close();
+            }
+            
             out = null;
           }
         } finally {
@@ -256,7 +322,7 @@ public final class BCFile {
           closed = true;
           blkInProgress = false;
         }
-      }
+      }     
     }
     
     /**
@@ -280,6 +346,29 @@ public final class BCFile {
       metaIndex = new MetaIndex();
       fsOutputBuffer = new BytesWritable();
       Magic.write(fout);
+      
+      // Set up crypto-related detail, including secret key generation and encryption
+      
+      @SuppressWarnings("deprecation")
+      AccumuloConfiguration accumuloConfiguration = AccumuloConfiguration.getSiteConfiguration();
+      this.cryptoConf = accumuloConfiguration.getAllPropertiesWithPrefix(Property.CRYPTO_PREFIX);
+
+      this.cryptoModule = CryptoModuleFactory.getCryptoModule(accumuloConfiguration);
+      Map<String,String> instanceProperties = accumuloConfiguration.getAllPropertiesWithPrefix(Property.INSTANCE_PREFIX);
+      if (instanceProperties != null) {
+        this.cryptoConf.putAll(instanceProperties);
+      }
+
+      this.cryptoParams = new BCFileCryptoModuleParameters();
+      CryptoModuleFactory.fillParamsObjectFromStringMap(cryptoParams, cryptoConf);
+      this.cryptoParams = (BCFileCryptoModuleParameters) cryptoModule.generateNewRandomSessionKey(cryptoParams);
+      
+      this.secretKeyEncryptionStrategy = CryptoModuleFactory.getSecretKeyEncryptionStrategy(accumuloConfiguration);      
+      this.cryptoParams = (BCFileCryptoModuleParameters) secretKeyEncryptionStrategy.encryptSecretKey(cryptoParams);
+      
     }
     
     /**
@@ -306,11 +395,21 @@ public final class BCFile {
           
           long offsetIndexMeta = out.getPos();
           metaIndex.write(out);
-          
-          // Meta Index and the trailing section are written out directly.
-          out.writeLong(offsetIndexMeta);
-          
-          API_VERSION.write(out);
+
+          if (cryptoParams.getAlgorithmName() == null || 
+              cryptoParams.getAlgorithmName().equals(Property.CRYPTO_CIPHER_SUITE.getDefaultValue())) {
+            out.writeLong(offsetIndexMeta);
+            API_VERSION_1.write(out);
+          } else {
+            long offsetCryptoParameters = out.getPos();
+            cryptoParams.write(out);
+            
+            // Meta Index, crypto params offsets and the trailing section are written out directly.
+            out.writeLong(offsetIndexMeta);
+            out.writeLong(offsetCryptoParameters);
+            API_VERSION.write(out);
+          }
+
           Magic.write(out);
           out.flush();
         }
@@ -333,7 +432,7 @@ public final class BCFile {
       }
       
       MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
-      WBlockState wbs = new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
+      WBlockState wbs = new WBlockState(compressAlgo, out, fsOutputBuffer, conf, cryptoModule, cryptoParams);
       BlockAppender ba = new BlockAppender(mbr, wbs);
       blkInProgress = true;
       metaBlkSeen = true;
@@ -391,7 +490,7 @@ public final class BCFile {
       
       DataBlockRegister dbr = new DataBlockRegister();
       
-      WBlockState wbs = new WBlockState(getDefaultCompressionAlgorithm(), out, fsOutputBuffer, conf);
+      WBlockState wbs = new WBlockState(getDefaultCompressionAlgorithm(), out, fsOutputBuffer, conf, cryptoModule, cryptoParams);
       BlockAppender ba = new BlockAppender(dbr, wbs);
       blkInProgress = true;
       return ba;
@@ -429,17 +528,67 @@ public final class BCFile {
     }
   }
   
+  private static class BCFileCryptoModuleParameters extends CryptoModuleParameters {
+
+    public void write(DataOutput out) throws IOException {
+      // Write out the context
+      out.writeInt(getAllOptions().size());
+      for (String key : getAllOptions().keySet()) {
+        out.writeUTF(key);
+        out.writeUTF(getAllOptions().get(key));
+      }
+      
+      // Write the opaque ID
+      out.writeUTF(getOpaqueKeyEncryptionKeyID());
+      
+      // Write the encrypted secret key
+      out.writeInt(getEncryptedKey().length);
+      out.write(getEncryptedKey());
+      
+    }
+    
+    public void read(DataInput in) throws IOException {
+      
+      Map<String,String> optionsFromFile = new HashMap<String,String>();
+      
+      int numContextEntries = in.readInt();
+      for (int i = 0; i < numContextEntries; i++) {
+        optionsFromFile.put(in.readUTF(), in.readUTF());
+      }
+      
+      CryptoModuleFactory.fillParamsObjectFromStringMap(this, optionsFromFile);
+      
+      // Read opaque key encryption ID
+      setOpaqueKeyEncryptionKeyID(in.readUTF());
+      
+      // Read encrypted secret key
+      int encryptedSecretKeyLength = in.readInt();
+      byte[] encryptedSecretKey = new byte[encryptedSecretKeyLength];
+      in.readFully(encryptedSecretKey);
+      setEncryptedKey(encryptedSecretKey);
+      
+    }
+
+    
+  }
+    
   /**
    * BCFile Reader, interface to read the file's data and meta blocks.
    */
   static public class Reader implements Closeable {
     private static final String META_NAME = "BCFile.metaindex";
+    private static final String CRYPTO_BLOCK_NAME = "BCFile.cryptoparams";
     private final FSDataInputStream in;
     private final Configuration conf;
     final DataIndex dataIndex;
     // Index for meta blocks
     final MetaIndex metaIndex;
     final Version version;
+    private BCFileCryptoModuleParameters cryptoParams;
+    private CryptoModule cryptoModule;
+    private SecretKeyEncryptionStrategy secretKeyEncryptionStrategy;
     
     /**
      * Intermediate class that maintain the state of a Readable Compression Block.
@@ -450,14 +599,38 @@ public final class BCFile {
       private final BlockRegion region;
       private final InputStream in;
       
-      public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin, BlockRegion region, Configuration conf) throws IOException {
+      public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin, BlockRegion region, Configuration conf, CryptoModule cryptoModule, Version bcFileVersion, CryptoModuleParameters cryptoParams) throws IOException {
         this.compressAlgo = compressionAlgo;
         this.region = region;
         this.decompressor = compressionAlgo.getDecompressor();
         
+        BoundedRangeFileInputStream boundedRangeFileInputStream = new BoundedRangeFileInputStream(fsin, this.region.getOffset(), this.region.getCompressedSize());
+        InputStream inputStreamToBeCompressed = boundedRangeFileInputStream;
+        
+        if (cryptoParams != null && cryptoModule != null) {
+          DataInputStream tempDataInputStream = new DataInputStream(boundedRangeFileInputStream);
+          // Read the init vector from the front of the stream before initializing the cipher stream
+          
+          int ivLength = tempDataInputStream.readInt();
+          byte[] initVector = new byte[ivLength];
+          tempDataInputStream.readFully(initVector); // readFully guards against short reads
+          
+          cryptoParams.setInitializationVector(initVector);
+          cryptoParams.setEncryptedInputStream(boundedRangeFileInputStream);
+          
+          // These two flags mirror those in WBlockState and must be set so that
+          // the underlying stream is handled correctly.
+          cryptoParams.setCloseUnderylingStreamAfterCryptoStreamClose(false);
+          cryptoParams.setRecordParametersToStream(false);
+          
+          cryptoParams = cryptoModule.getDecryptingInputStream(cryptoParams);
+          inputStreamToBeCompressed = cryptoParams.getPlaintextInputStream();
+        }
+        
         try {
-          this.in = compressAlgo.createDecompressionStream(new BoundedRangeFileInputStream(fsin, this.region.getOffset(), this.region.getCompressedSize()),
-              decompressor, TFile.getFSInputBufferSize(conf));
+          this.in = compressAlgo.createDecompressionStream(inputStreamToBeCompressed, decompressor, TFile.getFSInputBufferSize(conf));
         } catch (IOException e) {
           compressAlgo.returnDecompressor(decompressor);
           throw e;
@@ -567,23 +740,81 @@ public final class BCFile {
      * @throws IOException
      */
     public Reader(FSDataInputStream fin, long fileLength, Configuration conf) throws IOException {
       this.in = fin;
       this.conf = conf;
       
-      // move the cursor to the beginning of the tail, containing: offset to the
-      // meta block index, version and magic
-      fin.seek(fileLength - Magic.size() - Version.size() - Long.SIZE / Byte.SIZE);
-      long offsetIndexMeta = fin.readLong();
+      
+      // Move the cursor to grab the version and the magic first
+      fin.seek(fileLength - Magic.size() - Version.size());
       version = new Version(fin);
       Magic.readAndVerify(fin);
-      
-      if (!version.compatibleWith(BCFile.API_VERSION)) {
+
+      // Do a version check
+      if (!version.compatibleWith(BCFile.API_VERSION) && !version.equals(BCFile.API_VERSION_1)) {
         throw new RuntimeException("Incompatible BCFile fileBCFileVersion.");
       }
       
+      // Read the right number of offsets based on the version
+      long offsetIndexMeta = 0;
+      long offsetCryptoParameters = 0;
+      
+      if (version.equals(API_VERSION_1)) {
+        fin.seek(fileLength - Magic.size() - Version.size() - ( Long.SIZE / Byte.SIZE ) );
+        offsetIndexMeta = fin.readLong();
+       
+      } else {
+        fin.seek(fileLength - Magic.size() - Version.size() - ( 2 * ( Long.SIZE / Byte.SIZE ) ));
+        offsetIndexMeta = fin.readLong();
+        offsetCryptoParameters = fin.readLong();
+      }
+      
+      
       // read meta index
       fin.seek(offsetIndexMeta);
       metaIndex = new MetaIndex(fin);
+
+      // If they exist, read the crypto parameters
+      if (!version.equals(BCFile.API_VERSION_1)) {
+         
+        @SuppressWarnings("deprecation")
+        AccumuloConfiguration accumuloConfiguration = AccumuloConfiguration.getSiteConfiguration();
+        
+        // read crypto parameters
+        fin.seek(offsetCryptoParameters);
+        cryptoParams = new BCFileCryptoModuleParameters();
+        cryptoParams.read(fin);
+        
+        this.cryptoModule = CryptoModuleFactory.getCryptoModule(cryptoParams.getAllOptions().get(Property.CRYPTO_MODULE_CLASS.getKey()));
+        
+        // TODO: confirm whether overriding the stored key encryption strategy with the configured one is required here.
+        if (accumuloConfiguration.getBoolean(Property.CRYPTO_OVERRIDE_KEY_STRATEGY_WITH_CONFIGURED_STRATEGY)) {
+          Map<String,String> cryptoConfFromAccumuloConf = accumuloConfiguration.getAllPropertiesWithPrefix(Property.CRYPTO_PREFIX);
+          Map<String,String> instanceConf = accumuloConfiguration.getAllPropertiesWithPrefix(Property.INSTANCE_PREFIX);
+          
+          cryptoConfFromAccumuloConf.putAll(instanceConf);
+          
+          for (String name : cryptoParams.getAllOptions().keySet()) {
+            if (!name.equals(Property.CRYPTO_SECRET_KEY_ENCRYPTION_STRATEGY_CLASS.getKey())) {
+              cryptoConfFromAccumuloConf.put(name, cryptoParams.getAllOptions().get(name));
+            } else {
+              cryptoParams.setKeyEncryptionStrategyClass(cryptoConfFromAccumuloConf.get(Property.CRYPTO_SECRET_KEY_ENCRYPTION_STRATEGY_CLASS.getKey()));
+            }
+          }
+          
+          cryptoParams.setAllOptions(cryptoConfFromAccumuloConf);
+        }
+        
+        this.secretKeyEncryptionStrategy = CryptoModuleFactory.getSecretKeyEncryptionStrategy(cryptoParams.getKeyEncryptionStrategyClass());
+  
+        // This call should put the decrypted session key within the cryptoParameters object
+        cryptoParams = (BCFileCryptoModuleParameters) secretKeyEncryptionStrategy.decryptSecretKey(cryptoParams);
+        
+      } else {
+        LOG.trace("Found a version 1 file to read.");
+      }
       
       // read data:BCFile.index, the data block index
       BlockReader blockR = getMetaBlock(DataIndex.BLOCK_NAME);
@@ -600,22 +831,98 @@ public final class BCFile {
       
       BlockRead cachedMetaIndex = cache.getCachedMetaBlock(META_NAME);
       BlockRead cachedDataIndex = cache.getCachedMetaBlock(DataIndex.BLOCK_NAME);
+      BlockRead cachedCryptoParams = cache.getCachedMetaBlock(CRYPTO_BLOCK_NAME);
       
-      if (cachedMetaIndex == null || cachedDataIndex == null) {
+      if (cachedMetaIndex == null || cachedDataIndex == null || cachedCryptoParams == null) {
         // move the cursor to the beginning of the tail, containing: offset to the
         // meta block index, version and magic
-        fin.seek(fileLength - Magic.size() - Version.size() - Long.SIZE / Byte.SIZE);
-        long offsetIndexMeta = fin.readLong();
+        // Move the cursor to grab the version and the magic first
+        fin.seek(fileLength - Magic.size() - Version.size());
         version = new Version(fin);
         Magic.readAndVerify(fin);
-        
-        if (!version.compatibleWith(BCFile.API_VERSION)) {
+
+        // Do a version check
+        if (!version.compatibleWith(BCFile.API_VERSION) && !version.equals(BCFile.API_VERSION_1)) {
           throw new RuntimeException("Incompatible BCFile fileBCFileVersion.");
         }
         
+        // Read the right number of offsets based on the version
+        long offsetIndexMeta = 0;
+        long offsetCryptoParameters = 0;
+        
+        if (version.equals(API_VERSION_1)) {
+          fin.seek(fileLength - Magic.size() - Version.size() - ( Long.SIZE / Byte.SIZE ) );
+          offsetIndexMeta = fin.readLong();
+         
+        } else {
+          fin.seek(fileLength - Magic.size() - Version.size() - ( 2 * ( Long.SIZE / Byte.SIZE ) ));
+          offsetIndexMeta = fin.readLong();
+          offsetCryptoParameters = fin.readLong();
+        }
+           
         // read meta index
         fin.seek(offsetIndexMeta);
         metaIndex = new MetaIndex(fin);
+        
+        // If they exist, read the crypto parameters
+        if (!version.equals(BCFile.API_VERSION_1) && cachedCryptoParams == null) {
+          
+          @SuppressWarnings("deprecation")
+          AccumuloConfiguration accumuloConfiguration = AccumuloConfiguration.getSiteConfiguration();
+
+          
+          // read crypto parameters
+          fin.seek(offsetCryptoParameters);
+          cryptoParams = new BCFileCryptoModuleParameters();
+          cryptoParams.read(fin);
+          
+          
+          if (accumuloConfiguration.getBoolean(Property.CRYPTO_OVERRIDE_KEY_STRATEGY_WITH_CONFIGURED_STRATEGY)) {
+            Map<String,String> cryptoConfFromAccumuloConf = accumuloConfiguration.getAllPropertiesWithPrefix(Property.CRYPTO_PREFIX);
+            Map<String,String> instanceConf = accumuloConfiguration.getAllPropertiesWithPrefix(Property.INSTANCE_PREFIX);
+            
+            cryptoConfFromAccumuloConf.putAll(instanceConf);
+            
+            for (String name : cryptoParams.getAllOptions().keySet()) {
+              if (!name.equals(Property.CRYPTO_SECRET_KEY_ENCRYPTION_STRATEGY_CLASS.getKey())) {
+                cryptoConfFromAccumuloConf.put(name, cryptoParams.getAllOptions().get(name));
+              } else {
+                cryptoParams.setKeyEncryptionStrategyClass(cryptoConfFromAccumuloConf.get(Property.CRYPTO_SECRET_KEY_ENCRYPTION_STRATEGY_CLASS.getKey()));
+              }
+            }
+            
+            cryptoParams.setAllOptions(cryptoConfFromAccumuloConf);
+          }
+          
+          
+          ByteArrayOutputStream baos = new ByteArrayOutputStream();
+          DataOutputStream dos = new DataOutputStream(baos);
+          cryptoParams.write(dos);
+          dos.close();
+          cache.cacheMetaBlock(CRYPTO_BLOCK_NAME, baos.toByteArray());
+          
+          this.cryptoModule = CryptoModuleFactory.getCryptoModule(cryptoParams.getAllOptions().get(Property.CRYPTO_MODULE_CLASS.getKey()));
+          this.secretKeyEncryptionStrategy = CryptoModuleFactory.getSecretKeyEncryptionStrategy(cryptoParams.getKeyEncryptionStrategyClass());
+    
+          // This call should put the decrypted session key within the cryptoParameters object
+          
+          cryptoParams = (BCFileCryptoModuleParameters) secretKeyEncryptionStrategy.decryptSecretKey(cryptoParams);
+          
+        } else if (cachedCryptoParams != null) {
+          cryptoParams = new BCFileCryptoModuleParameters();
+          cryptoParams.read(cachedCryptoParams);
+          
+          this.cryptoModule = CryptoModuleFactory.getCryptoModule(cryptoParams.getAllOptions().get(Property.CRYPTO_MODULE_CLASS.getKey()));
+          this.secretKeyEncryptionStrategy = CryptoModuleFactory.getSecretKeyEncryptionStrategy(cryptoParams.getKeyEncryptionStrategyClass());
+    
+          // This call should put the decrypted session key within the cryptoParameters object
+          
+          cryptoParams = (BCFileCryptoModuleParameters) secretKeyEncryptionStrategy.decryptSecretKey(cryptoParams);
+            
+        }
+        
         if (cachedMetaIndex == null) {
           ByteArrayOutputStream baos = new ByteArrayOutputStream();
           DataOutputStream dos = new DataOutputStream(baos);
@@ -630,14 +937,32 @@ public final class BCFile {
           cachedDataIndex = cache.cacheMetaBlock(DataIndex.BLOCK_NAME, blockR);
         }
         
-        dataIndex = new DataIndex(cachedDataIndex);
+        
+        try {
+          dataIndex = new DataIndex(cachedDataIndex);
+        } catch (IOException e) {
+          LOG.error("Got IOException when trying to create DataIndex block");
+          throw e;
+        }
         cachedDataIndex.close();
         
       } else {
-        // Logger.getLogger(Reader.class).debug("Read bcfile !METADATA from cache");
+        // We have cached versions of the metaIndex, dataIndex and cryptoParams objects.
+        // Use them to fill out this reader's members.
         version = null;
+        
         metaIndex = new MetaIndex(cachedMetaIndex);
         dataIndex = new DataIndex(cachedDataIndex);
+        cryptoParams = new BCFileCryptoModuleParameters();
+        cryptoParams.read(cachedCryptoParams);
+        
+        this.cryptoModule = CryptoModuleFactory.getCryptoModule(cryptoParams.getAllOptions().get(Property.CRYPTO_MODULE_CLASS.getKey()));
+        this.secretKeyEncryptionStrategy = CryptoModuleFactory.getSecretKeyEncryptionStrategy(cryptoParams.getKeyEncryptionStrategyClass());
+  
+        // This call should put the decrypted session key within the cryptoParameters object
+        cryptoParams = (BCFileCryptoModuleParameters) secretKeyEncryptionStrategy.decryptSecretKey(cryptoParams);
+
+        
       }
     }
     
@@ -727,7 +1052,7 @@ public final class BCFile {
     }
     
     private BlockReader createReader(Algorithm compressAlgo, BlockRegion region) throws IOException {
-      RBlockState rbs = new RBlockState(compressAlgo, in, region, conf);
+      RBlockState rbs = new RBlockState(compressAlgo, in, region, conf, cryptoModule, version, cryptoParams);
       return new BlockReader(rbs);
     }
     

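The trailer handling above amounts to a small layout change: version 1 files end with one offset, version 2 files with two. A stand-alone sketch of the read side (Magic/Version sizes and the seekable input are stand-ins for the real classes, not the commit's API):

import java.io.DataInput;
import java.io.IOException;

class BCFileTailSketch {
  // Minimal stand-in for FSDataInputStream's seek + readLong contract.
  interface SeekableInput extends DataInput {
    void seek(long pos) throws IOException;
  }

  // Version 1 tail: ... metaIndex | offsetIndexMeta | version | magic
  // Version 2 tail: ... metaIndex | cryptoParams | offsetIndexMeta
  //                     | offsetCryptoParameters | version | magic
  static long[] readTailOffsets(SeekableInput in, long fileLength,
      int magicSize, int versionSize, boolean isVersion1) throws IOException {
    if (isVersion1) {
      in.seek(fileLength - magicSize - versionSize - Long.SIZE / Byte.SIZE);
      return new long[] {in.readLong()};
    }
    in.seek(fileLength - magicSize - versionSize - 2 * (Long.SIZE / Byte.SIZE));
    return new long[] {in.readLong(), in.readLong()};
  }
}
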
http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/Compression.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/Compression.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/Compression.java
index 912824c..e89bb40 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/Compression.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/bcfile/Compression.java
@@ -39,7 +39,7 @@ import org.apache.hadoop.util.ReflectionUtils;
 /**
  * Compression related stuff.
  */
-final class Compression {
+public final class Compression {
   static final Log LOG = LogFactory.getLog(Compression.class);
   
   /**
@@ -71,7 +71,7 @@ final class Compression {
   /**
    * Compression algorithms.
    */
-  static enum Algorithm {
+  public static enum Algorithm {
     LZO(TFile.COMPRESSION_LZO) {
       private transient boolean checked = false;
       private static final String defaultClazz = "org.apache.hadoop.io.compress.LzoCodec";

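With Compression and Compression.Algorithm now public, callers outside the bcfile package can reference an algorithm directly; a short sketch (GZ is shown as a representative member):

import org.apache.accumulo.core.file.rfile.bcfile.Compression;

class CompressionVisibilitySketch {
  // Previously package-private; now resolvable from any package.
  static Compression.Algorithm pickDefault() {
    return Compression.Algorithm.GZ;
  }
}
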
http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModule.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModule.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModule.java
index fca7d22..01f7888 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModule.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModule.java
@@ -19,92 +19,80 @@ package org.apache.accumulo.core.security.crypto;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.util.Map;
+
+import javax.crypto.Cipher;
+import javax.crypto.CipherInputStream;
+import javax.crypto.CipherOutputStream;
 
 /**
  * Classes that obey this interface may be used to provide encrypting and decrypting streams to the rest of Accumulo. Classes that obey this interface may be
  * configured as the crypto module by setting the property crypto.module.class in the accumulo-site.xml file.
  * 
- * Note that this first iteration of this API is considered deprecated because we anticipate it changing in non-backwards compatible ways as we explore the
- * requirements for encryption in Accumulo. So, your mileage is gonna vary a lot as we go forward.
  * 
  */
-@Deprecated
 public interface CryptoModule {
   
-  public enum CryptoInitProperty {
-    ALGORITHM_NAME("algorithm.name"), CIPHER_SUITE("cipher.suite"), INITIALIZATION_VECTOR("initialization.vector"), PLAINTEXT_SESSION_KEY(
-        "plaintext.session.key");
-    
-    private CryptoInitProperty(String name) {
-      key = name;
-    }
-    
-    private String key;
-    
-    public String getKey() {
-      return key;
-    }
-  }
-  
   /**
-   * Wraps an OutputStream in an encrypting OutputStream. The given map contains the settings for the cryptographic algorithm to use. <b>Callers of this method
-   * should expect that the given OutputStream will be written to before cryptographic writes occur.</b> These writes contain the cryptographic information used
-   * to encrypt the following bytes (these data include the initialization vector, encrypted session key, and so on). If writing arbitrarily to the underlying
-   * stream is not desirable, users should call the other flavor of getEncryptingOutputStream which accepts these data as parameters.
+   * Takes a {@link CryptoModuleParameters} object containing an {@link OutputStream} to wrap within a {@link CipherOutputStream}. The various other parts of the
+   * {@link CryptoModuleParameters} object specify the details about the type of encryption to use. Callers should pay special attention to the
+   * {@link CryptoModuleParameters#getRecordParametersToStream()} and {@link CryptoModuleParameters#getCloseUnderylingStreamAfterCryptoStreamClose()} flags
+   * within the {@link CryptoModuleParameters} object, as they control whether or not this method will write to the given {@link OutputStream} in
+   * {@link CryptoModuleParameters#getPlaintextOutputStream()}.
+   * 
+   * <p>
+   * 
+   * This method returns a {@link CryptoModuleParameters} object. Implementers of this interface maintain a contract that the returned object is <i>the same</i>
+   * as the one passed in, always. Return values are enclosed within that object, as some other calls will typically return more than one value.
    * 
-   * @param out
-   *          the OutputStream to wrap
-   * @param cryptoOpts
-   *          the cryptographic parameters to use; specific string names to look for will depend on the various implementations
-   * @return an OutputStream that wraps the given parameter
+   * @param params
+   *          the {@link CryptoModuleParameters} object that specifies how to set up the encrypted stream.
+   * @return the same {@link CryptoModuleParameters} object with the {@link CryptoModuleParameters#getEncryptedOutputStream()} set to a stream that is not null.
+   *         That stream may be exactly the same stream as {@link CryptoModuleParameters#getPlaintextOutputStream()} if the params object specifies no cryptography.
    * @throws IOException
    */
-  public OutputStream getEncryptingOutputStream(OutputStream out, Map<String,String> cryptoOpts) throws IOException;
+  public CryptoModuleParameters getEncryptingOutputStream(CryptoModuleParameters params) throws IOException;
+  
+  
   
   /**
-   * Wraps an InputStream and returns a decrypting input stream. The given map contains the settings for the intended cryptographic operations, but implementors
-   * should take care to ensure that the crypto from the given input stream matches their expectations about what they will use to decrypt it, as the parameters
-   * may have changed. Also, care should be taken around transitioning between non-encrypting and encrypting streams; implementors should handle the case where
-   * the given input stream is <b>not</b> encrypted at all.
+   * Takes a {@link CryptoModuleParameters} object containing an {@link InputStream} to wrap within a {@link CipherInputStream}. The various other parts of the
+   * {@link CryptoModuleParameters} object specify the details about the type of encryption to use. Callers should pay special attention to the
+   * {@link CryptoModuleParameters#getRecordParametersToStream()} and {@link CryptoModuleParameters#getCloseUnderylingStreamAfterCryptoStreamClose()} flags
+   * within the {@link CryptoModuleParameters} object, as they control whether or not this method will read from the given {@link InputStream} in
+   * {@link CryptoModuleParameters#getEncryptedInputStream()}.
    * 
-   * It is expected that this version of getDecryptingInputStream is called in conjunction with the getEncryptingOutputStream from above. It should expect its
-   * input streams to contain the data written by getEncryptingOutputStream.
+   * <p>
    * 
-   * @param in
-   *          the InputStream to wrap
-   * @param cryptoOpts
-   *          the cryptographic parameters to use; specific string names to look for will depend on the various implementations
-   * @return an InputStream that wraps the given parameter
+   * This method returns a {@link CryptoModuleParameters} object. Implementers of this interface maintain a contract that the returned object is <i>the same</i>
+   * as the one passed in, always. Return values are enclosed within that object, as some other calls will typically return more than one value.
+   * 
+   * @param params
+   *          the {@link CryptoModuleParameters} object that specifies how to set up the encrypted stream.
+   * @return the same {@link CryptoModuleParameters} object with the {@link CryptoModuleParameters#getPlaintextInputStream()} set to a stream that is not null.
+   *         That stream may be exactly the same stream as {@link CryptoModuleParameters#getEncryptedInputStream()} if the params object specifies no cryptography.
    * @throws IOException
    */
-  public InputStream getDecryptingInputStream(InputStream in, Map<String,String> cryptoOpts) throws IOException;
+  public CryptoModuleParameters getDecryptingInputStream(CryptoModuleParameters params) throws IOException;
+
   
   /**
-   * Wraps an OutputStream in an encrypting OutputStream. The given map contains the settings for the cryptographic algorithm to use. The cryptoInitParams map
-   * contains all the cryptographic details to construct a key (or keys), initialization vectors, etc. and use them to properly initialize the stream for
-   * writing. These initialization parameters must be persisted elsewhere, along with the cryptographic configuration (algorithm, mode, etc.), so that they may
-   * be read in at the time of reading the encrypted content.
+   * Generates a random session key and sets it into the {@link CryptoModuleParameters#getPlaintextKey()} property.  Saves callers from 
+   * having to set up their own secure random provider.  Also will set the {@link CryptoModuleParameters#getSecureRandom()} property if it
+   * has not already been set by some other function.
    * 
-   * @param out
-   *          the OutputStream to wrap
-   * @param conf
-   *          the cryptographic algorithm configuration
-   * @param cryptoInitParams
-   *          the initialization parameters for the algorithm, usually including initialization vector and session key
-   * @return a wrapped output stream
+   * @param params a {@link CryptoModuleParameters} object containing a correctly instantiated set of properties.
+   * @return the same {@link CryptoModuleParameters} object with the plaintext key set
    */
-  public OutputStream getEncryptingOutputStream(OutputStream out, Map<String,String> conf, Map<CryptoModule.CryptoInitProperty,Object> cryptoInitParams);
+  public CryptoModuleParameters generateNewRandomSessionKey(CryptoModuleParameters params);
   
   /**
-   * Wraps an InputStream and returns a decrypting input stream. The given map contains the settings for the intended cryptographic operations, but implementors
-   * should take care to ensure that the crypto from the given input stream matches their expectations about what they will use to decrypt it, as the parameters
-   * may have changed. Also, care should be taken around transitioning between non-encrypting and encrypting streams; implementors should handle the case where
-   * the given input stream is <b>not</b> encrypted at all.
+   * Generates a {@link Cipher} object based on the parameters in the given {@link CryptoModuleParameters} object and places it into the
+   * {@link CryptoModuleParameters#getCipher()} property. Callers may choose to use this method if they want to get the initialization
+   * vector from the cipher before proceeding to create wrapped streams.  
    * 
-   * The cryptoInitParams contains all necessary information to properly initialize the given cipher, usually including things like initialization vector and
-   * secret key.
+   * @param params a {@link CryptoModuleParameters} object containing a correctly instantiated set of properties.
+   * @return the same {@link CryptoModuleParameters} object with the cipher set.
    */
-  public InputStream getDecryptingInputStream(InputStream in, Map<String,String> cryptoOpts, Map<CryptoModule.CryptoInitProperty,Object> cryptoInitParams)
-      throws IOException;
+  public CryptoModuleParameters initializeCipher(CryptoModuleParameters params);
+ 
 }

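A sketch of the intended call sequence for the reworked single-object interface: generate a session key, initialize the cipher, then wrap a plaintext stream. The helper name and wiring are illustrative only; the interface methods are those declared above:

import java.io.IOException;
import java.io.OutputStream;

import org.apache.accumulo.core.security.crypto.CryptoModule;
import org.apache.accumulo.core.security.crypto.CryptoModuleParameters;

class CryptoModuleUsageSketch {
  // Wraps 'sink' in an encrypting stream using the params-object API.
  static OutputStream encryptTo(CryptoModule module, CryptoModuleParameters params,
      OutputStream sink) throws IOException {
    params = module.generateNewRandomSessionKey(params); // fills the plaintext key
    params = module.initializeCipher(params);            // builds the Cipher (and IV)
    params.setPlaintextOutputStream(sink);
    // Contract: the same params object comes back, with the wrapped stream set.
    params = module.getEncryptingOutputStream(params);
    return params.getEncryptedOutputStream();
  }
}
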
http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java
index 2f03e02..40f2c1e 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleFactory.java
@@ -17,8 +17,6 @@
 package org.apache.accumulo.core.security.crypto;
 
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
 import java.util.Map;
 
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
@@ -29,9 +27,8 @@ import org.apache.log4j.Logger;
 /**
  * This factory module exists to assist other classes in loading crypto modules.
  * 
- * @deprecated This feature is experimental and may go away in future versions.
+ * 
  */
-@Deprecated
 public class CryptoModuleFactory {
   
   private static Logger log = Logger.getLogger(CryptoModuleFactory.class);
@@ -50,9 +47,14 @@ public class CryptoModuleFactory {
   
   @SuppressWarnings({"rawtypes"})
   public static CryptoModule getCryptoModule(String cryptoModuleClassname) {
-    log.debug(String.format("About to instantiate crypto module %s", cryptoModuleClassname));
     
-    if (cryptoModuleClassname.equals("NullCryptoModule")) {
+    if (cryptoModuleClassname != null) {
+      cryptoModuleClassname = cryptoModuleClassname.trim();
+    }
+    
+    log.trace(String.format("About to instantiate crypto module %s", cryptoModuleClassname));
+    
+    if (cryptoModuleClassname == null || cryptoModuleClassname.equals("NullCryptoModule")) {
       return new NullCryptoModule();
     }
     
@@ -61,7 +63,7 @@ public class CryptoModuleFactory {
     try {
       cryptoModuleClazz = AccumuloVFSClassLoader.loadClass(cryptoModuleClassname);
     } catch (ClassNotFoundException e1) {
-      log.warn(String.format("Could not find configured crypto module \"%s\".  NO ENCRYPTION WILL BE USED.", cryptoModuleClassname));
+      log.warn(String.format("Could not find configured crypto module \"%s\".  No encryption will be used.", cryptoModuleClassname));
       return new NullCryptoModule();
     }
     
@@ -77,21 +79,21 @@ public class CryptoModuleFactory {
     }
     
     if (!implementsCryptoModule) {
-      log.warn("Configured Accumulo crypto module \"%s\" does not implement the CryptoModule interface. NO ENCRYPTION WILL BE USED.");
+      log.warn("Configured Accumulo crypto module \"%s\" does not implement the CryptoModule interface. No encryption will be used.");
       return new NullCryptoModule();
     } else {
       try {
         cryptoModule = (CryptoModule) cryptoModuleClazz.newInstance();
         
-        log.debug("Successfully instantiated crypto module");
+        log.trace("Successfully instantiated crypto module");
         
       } catch (InstantiationException e) {
-        log.warn(String.format("Got instantiation exception %s when instantiating crypto module \"%s\".  NO ENCRYPTION WILL BE USED.", e.getCause().getClass()
+        log.warn(String.format("Got instantiation exception %s when instantiating crypto module \"%s\".  No encryption will be used.", e.getCause().getClass()
             .getCanonicalName(), cryptoModuleClassname));
         log.warn(e.getCause());
         return new NullCryptoModule();
       } catch (IllegalAccessException e) {
-        log.warn(String.format("Got illegal access exception when trying to instantiate crypto module \"%s\".  NO ENCRYPTION WILL BE USED.",
+        log.warn(String.format("Got illegal access exception when trying to instantiate crypto module \"%s\".  No encryption will be used.",
             cryptoModuleClassname));
         log.warn(e);
         return new NullCryptoModule();
@@ -107,6 +109,11 @@ public class CryptoModuleFactory {
   
   @SuppressWarnings("rawtypes")
   public static SecretKeyEncryptionStrategy getSecretKeyEncryptionStrategy(String className) {
+    
+    if (className != null) {
+      className = className.trim();
+    }
+    
     if (className == null || className.equals("NullSecretKeyEncryptionStrategy")) {
       return new NullSecretKeyEncryptionStrategy();
     }
@@ -116,7 +123,7 @@ public class CryptoModuleFactory {
     try {
       keyEncryptionStrategyClazz = AccumuloVFSClassLoader.loadClass(className);
     } catch (ClassNotFoundException e1) {
-      log.warn(String.format("Could not find configured secret key encryption strategy \"%s\".  NO ENCRYPTION WILL BE USED.", className));
+      log.warn(String.format("Could not find configured secret key encryption strategy \"%s\".  No encryption will be used.", className));
       return new NullSecretKeyEncryptionStrategy();
     }
     
@@ -132,21 +139,21 @@ public class CryptoModuleFactory {
     }
     
     if (!implementsSecretKeyStrategy) {
-      log.warn("Configured Accumulo secret key encryption strategy \"%s\" does not implement the SecretKeyEncryptionStrategy interface. NO ENCRYPTION WILL BE USED.");
+      log.warn("Configured Accumulo secret key encryption strategy \"%s\" does not implement the SecretKeyEncryptionStrategy interface. No encryption will be used.");
       return new NullSecretKeyEncryptionStrategy();
     } else {
       try {
         strategy = (SecretKeyEncryptionStrategy) keyEncryptionStrategyClazz.newInstance();
         
-        log.debug("Successfully instantiated secret key encryption strategy");
+        log.trace("Successfully instantiated secret key encryption strategy");
         
       } catch (InstantiationException e) {
-        log.warn(String.format("Got instantiation exception %s when instantiating secret key encryption strategy \"%s\".  NO ENCRYPTION WILL BE USED.", e
+        log.warn(String.format("Got instantiation exception %s when instantiating secret key encryption strategy \"%s\".  No encryption will be used.", e
             .getCause().getClass().getCanonicalName(), className));
         log.warn(e.getCause());
         return new NullSecretKeyEncryptionStrategy();
       } catch (IllegalAccessException e) {
-        log.warn(String.format("Got illegal access exception when trying to instantiate secret key encryption strategy \"%s\".  NO ENCRYPTION WILL BE USED.",
+        log.warn(String.format("Got illegal access exception when trying to instantiate secret key encryption strategy \"%s\".  No encryption will be used.",
             className));
         log.warn(e);
         return new NullSecretKeyEncryptionStrategy();
@@ -156,99 +163,96 @@ public class CryptoModuleFactory {
     return strategy;
   }
   
-  private static class NullSecretKeyEncryptionStrategy implements SecretKeyEncryptionStrategy {
-    
-    @Override
-    public SecretKeyEncryptionStrategyContext encryptSecretKey(SecretKeyEncryptionStrategyContext context) {
-      context.setEncryptedSecretKey(context.getPlaintextSecretKey());
-      context.setOpaqueKeyEncryptionKeyID("");
-      
-      return context;
-    }
+  static class NullSecretKeyEncryptionStrategy implements SecretKeyEncryptionStrategy {
     
     @Override
-    public SecretKeyEncryptionStrategyContext decryptSecretKey(SecretKeyEncryptionStrategyContext context) {
-      context.setPlaintextSecretKey(context.getEncryptedSecretKey());
+    public CryptoModuleParameters encryptSecretKey(CryptoModuleParameters params) {
+      params.setEncryptedKey(params.getPlaintextKey());
+      params.setOpaqueKeyEncryptionKeyID("");
       
-      return context;
+      return params;
     }
-    
+
     @Override
-    public SecretKeyEncryptionStrategyContext getNewContext() {
-      return new SecretKeyEncryptionStrategyContext() {
-        
-        @Override
-        public byte[] getPlaintextSecretKey() {
-          return plaintextSecretKey;
-        }
-        
-        @Override
-        public void setPlaintextSecretKey(byte[] plaintextSecretKey) {
-          this.plaintextSecretKey = plaintextSecretKey;
-        }
-        
-        @Override
-        public byte[] getEncryptedSecretKey() {
-          return encryptedSecretKey;
-        }
-        
-        @Override
-        public void setEncryptedSecretKey(byte[] encryptedSecretKey) {
-          this.encryptedSecretKey = encryptedSecretKey;
-        }
-        
-        @Override
-        public String getOpaqueKeyEncryptionKeyID() {
-          return opaqueKeyEncryptionKeyID;
-        }
-        
-        @Override
-        public void setOpaqueKeyEncryptionKeyID(String opaqueKeyEncryptionKeyID) {
-          this.opaqueKeyEncryptionKeyID = opaqueKeyEncryptionKeyID;
-        }
-        
-        @Override
-        public Map<String,String> getContext() {
-          return context;
-        }
-        
-        @Override
-        public void setContext(Map<String,String> context) {
-          this.context = context;
-        }
-        
-        private byte[] plaintextSecretKey;
-        private byte[] encryptedSecretKey;
-        private String opaqueKeyEncryptionKeyID;
-        private Map<String,String> context;
-      };
+    public CryptoModuleParameters decryptSecretKey(CryptoModuleParameters params) {
+      params.setPlaintextKey(params.getEncryptedKey());
+      return params;
     }
     
   }
   
-  private static class NullCryptoModule implements CryptoModule {
+  static class NullCryptoModule implements CryptoModule {
     
     @Override
-    public OutputStream getEncryptingOutputStream(OutputStream out, Map<String,String> cryptoOpts) throws IOException {
-      return out;
+    public CryptoModuleParameters getEncryptingOutputStream(CryptoModuleParameters params) throws IOException {
+      params.setEncryptedOutputStream(params.getPlaintextOutputStream());
+      return params;
     }
-    
+
     @Override
-    public InputStream getDecryptingInputStream(InputStream in, Map<String,String> cryptoOpts) throws IOException {
-      return in;
+    public CryptoModuleParameters getDecryptingInputStream(CryptoModuleParameters params) throws IOException {
+      params.setPlaintextInputStream(params.getEncryptedInputStream());
+      return params;
     }
-    
+
     @Override
-    public OutputStream getEncryptingOutputStream(OutputStream out, Map<String,String> conf, Map<CryptoInitProperty,Object> cryptoInitParams) {
-      return out;
+    public CryptoModuleParameters generateNewRandomSessionKey(CryptoModuleParameters params) {
+      params.setPlaintextKey(new byte[0]);
+      return params;
     }
-    
+
     @Override
-    public InputStream getDecryptingInputStream(InputStream in, Map<String,String> cryptoOpts, Map<CryptoInitProperty,Object> cryptoInitParams)
-        throws IOException {
-      return in;
+    public CryptoModuleParameters initializeCipher(CryptoModuleParameters params) {
+      return params;
+    }
+
+  }
+  
+  public static String[] parseCipherTransform(String cipherTransform) {
+    if (cipherTransform == null) {
+      return new String[3];
     }
     
+    return cipherTransform.split("/");
   }
+
+  
+  public static CryptoModuleParameters createParamsObjectFromAccumuloConfiguration(AccumuloConfiguration conf) {
+    
+    // Get all the options from the configuration
+    Map<String,String> cryptoOpts = conf.getAllPropertiesWithPrefix(Property.CRYPTO_PREFIX);
+    cryptoOpts.putAll(conf.getAllPropertiesWithPrefix(Property.INSTANCE_PREFIX));
+    CryptoModuleParameters params = new CryptoModuleParameters();
+
+    return fillParamsObjectFromStringMap(params, cryptoOpts);
+  }
+
+  public static CryptoModuleParameters fillParamsObjectFromStringMap(CryptoModuleParameters params, Map<String,String> cryptoOpts) {
+
+    // Parse the cipher suite for the mode and padding options
+    String[] cipherTransformParts = parseCipherTransform(cryptoOpts.get(Property.CRYPTO_CIPHER_SUITE.getKey()));
+    
+    // If no encryption has been specified, return early with NullCipher settings.
+    if (cipherTransformParts[0] == null || cipherTransformParts[0].equals("NullCipher")) {
+      params.setAllOptions(cryptoOpts);
+      params.setAlgorithmName("NullCipher");
+      return params;
+    }
+    
+    params.setAllOptions(cryptoOpts);
+    
+    // Copy the individual cipher settings out of the option map
+    params.setAlgorithmName(cryptoOpts.get(Property.CRYPTO_CIPHER_ALGORITHM_NAME.getKey()));
+    params.setEncryptionMode(cipherTransformParts[1]);
+    params.setKeyEncryptionStrategyClass(cryptoOpts.get(Property.CRYPTO_SECRET_KEY_ENCRYPTION_STRATEGY_CLASS.getKey()));
+    params.setKeyLength(Integer.parseInt(cryptoOpts.get(Property.CRYPTO_CIPHER_KEY_LENGTH.getKey())));
+    params.setOverrideStreamsSecretKeyEncryptionStrategy(Boolean.parseBoolean(cryptoOpts.get(Property.CRYPTO_OVERRIDE_KEY_STRATEGY_WITH_CONFIGURED_STRATEGY.getKey())));
+    params.setPadding(cipherTransformParts[2]);
+    params.setRandomNumberGenerator(cryptoOpts.get(Property.CRYPTO_SECURE_RNG.getKey()));
+    params.setRandomNumberGeneratorProvider(cryptoOpts.get(Property.CRYPTO_SECURE_RNG_PROVIDER.getKey()));
+
+    return params;
+  }
+
   
 }


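A sketch of wiring the factory pieces above together; the helper names are illustrative, and the resulting module class depends on what accumulo-site.xml configures:

import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.security.crypto.CryptoModule;
import org.apache.accumulo.core.security.crypto.CryptoModuleFactory;
import org.apache.accumulo.core.security.crypto.CryptoModuleParameters;

class CryptoFactorySketch {
  static CryptoModule moduleFor(AccumuloConfiguration conf) {
    // Falls back to NullCryptoModule when nothing (or "NullCryptoModule")
    // is configured, so callers never receive null.
    return CryptoModuleFactory.getCryptoModule(conf);
  }

  static CryptoModuleParameters paramsFor(AccumuloConfiguration conf) {
    // Reads crypto.* and instance.* properties; yields "NullCipher" settings
    // when no cipher suite is configured.
    return CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf);
  }
}
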
[03/50] [abbrv] ACCUMULO-1537 converted many more functional tests to integration tests

Posted by ct...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/main/java/org/apache/accumulo/test/functional/ServerSideErrorTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ServerSideErrorTest.java b/test/src/main/java/org/apache/accumulo/test/functional/ServerSideErrorTest.java
deleted file mode 100644
index a972b65..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/ServerSideErrorTest.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.TableOperations;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.Combiner;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.hadoop.io.Text;
-
-public class ServerSideErrorTest extends FunctionalTest {
-  
-  @Override
-  public void cleanup() throws Exception {}
-  
-  @Override
-  public Map<String,String> getInitialConfig() {
-    return Collections.emptyMap();
-  }
-  
-  @Override
-  public List<TableSetup> getTablesToCreate() {
-    return Collections.emptyList();
-  }
-  
-  @Override
-  public void run() throws Exception {
-    
-    // Logger logger = Logger.getLogger(Constants.CORE_PACKAGE_NAME);
-    // logger.setLevel(Level.TRACE);
-    
-    getConnector().tableOperations().create("tt");
-    IteratorSetting is = new IteratorSetting(5, "Bad Aggregator", BadCombiner.class);
-    Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("acf")));
-    getConnector().tableOperations().attachIterator("tt", is);
-    
-    BatchWriter bw = getConnector().createBatchWriter("tt", new BatchWriterConfig());
-    
-    Mutation m = new Mutation(new Text("r1"));
-    m.put(new Text("acf"), new Text("foo"), new Value("1".getBytes()));
-    
-    bw.addMutation(m);
-    
-    bw.close();
-    
-    // try to scan table
-    Scanner scanner = getConnector().createScanner("tt", Authorizations.EMPTY);
-    
-    boolean caught = false;
-    try {
-      for (Entry<Key,Value> entry : scanner) {
-        entry.getKey();
-      }
-    } catch (Exception e) {
-      caught = true;
-    }
-    
-    if (!caught)
-      throw new Exception("Scan did not fail");
-    
-    // try to batch scan the table
-    BatchScanner bs = getConnector().createBatchScanner("tt", Authorizations.EMPTY, 2);
-    bs.setRanges(Collections.singleton(new Range()));
-    
-    caught = false;
-    try {
-      for (Entry<Key,Value> entry : bs) {
-        entry.getKey();
-      }
-      bs.close();
-    } catch (Exception e) {
-      caught = true;
-    }
-    if (!caught)
-      throw new Exception("batch scan did not fail");
-    
-    // remove the bad agg so accumulo can shutdown
-    TableOperations to = getConnector().tableOperations();
-    for (Entry<String,String> e : to.getProperties("tt")) {
-      to.removeProperty("tt", e.getKey());
-    }
-    
-    UtilWaitThread.sleep(500);
-    
-    // should be able to scan now
-    scanner = getConnector().createScanner("tt", Authorizations.EMPTY);
-    for (Entry<Key,Value> entry : scanner) {
-      entry.getKey();
-    }
-    
-    // set a nonexistent iterator, should cause scan to fail on server side
-    scanner.addScanIterator(new IteratorSetting(100, "bogus", "com.bogus.iterator"));
-    
-    caught = false;
-    try {
-      for (Entry<Key,Value> entry : scanner) {
-        // should error
-        entry.getKey();
-      }
-    } catch (Exception e) {
-      caught = true;
-    }
-    
-    if (!caught)
-      throw new Exception("Scan did not fail");
-  }
-}
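
For contrast with the deliberately broken BadCombiner above, a correctly configured
combiner attach looks like the following. This is a minimal sketch against the public
client API; the priority and the use of SummingCombiner are illustrative, not part of
the deleted test.

    import java.util.Collections;
    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.iterators.LongCombiner;
    import org.apache.accumulo.core.iterators.user.SummingCombiner;

    // Sum values in column family "acf", encoding longs as strings.
    // Unlike BadCombiner, this combiner behaves, so scans succeed.
    IteratorSetting is = new IteratorSetting(10, "sum", SummingCombiner.class);
    SummingCombiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("acf")));
    SummingCombiner.setEncodingType(is, LongCombiner.Type.STRING);
    conn.tableOperations().attachIterator("tt", is);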

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/main/java/org/apache/accumulo/test/functional/SparseColumnFamilyTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SparseColumnFamilyTest.java b/test/src/main/java/org/apache/accumulo/test/functional/SparseColumnFamilyTest.java
deleted file mode 100644
index 1ab77ba..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/SparseColumnFamilyTest.java
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.hadoop.io.Text;
-
-/**
- * This test recreates issue ACCUMULO-516. Until that issue is fixed, this test should time out.
- */
-public class SparseColumnFamilyTest extends FunctionalTest {
-  
-  @Override
-  public Map<String,String> getInitialConfig() {
-    return Collections.emptyMap();
-  }
-  
-  @Override
-  public List<TableSetup> getTablesToCreate() {
-    return Collections.emptyList();
-  }
-  
-  @Override
-  public void run() throws Exception {
-    getConnector().tableOperations().create("scftt");
-    
-    BatchWriter bw = getConnector().createBatchWriter("scftt", new BatchWriterConfig());
-    
-    // create file in the tablet that has mostly column family 0, with a few entries for column family 1
-    
-    bw.addMutation(nm(0, 1, 0));
-    for (int i = 1; i < 99999; i++) {
-      bw.addMutation(nm(i * 2, 0, i));
-    }
-    bw.addMutation(nm(99999 * 2, 1, 99999));
-    bw.flush();
-    
-    getConnector().tableOperations().flush("scftt", null, null, true);
-    
-    // create a file that has column family 1 and 0 interleaved
-    for (int i = 0; i < 100000; i++) {
-      bw.addMutation(nm(i * 2 + 1, i % 2 == 0 ? 0 : 1, i));
-    }
-    bw.close();
-    
-    getConnector().tableOperations().flush("scftt", null, null, true);
-    
-    Scanner scanner = getConnector().createScanner("scftt", Authorizations.EMPTY);
-    
-    for (int i = 0; i < 200; i++) {
-      
-      // every time we search for column family 1, it will scan the entire file
-      // that has mostly column family 0 until the bug is fixed
-      scanner.setRange(new Range(String.format("%06d", i), null));
-      scanner.clearColumns();
-      scanner.setBatchSize(3);
-      scanner.fetchColumnFamily(new Text(String.format("%03d", 1)));
-      
-      long t1 = System.currentTimeMillis();
-      Iterator<Entry<Key,Value>> iter = scanner.iterator();
-      if (iter.hasNext()) {
-        Entry<Key,Value> entry = iter.next();
-        if (!"001".equals(entry.getKey().getColumnFamilyData().toString())) {
-          throw new Exception();
-        }
-      }
-      long t2 = System.currentTimeMillis();
-      
-      System.out.println("time " + (t2 - t1));
-      
-    }
-  }
-  
-  /**
-   * @param i
-   * @param j
-   * @param k
-   * @return
-   */
-  private Mutation nm(int row, int cf, int val) {
-    Mutation m = new Mutation(String.format("%06d", row));
-    m.put(String.format("%03d", cf), "", "" + val);
-    return m;
-  }
-  
-  @Override
-  public void cleanup() throws Exception {}
-  
-}
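
The slowdown reproduced above is the cost of skipping a dominant column family while
seeking a sparse one. Where the access pattern is known up front, locality groups store
the families in separate sections of each file; a hedged sketch (the group name is
illustrative, and "conn" is assumed to be an existing Connector):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;
    import org.apache.hadoop.io.Text;

    // Place the sparse family "001" in its own locality group so scans
    // fetching only "001" do not page through the data of family "000".
    Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
    groups.put("sparse", Collections.singleton(new Text("001")));
    conn.tableOperations().setLocalityGroups("scftt", groups);
    // compact so existing files are rewritten into the new layout
    conn.tableOperations().compact("scftt", null, null, true, true);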

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/main/java/org/apache/accumulo/test/functional/TimeoutTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/TimeoutTest.java b/test/src/main/java/org/apache/accumulo/test/functional/TimeoutTest.java
deleted file mode 100644
index e7e045d..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/TimeoutTest.java
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.TimedOutException;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-
-/**
- * 
- */
-public class TimeoutTest extends FunctionalTest {
-  
-  @Override
-  public Map<String,String> getInitialConfig() {
-    return Collections.emptyMap();
-  }
-  
-  @Override
-  public List<TableSetup> getTablesToCreate() {
-    return Collections.emptyList();
-  }
-  
-  @Override
-  public void run() throws Exception {
-    testBatchScannerTimeout();
-    testBatchWriterTimeout();
-  }
-  
-  public void testBatchWriterTimeout() throws Exception {
-    Connector conn = getConnector();
-    
-    conn.tableOperations().create("foo1");
-    
-    conn.tableOperations().addConstraint("foo1", SlowConstraint.class.getName());
-    
-    // give the constraint time to propagate through zookeeper
-    UtilWaitThread.sleep(250);
-    
-    BatchWriter bw = conn.createBatchWriter("foo1", new BatchWriterConfig().setTimeout(3, TimeUnit.SECONDS));
-    
-    Mutation mut = new Mutation("r1");
-    mut.put("cf1", "cq1", "v1");
-    
-    bw.addMutation(mut);
-    try {
-      bw.close();
-      throw new Exception("batch writer did not timeout");
-    } catch (MutationsRejectedException mre) {
-      if (!(mre.getCause() instanceof TimedOutException)) {
-        throw mre;
-      }
-    }
-  }
-  
-  public void testBatchScannerTimeout() throws Exception {
-    getConnector().tableOperations().create("timeout");
-    
-    BatchWriter bw = getConnector().createBatchWriter("timeout", new BatchWriterConfig());
-    
-    Mutation m = new Mutation("r1");
-    m.put("cf1", "cq1", "v1");
-    m.put("cf1", "cq2", "v2");
-    m.put("cf1", "cq3", "v3");
-    m.put("cf1", "cq4", "v4");
-    
-    bw.addMutation(m);
-    
-    bw.close();
-    
-    BatchScanner bs = getConnector().createBatchScanner("timeout", Authorizations.EMPTY, 2);
-    bs.setTimeout(1, TimeUnit.SECONDS);
-    bs.setRanges(Collections.singletonList(new Range()));
-    
-    // should not timeout
-    for (Entry<Key,Value> entry : bs) {
-      entry.getKey();
-    }
-    
-    IteratorSetting iterSetting = new IteratorSetting(100, SlowIterator.class);
-    iterSetting.addOption("sleepTime", 2000 + "");
-    getConnector().tableOperations().attachIterator("timeout", iterSetting);
-    UtilWaitThread.sleep(250);
-    
-    try {
-      for (Entry<Key,Value> entry : bs) {
-        entry.getKey();
-      }
-      throw new Exception("batch scanner did not time out");
-    } catch (TimedOutException toe) {
-      // toe.printStackTrace();
-    }
-    
-    bs.close();
-  }
-  
-  @Override
-  public void cleanup() throws Exception {
-    
-  }
-  
-}
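
Both timeout knobs the deleted test exercised are plain client-API calls; a minimal
sketch, assuming a Connector "conn" and an existing table "t":

    import java.util.concurrent.TimeUnit;
    import org.apache.accumulo.core.client.BatchScanner;
    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.security.Authorizations;

    // Writer side: close() throws MutationsRejectedException wrapping a
    // TimedOutException if the servers do not accept the writes within 3s.
    BatchWriter bw = conn.createBatchWriter("t",
        new BatchWriterConfig().setTimeout(3, TimeUnit.SECONDS));

    // Scanner side: iteration throws TimedOutException once the servers
    // fail to produce data for 1s.
    BatchScanner bs = conn.createBatchScanner("t", Authorizations.EMPTY, 2);
    bs.setTimeout(1, TimeUnit.SECONDS);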

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
index b8e9b18..ad9e3fa 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
@@ -32,17 +32,22 @@ public class DeleteIT extends MacTest {
   @Test(timeout=60*1000)
   public void test() throws Exception {
     Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    deleteTest(c);
+    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+  }
+
+  public static void deleteTest(Connector c) throws Exception {
     VerifyIngest.Opts vopts = new VerifyIngest.Opts();
     TestIngest.Opts opts = new TestIngest.Opts();
-    vopts.rows = opts.rows = 10000;
+    vopts.rows = opts.rows = 1000;
     vopts.cols = opts.cols = 1;
     vopts.random = opts.random = 56;
-    opts.createTable = true;
     TestIngest.ingest(c, opts, new BatchWriterOpts());
     assertEquals(0, cluster.exec(TestRandomDeletes.class, "-p", MacTest.PASSWORD, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers()).waitFor());
     TestIngest.ingest(c, opts, new BatchWriterOpts());
     VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
-    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
   }
   
+  
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
index 56636ed..96e425c 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/DynamicThreadPoolsIT.java
@@ -16,8 +16,7 @@
  */
 package org.apache.accumulo.test.functional;
 
-import static org.junit.Assert.*;
-import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.util.Collections;
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java b/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java
new file mode 100644
index 0000000..1698193
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+/**
+ * See ACCUMULO-779
+ */
+public class FateStarvationIT extends MacTest {
+  
+  @Test(timeout=60*1000)
+  public void run() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    
+    c.tableOperations().addSplits("test_ingest", TestIngest.getSplitPoints(0, 100000, 50));
+    
+    TestIngest.Opts opts = new TestIngest.Opts();
+    opts.random = 89;
+    opts.timestamp = 7;
+    opts.dataSize = 50;
+    opts.rows = 100000;
+    opts.cols = 1;
+    TestIngest.ingest(c, opts, new BatchWriterOpts());
+    
+    c.tableOperations().flush("test_ingest", null, null, true);
+    
+    List<Text> splits = new ArrayList<Text>(TestIngest.getSplitPoints(0, 100000, 67));
+    Random rand = new Random();
+    
+    for (int i = 0; i < 100; i++) {
+      int idx1 = rand.nextInt(splits.size() - 1);
+      int idx2 = rand.nextInt(splits.size() - (idx1 + 1)) + idx1 + 1;
+      
+      c.tableOperations().compact("test_ingest", splits.get(idx1), splits.get(idx2), false, false);
+    }
+    
+    c.tableOperations().offline("test_ingest");
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java b/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
index 8b8dc61..1466153 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
@@ -34,6 +34,7 @@ import org.apache.accumulo.core.cli.BatchWriterOpts;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
@@ -149,6 +150,15 @@ public class FunctionalTestUtils {
     return readAll(new FileInputStream(c.getConfig().getLogDir() + "/" + klass.getSimpleName() + "_" + p.hashCode() + ".out"));
   }
   
+  static Mutation nm(String row, String cf, String cq, Value value) {
+    Mutation m = new Mutation(new Text(row));
+    m.put(new Text(cf), new Text(cq), value);
+    return m;
+  }
+  
+  static Mutation nm(String row, String cf, String cq, String value) {
+    return nm(row, cf, cq, new Value(value.getBytes()));
+  }
 
   
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java b/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
new file mode 100644
index 0000000..eb5c0fa
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.MemoryUnit;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.accumulo.server.gc.SimpleGarbageCollector;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class GarbageCollectorIT extends MacTest {
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    Map<String, String> settings = new HashMap<String, String>();
+    settings.put(Property.GC_CYCLE_START.getKey(), "1");
+    settings.put(Property.GC_CYCLE_DELAY.getKey(), "1");
+    settings.put(Property.TSERV_MAXMEM.getKey(), "5K");
+    settings.put(Property.TSERV_MAJC_DELAY.getKey(), "1");
+    cfg.setSiteConfig(settings);
+  }
+
+  @Test(timeout=60*1000)
+  public void gcTest() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "5K");
+    TestIngest.Opts opts = new TestIngest.Opts();
+    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+    vopts.rows = opts.rows = 10000;
+    vopts.cols = opts.cols = 1;
+    TestIngest.ingest(c, opts, new BatchWriterOpts());
+    c.tableOperations().compact("test_ingest", null, null, true, true);
+    int before = countFiles();
+    while (true) {
+      UtilWaitThread.sleep(1000);
+      int more = countFiles();
+      if (more <= before)
+        break;
+      before = more;
+    }
+    Process gc = cluster.exec(SimpleGarbageCollector.class);
+    UtilWaitThread.sleep(5*1000);
+    int after = countFiles();
+    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+    assertTrue(after < before);
+    gc.destroy();
+  }
+  
+  @Test(timeout=60*1000)
+  public void gcLotsOfCandidatesIT() throws Exception {
+    log.info("Filling !METADATA table with bogus delete flags");
+    Connector c = getConnector();
+    addEntries(c, new BatchWriterOpts());
+    cluster.getConfig().setDefaultMemory(10, MemoryUnit.MEGABYTE);
+    Process gc = cluster.exec(SimpleGarbageCollector.class);
+    UtilWaitThread.sleep(10*1000);
+    String output = FunctionalTestUtils.readAll(cluster, SimpleGarbageCollector.class, gc);
+    gc.destroy();
+    assertTrue(output.contains("delete candidates has exceeded"));
+  }
+
+  private int countFiles() throws Exception {
+    FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
+    int result = 0;
+    Path path = new Path(cluster.getConfig().getDir()+"/accumulo/tables/1/*/*.rf");
+    for (@SuppressWarnings("unused") FileStatus entry : fs.globStatus(path)) {
+      result++;
+    }
+    return result;
+  }
+  
+  public static void addEntries(Connector conn, BatchWriterOpts bwOpts) throws Exception {
+    conn.securityOperations().grantTablePermission(conn.whoami(), MetadataTable.NAME, TablePermission.WRITE);
+    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, bwOpts.getBatchWriterConfig());
+    
+    for (int i = 0; i < 100000; ++i) {
+      final Text emptyText = new Text("");
+      Text row = new Text(String.format("%s%s%020d%s", MetadataTable.DELETED_RANGE.getStartKey().getRow().toString(), "/", i,
+          "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjj"));
+      Mutation delFlag = new Mutation(row);
+      delFlag.put(emptyText, emptyText, new Value(new byte[] {}));
+      bw.addMutation(delFlag);
+    }
+    bw.close();
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java b/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
new file mode 100644
index 0000000..6e06934
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Collections;
+import java.util.Map.Entry;
+import java.util.Random;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.hadoop.io.Text;
+import org.apache.log4j.Logger;
+import org.junit.Test;
+
+public class LargeRowIT extends MacTest {
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_MAJC_DELAY.getKey(), "10ms"));
+  }
+
+  private static final int SEED = 42;
+  private static final String REG_TABLE_NAME = "lr";
+  private static final String PRE_SPLIT_TABLE_NAME = "lrps";
+  private static final int NUM_ROWS = 100;
+  private static final int ROW_SIZE = 1 << 17;
+  private static final int NUM_PRE_SPLITS = 9;
+  private static final int SPLIT_THRESH = ROW_SIZE * NUM_ROWS / NUM_PRE_SPLITS;
+  
+  @Test(timeout=60*1000)
+  public void run() throws Exception {
+    Random r = new Random();
+    byte rowData[] = new byte[ROW_SIZE];
+    r.setSeed(SEED + 1);
+    TreeSet<Text> splitPoints = new TreeSet<Text>();
+    for (int i = 0; i < NUM_PRE_SPLITS; i++) {
+      r.nextBytes(rowData);
+      TestIngest.toPrintableChars(rowData);
+      splitPoints.add(new Text(rowData));
+    }
+    Connector c = getConnector();
+    c.tableOperations().create(REG_TABLE_NAME);
+    c.tableOperations().create(PRE_SPLIT_TABLE_NAME);
+    c.tableOperations().addSplits(PRE_SPLIT_TABLE_NAME, splitPoints);
+    test1(c);
+    test2(c);
+  }
+  
+  private void test1(Connector c) throws Exception {
+    
+    basicTest(c, REG_TABLE_NAME, 0);
+    
+    c.tableOperations().setProperty(REG_TABLE_NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "" + SPLIT_THRESH);
+    
+    UtilWaitThread.sleep(12000);
+    Logger.getLogger(LargeRowIT.class).warn("checking splits");
+    FunctionalTestUtils.checkSplits(c, REG_TABLE_NAME, NUM_PRE_SPLITS / 2, NUM_PRE_SPLITS * 4);
+    
+    verify(c, REG_TABLE_NAME);
+  }
+  
+  private void test2(Connector c) throws Exception {
+    basicTest(c, PRE_SPLIT_TABLE_NAME, NUM_PRE_SPLITS);
+  }
+  
+  private void basicTest(Connector c, String table, int expectedSplits) throws Exception {
+    BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
+    
+    Random r = new Random();
+    byte rowData[] = new byte[ROW_SIZE];
+    
+    r.setSeed(SEED);
+    
+    for (int i = 0; i < NUM_ROWS; i++) {
+      
+      r.nextBytes(rowData);
+      TestIngest.toPrintableChars(rowData);
+      
+      Mutation mut = new Mutation(new Text(rowData));
+      mut.put(new Text(""), new Text(""), new Value(("" + i).getBytes()));
+      bw.addMutation(mut);
+    }
+    
+    bw.close();
+    
+    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
+    
+    verify(c, table);
+    
+    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
+    
+    c.tableOperations().flush(table, null, null, false);
+    
+    // verify while table flush is running
+    verify(c, table);
+    
+    // give split time to complete
+    c.tableOperations().flush(table, null, null, true);
+    
+    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
+    
+    verify(c, table);
+    
+    FunctionalTestUtils.checkSplits(c, table, expectedSplits, expectedSplits);
+  }
+  
+  private void verify(Connector c, String table) throws Exception {
+    Random r = new Random();
+    byte rowData[] = new byte[ROW_SIZE];
+    
+    r.setSeed(SEED);
+    
+    Scanner scanner = c.createScanner(table, Authorizations.EMPTY);
+    
+    for (int i = 0; i < NUM_ROWS; i++) {
+      
+      r.nextBytes(rowData);
+      TestIngest.toPrintableChars(rowData);
+      
+      scanner.setRange(new Range(new Text(rowData)));
+      
+      int count = 0;
+      
+      for (Entry<Key,Value> entry : scanner) {
+        if (!entry.getKey().getRow().equals(new Text(rowData))) {
+          throw new Exception("verification failed, unexpected row i =" + i);
+        }
+        if (!entry.getValue().equals(Integer.toString(i).getBytes())) {
+          throw new Exception("verification failed, unexpected value i =" + i + " value = " + entry.getValue());
+        }
+        count++;
+      }
+      
+      if (count != 1) {
+        throw new Exception("verification failed, unexpected count i =" + i + " count=" + count);
+      }
+      
+    }
+    
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java b/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
new file mode 100644
index 0000000..4ffef57
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/LogicalTimeIT.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.TimeType;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class LogicalTimeIT extends MacTest {
+
+  
+  
+  @Test(timeout=120*1000)
+  public void run() throws Exception {
+    int tc = 0;
+    Connector c = getConnector();
+    runMergeTest(c, "foo" + tc++, new String[] {"m"}, new String[] {"a"}, null, null, "b", 2l);
+    runMergeTest(c, "foo" + tc++, new String[] {"m"}, new String[] {"z"}, null, null, "b", 2l);
+    runMergeTest(c, "foo" + tc++, new String[] {"m"}, new String[] {"a", "z"}, null, null, "b", 2l);
+    runMergeTest(c, "foo" + tc++, new String[] {"m"}, new String[] {"a", "c", "z"}, null, null, "b", 3l);
+    runMergeTest(c, "foo" + tc++, new String[] {"m"}, new String[] {"a", "y", "z"}, null, null, "b", 3l);
+    
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a"}, null, null, "b", 2l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"h"}, null, null, "b", 2l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"s"}, null, null, "b", 2l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s"}, null, null, "b", 2l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "c", "h", "s"}, null, null, "b", 3l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s", "i"}, null, null, "b", 3l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"t", "a", "h", "s"}, null, null, "b", 3l);
+    
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a"}, null, "h", "b", 2l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"h"}, null, "h", "b", 2l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"s"}, null, "h", "b", 1l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s"}, null, "h", "b", 2l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "c", "h", "s"}, null, "h", "b", 3l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s", "i"}, null, "h", "b", 3l);
+    runMergeTest(c, "foo" + tc++, new String[] {"g", "r"}, new String[] {"t", "a", "h", "s"}, null, "h", "b", 2l);
+    
+  }
+  
+  private void runMergeTest(Connector conn, String table, String[] splits, String[] inserts, String start, String end, String last, long expected) throws Exception {
+    log.info("table " + table);
+    conn.tableOperations().create(table, true, TimeType.LOGICAL);
+    TreeSet<Text> splitSet = new TreeSet<Text>();
+    for (String split : splits) {
+      splitSet.add(new Text(split));
+    }
+    conn.tableOperations().addSplits(table, splitSet);
+    
+    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+    for (String row : inserts) {
+      Mutation m = new Mutation(row);
+      m.put("cf", "cq", "v");
+      bw.addMutation(m);
+    }
+    
+    bw.flush();
+    
+    conn.tableOperations().merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end));
+    
+    Mutation m = new Mutation(last);
+    m.put("cf", "cq", "v");
+    bw.addMutation(m);
+    bw.flush();
+    
+    Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
+    scanner.setRange(new Range(last));
+    
+    bw.close();
+    
+    long time = scanner.iterator().next().getKey().getTimestamp();
+    if (time != expected)
+      throw new RuntimeException("unexpected time " + time + " " + expected);
+  }
+  
+}
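
The expected timestamps above follow from logical time semantics: each tablet keeps a
counter, an insert is stamped counter + 1, and a merge seeds the merged tablet with the
maximum counter of its parts. So when every merged tablet had seen at most one insert,
the write of "b" gets 2, and when some tablet had already reached 2 (e.g. inserts "a"
and "c" landing in the same tablet), it gets 3. The only table-creation difference is
the time type; a one-line sketch:

    // MILLIS is the default; the test opts into per-tablet counters
    conn.tableOperations().create("logical", true, TimeType.LOGICAL);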

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
new file mode 100644
index 0000000..2b84d49
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MapReduceIT.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+
+import java.security.MessageDigest;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.examples.simple.mapreduce.RowHash;
+import org.apache.hadoop.io.Text;
+import org.codehaus.plexus.util.Base64;
+import org.junit.Test;
+
+public class MapReduceIT extends MacTest {
+  
+  static final String tablename = "mapredf";
+  static final String input_cf = "cf-HASHTYPE";
+  static final String input_cq = "cq-NOTHASHED";
+  static final String input_cfcq = input_cf + ":" + input_cq;
+  static final String output_cq = "cq-MD5BASE64";
+  static final String output_cfcq =  input_cf + ":" + output_cq;
+  
+  @Test
+  public void test() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create(tablename);
+    BatchWriter bw = c.createBatchWriter(tablename, new BatchWriterConfig());
+    for (int i = 0; i < 10; i++) {
+      Mutation m = new Mutation("" + i);
+      m.put(input_cf, input_cq, "row" + i);
+      bw.addMutation(m);
+    }
+    bw.close();
+    
+    Process hash = cluster.exec(RowHash.class, 
+        "-i", cluster.getInstanceName(),
+        "-z", cluster.getZooKeepers(),
+        "-u", "root",
+        "-p", MacTest.PASSWORD,
+        "-t", tablename,
+        "--column", input_cfcq);
+    assertEquals(0, hash.waitFor());
+    
+    Scanner s = c.createScanner(tablename, Authorizations.EMPTY);
+    s.fetchColumn(new Text(input_cf), new Text(output_cq));
+    int i = 0;
+    for (Entry<Key,Value> entry : s) {
+      MessageDigest md = MessageDigest.getInstance("MD5");
+      byte[] check = Base64.encodeBase64(md.digest(("row" + i).getBytes()));
+      assertEquals(new String(check), entry.getValue().toString());
+      i++;
+    }
+    
+  }
+
+  
+}
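
The verification loop relies on the RowHash example writing back, under the same column
family, the Base64-encoded MD5 digest of each input value (qualifier cq-MD5BASE64,
matching the old python test in this commit). Computing one expected cell by hand, under
that assumption:

    import java.security.MessageDigest;
    import org.codehaus.plexus.util.Base64;

    // expected value of the output column for the row whose input value is "row0"
    MessageDigest md = MessageDigest.getInstance("MD5");
    String expected = new String(Base64.encodeBase64(md.digest("row0".getBytes())));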

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java
new file mode 100644
index 0000000..2934fd2
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MaxOpenIT.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.junit.Test;
+
+/**
+ * A functional test that exercises hitting the max open file limit on a tablet server. This test assumes there are one or two tablet servers.
+ */
+
+public class MaxOpenIT extends MacTest {
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    Map<String, String> conf = new HashMap<String, String>();
+    conf.put(Property.TSERV_SCAN_MAX_OPENFILES.getKey(), "4");
+    conf.put(Property.TSERV_MAJC_MAXCONCURRENT.getKey(), "1");
+    conf.put(Property.TSERV_MAJC_THREAD_MAXOPEN.getKey(), "2");
+    cfg.setSiteConfig(conf);
+  }
+
+  private static final int NUM_TABLETS = 16;
+  private static final int NUM_TO_INGEST = 10000;
+  
+  @Test
+  public void run() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    c.tableOperations().setProperty("test_ingest", Property.TABLE_MAJC_RATIO.getKey(), "10");
+    c.tableOperations().addSplits("test_ingest", TestIngest.getSplitPoints(0, NUM_TO_INGEST, NUM_TABLETS));
+    
+    // the following loop should create three tablets in each map file
+    for (int i = 0; i < 3; i++) {
+      TestIngest.Opts opts = new TestIngest.Opts();
+      opts.timestamp = i;
+      opts.dataSize = 50;
+      opts.rows = NUM_TO_INGEST;
+      opts.cols = 1;
+      opts.random = i;
+      TestIngest.ingest(c, opts, new BatchWriterOpts());
+      
+      c.tableOperations().flush("test_ingest", null, null, true);
+      FunctionalTestUtils.checkRFiles(c, "test_ingest", NUM_TABLETS, NUM_TABLETS, i + 1, i + 1);
+    }
+    
+    List<Range> ranges = new ArrayList<Range>(NUM_TO_INGEST);
+    
+    for (int i = 0; i < NUM_TO_INGEST; i++) {
+      ranges.add(new Range(TestIngest.generateRow(i, 0)));
+    }
+    
+    long time1 = batchScan(c, ranges, 1);
+    // run it again, now that data is cached on the client and server
+    time1 = batchScan(c, ranges, 1);
+    long time2 = batchScan(c, ranges, NUM_TABLETS);
+    
+    System.out.printf("Single thread scan time   %6.2f %n", time1 / 1000.0);
+    System.out.printf("Multiple thread scan time %6.2f %n", time2 / 1000.0);
+    
+  }
+  
+  private long batchScan(Connector c, List<Range> ranges, int threads) throws Exception {
+    BatchScanner bs = c.createBatchScanner("test_ingest", TestIngest.AUTHS, threads);
+    
+    bs.setRanges(ranges);
+    
+    int count = 0;
+    
+    long t1 = System.currentTimeMillis();
+    
+    byte rval[] = new byte[50];
+    Random random = new Random();
+    
+    for (Entry<Key,Value> entry : bs) {
+      count++;
+      int row = VerifyIngest.getRow(entry.getKey());
+      int col = VerifyIngest.getCol(entry.getKey());
+      
+      if (row < 0 || row >= NUM_TO_INGEST) {
+        throw new Exception("unexcepted row " + row);
+      }
+      
+      rval = TestIngest.genRandomValue(random, rval, 2, row, col);
+      
+      if (entry.getValue().compareTo(rval) != 0) {
+        throw new Exception("unexcepted value row=" + row + " col=" + col);
+      }
+    }
+    
+    long t2 = System.currentTimeMillis();
+    
+    bs.close();
+    
+    if (count != NUM_TO_INGEST) {
+      throw new Exception("Batch Scan did not return expected number of values " + count);
+    }
+    
+    return t2 - t1;
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java
new file mode 100644
index 0000000..4428277
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MergeIT.java
@@ -0,0 +1,182 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.*;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.TimeType;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.Merge;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class MergeIT extends MacTest {
+  
+  SortedSet<Text> splits(String [] points) {
+    SortedSet<Text> result = new TreeSet<Text>();
+    for (String point : points)
+      result.add(new Text(point));
+    return result;
+  }
+  
+  @Test(timeout=30*1000)
+  public void merge() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test");
+    c.tableOperations().addSplits("test", splits("a b c d e f g h i j k".split(" ")));
+    BatchWriter bw = c.createBatchWriter("test", new BatchWriterConfig());
+    for (String row : "a b c d e f g h i j k".split(" ")) {
+      Mutation m = new Mutation(row);
+      m.put("cf", "cq", "value");
+      bw.addMutation(m);
+    }
+    bw.close();
+    c.tableOperations().flush("test", null, null, true);
+    c.tableOperations().merge("test", new Text("c1"), new Text("f1"));
+    assertEquals(8, c.tableOperations().listSplits("test").size());
+  }
+  
+  @Test(timeout=30*1000)
+  public void mergeSize() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("merge");
+    c.tableOperations().addSplits("merge", splits("a b c d e f g h i j k l m n o p q r s t u v w x y z".split(" ")));
+    BatchWriter bw = c.createBatchWriter("merge", new BatchWriterConfig());
+    for (String row : "c e f y".split(" ")) {
+      Mutation m = new Mutation(row);
+      m.put("cf", "cq", "mersydotesanddozeydotesanlittolamsiedives");
+      bw.addMutation(m);
+    }
+    bw.close();
+    c.tableOperations().flush("merge", null, null, true);
+    Merge merge = new Merge();
+    merge.mergomatic(c, "merge", null, null, 100, false);
+    assertArrayEquals("b c d e f x y".split(" "), toStrings(c.tableOperations().listSplits("merge")));
+    merge.mergomatic(c, "merge", null, null, 100, true);
+    assertArrayEquals("c e f y".split(" "), toStrings(c.tableOperations().listSplits("merge")));
+  }
+
+  private String[] toStrings(Collection<Text> listSplits) {
+    String[] result = new String[listSplits.size()];
+    int i = 0;
+    for (Text t : listSplits) {
+      result[i++] = t.toString();
+    }
+    return result;
+  }
+  
+  private String[] ns(String... strings) {
+    return strings;
+  }
+  
+  @Test(timeout=120*1000)
+  public void mergeTest() throws Exception {
+    int tc = 0;
+    Connector c = getConnector();
+    runMergeTest(c, "foo" + tc++, ns(), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
+    
+    runMergeTest(c, "foo" + tc++, ns("m"), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
+    runMergeTest(c, "foo" + tc++, ns("m"), ns("m"), ns("l", "m", "n"), ns("m", "n"), ns(null, "z"));
+    runMergeTest(c, "foo" + tc++, ns("m"), ns("m"), ns("l", "m", "n"), ns(null, "b"), ns("l", "m"));
+    
+    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns(), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns(null, "s"));
+    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("c", "m"));
+    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("n", "r"));
+    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns(null, "s"));
+    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b", "m"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns(null, "s"));
+    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("q", "r"));
+    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("aa", "b"));
+    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("r", "s"), ns(null, "z"));
+    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("l", "m"));
+    runMergeTest(c, "foo" + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns("q", "r"));
+    
+  }
+  
+  private void runMergeTest(Connector c, String table, String[] splits, String[] expectedSplits, String[] inserts, String[] start, String[] end) throws Exception {
+    int count = 0;
+    
+    for (String s : start) {
+      for (String e : end) {
+        runMergeTest(c, table + "_" + count++, splits, expectedSplits, inserts, s, e);
+      }
+    }
+  }
+  
+  private void runMergeTest(Connector conn, String table, String[] splits, String[] expectedSplits, String[] inserts, String start, String end) throws Exception {
+    System.out.println("Running merge test " + table + " " + Arrays.asList(splits) + " " + start + " " + end);
+    
+    conn.tableOperations().create(table, true, TimeType.LOGICAL);
+    TreeSet<Text> splitSet = new TreeSet<Text>();
+    for (String split : splits) {
+      splitSet.add(new Text(split));
+    }
+    conn.tableOperations().addSplits(table, splitSet);
+    
+    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
+    HashSet<String> expected = new HashSet<String>();
+    for (String row : inserts) {
+      Mutation m = new Mutation(row);
+      m.put("cf", "cq", row);
+      bw.addMutation(m);
+      expected.add(row);
+    }
+    
+    bw.close();
+    
+    conn.tableOperations().merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end));
+    
+    Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
+    
+    HashSet<String> observed = new HashSet<String>();
+    for (Entry<Key,Value> entry : scanner) {
+      String row = entry.getKey().getRowData().toString();
+      if (!observed.add(row)) {
+        throw new Exception("Saw data twice " + table + " " + row);
+      }
+    }
+    
+    if (!observed.equals(expected)) {
+      throw new Exception("data inconsistency " + table + " " + observed + " != " + expected);
+    }
+    
+    HashSet<Text> currentSplits = new HashSet<Text>(conn.tableOperations().listSplits(table));
+    HashSet<Text> ess = new HashSet<Text>();
+    for (String es : expectedSplits) {
+      ess.add(new Text(es));
+    }
+    
+    if (!currentSplits.equals(ess)) {
+      throw new Exception("split inconsistency " + table + " " + currentSplits + " != " + ess);
+    }
+
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java
new file mode 100644
index 0000000..bbfdcbe
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.*;
+
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.util.RootTable;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class MergeMetaIT extends MacTest {
+  
+  @Test(timeout=30*1000)
+  public void mergeMeta() throws Exception {
+    Connector c = getConnector();
+    SortedSet<Text> splits = new TreeSet<Text>();
+    for (String id : "1 2 3 4 5".split(" ")) {
+      splits.add(new Text(id));
+    }
+    c.tableOperations().addSplits(MetadataTable.NAME, splits);
+    for (String tableName : "a1 a2 a3 a4 a5".split(" ")) {
+      c.tableOperations().create(tableName);
+    }
+    c.tableOperations().merge(MetadataTable.NAME, null, null);
+    UtilWaitThread.sleep(2*1000);
+    Scanner s = c.createScanner(RootTable.NAME, Authorizations.EMPTY);
+    s.setRange(MetadataTable.DELETED_RANGE);
+    int count = 0;
+    for (@SuppressWarnings("unused") Entry<Key,Value> e : s) {
+      count++;
+    }
+    assertTrue(count > 0);
+    assertEquals(0, c.tableOperations().listSplits(MetadataTable.NAME).size());
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
new file mode 100644
index 0000000..678eb8d
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
@@ -0,0 +1,475 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableExistsException;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.security.SecurityErrorCode;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.SystemPermission;
+import org.apache.accumulo.core.security.TablePermission;
+import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class PermissionsIT extends MacTest {
+  private static final String TEST_USER = "test_user";
+  private static final PasswordToken TEST_PASS = new PasswordToken("test_password");
+  
+  @Test(timeout=60*1000)
+  public void systemPermissionsTest() throws Exception {
+    // verify that the test is being run by root
+    Connector c = getConnector();
+    verifyHasOnlyTheseSystemPermissions(c, c.whoami(), SystemPermission.values());
+    
+    // create the test user
+    c.securityOperations().createLocalUser(TEST_USER, TEST_PASS);
+    Connector test_user_conn = c.getInstance().getConnector(TEST_USER, TEST_PASS);
+    verifyHasNoSystemPermissions(c, TEST_USER, SystemPermission.values());
+    
+    // test each permission
+    for (SystemPermission perm : SystemPermission.values()) {
+      log.debug("Verifying the " + perm + " permission");
+      
+      // verify GRANT can't be granted
+      if (perm.equals(SystemPermission.GRANT)) {
+        try {
+          c.securityOperations().grantSystemPermission(TEST_USER, perm);
+        } catch (AccumuloSecurityException e) {
+          verifyHasNoSystemPermissions(c, TEST_USER, perm);
+          continue;
+        }
+        throw new IllegalStateException("Should NOT be able to grant GRANT");
+      }
+      
+      // test permission before and after granting it
+      testMissingSystemPermission(c, test_user_conn, perm);
+      c.securityOperations().grantSystemPermission(TEST_USER, perm);
+      verifyHasOnlyTheseSystemPermissions(c, TEST_USER, perm);
+      testGrantedSystemPermission(c, test_user_conn, perm);
+      c.securityOperations().revokeSystemPermission(TEST_USER, perm);
+      verifyHasNoSystemPermissions(c, TEST_USER, perm);
+    }
+  }
+  
+  static Map<String, String> map(Iterable<Entry<String,String>> i) {
+    Map<String, String> result = new HashMap<String, String>();
+    for (Entry<String, String> e : i) {
+      result.put(e.getKey(), e.getValue());
+    }
+    return result;
+  }
+  
+  private static void testMissingSystemPermission(Connector root_conn, Connector test_user_conn, SystemPermission perm) throws AccumuloException,
+  TableExistsException, AccumuloSecurityException, TableNotFoundException {
+    String tableName, user, password = "password";
+    log.debug("Confirming that the lack of the " + perm + " permission properly restricts the user");
+    
+    // test permission prior to granting it
+    switch (perm) {
+      case CREATE_TABLE:
+        tableName = "__CREATE_TABLE_WITHOUT_PERM_TEST__";
+        try {
+          test_user_conn.tableOperations().create(tableName);
+          throw new IllegalStateException("Should NOT be able to create a table");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || root_conn.tableOperations().list().contains(tableName))
+            throw e;
+        }
+        break;
+      case DROP_TABLE:
+        tableName = "__DROP_TABLE_WITHOUT_PERM_TEST__";
+        root_conn.tableOperations().create(tableName);
+        try {
+          test_user_conn.tableOperations().delete(tableName);
+          throw new IllegalStateException("Should NOT be able to delete a table");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.tableOperations().list().contains(tableName))
+            throw e;
+        }
+        break;
+      case ALTER_TABLE:
+        tableName = "__ALTER_TABLE_WITHOUT_PERM_TEST__";
+        root_conn.tableOperations().create(tableName);
+        try {
+          test_user_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
+          throw new IllegalStateException("Should NOT be able to set a table property");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
+              || map(root_conn.tableOperations().getProperties(tableName)).get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
+            throw e;
+        }
+        root_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
+        try {
+          test_user_conn.tableOperations().removeProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey());
+          throw new IllegalStateException("Should NOT be able to remove a table property");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
+              || !map(root_conn.tableOperations().getProperties(tableName)).get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
+            throw e;
+        }
+        String table2 = tableName + "2";
+        try {
+          test_user_conn.tableOperations().rename(tableName, table2);
+          throw new IllegalStateException("Should NOT be able to rename a table");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.tableOperations().list().contains(tableName)
+              || root_conn.tableOperations().list().contains(table2))
+            throw e;
+        }
+        break;
+      case CREATE_USER:
+        user = "__CREATE_USER_WITHOUT_PERM_TEST__";
+        try {
+          test_user_conn.securityOperations().createLocalUser(user, new PasswordToken(password));
+          throw new IllegalStateException("Should NOT be able to create a user");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
+              || root_conn.securityOperations().authenticateUser(user, new PasswordToken(password)))
+            throw e;
+        }
+        break;
+      case DROP_USER:
+        user = "__DROP_USER_WITHOUT_PERM_TEST__";
+        root_conn.securityOperations().createLocalUser(user, new PasswordToken(password));
+        try {
+          test_user_conn.securityOperations().dropLocalUser(user);
+          throw new IllegalStateException("Should NOT be able to delete a user");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
+              || !root_conn.securityOperations().authenticateUser(user, new PasswordToken(password)))
+            throw e;
+        }
+        break;
+      case ALTER_USER:
+        user = "__ALTER_USER_WITHOUT_PERM_TEST__";
+        root_conn.securityOperations().createLocalUser(user, new PasswordToken(password));
+        try {
+          test_user_conn.securityOperations().changeUserAuthorizations(user, new Authorizations("A", "B"));
+          throw new IllegalStateException("Should NOT be able to alter a user");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.securityOperations().getUserAuthorizations(user).isEmpty())
+            throw e;
+        }
+        break;
+      case SYSTEM:
+        // test for system permission would go here
+        break;
+      default:
+        throw new IllegalArgumentException("Unrecognized System Permission: " + perm);
+    }
+  }
+  
+  private static void testGrantedSystemPermission(Connector root_conn, Connector test_user_conn, SystemPermission perm) throws AccumuloException,
+  AccumuloSecurityException, TableNotFoundException, TableExistsException {
+    String tableName, user, password = "password";
+    log.debug("Confirming that the presence of the " + perm + " permission properly permits the user");
+    
+    // test permission after granting it
+    switch (perm) {
+      case CREATE_TABLE:
+        tableName = "__CREATE_TABLE_WITH_PERM_TEST__";
+        test_user_conn.tableOperations().create(tableName);
+        if (!root_conn.tableOperations().list().contains(tableName))
+          throw new IllegalStateException("Should be able to create a table");
+        break;
+      case DROP_TABLE:
+        tableName = "__DROP_TABLE_WITH_PERM_TEST__";
+        root_conn.tableOperations().create(tableName);
+        test_user_conn.tableOperations().delete(tableName);
+        if (root_conn.tableOperations().list().contains(tableName))
+          throw new IllegalStateException("Should be able to delete a table");
+        break;
+      case ALTER_TABLE:
+        tableName = "__ALTER_TABLE_WITH_PERM_TEST__";
+        String table2 = tableName + "2";
+        root_conn.tableOperations().create(tableName);
+        test_user_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
+        Map<String,String> properties = map(root_conn.tableOperations().getProperties(tableName));
+        if (!properties.get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
+          throw new IllegalStateException("Should be able to set a table property");
+        test_user_conn.tableOperations().removeProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey());
+        properties = map(root_conn.tableOperations().getProperties(tableName));
+        if (properties.get(Property.TABLE_BLOOM_ERRORRATE.getKey()).equals("003.14159%"))
+          throw new IllegalStateException("Should be able to remove a table property");
+        test_user_conn.tableOperations().rename(tableName, table2);
+        if (root_conn.tableOperations().list().contains(tableName) || !root_conn.tableOperations().list().contains(table2))
+          throw new IllegalStateException("Should be able to rename a table");
+        break;
+      case CREATE_USER:
+        user = "__CREATE_USER_WITH_PERM_TEST__";
+        test_user_conn.securityOperations().createLocalUser(user, new PasswordToken(password));
+        if (!root_conn.securityOperations().authenticateUser(user, new PasswordToken(password)))
+          throw new IllegalStateException("Should be able to create a user");
+        break;
+      case DROP_USER:
+        user = "__DROP_USER_WITH_PERM_TEST__";
+        root_conn.securityOperations().createLocalUser(user, new PasswordToken(password));
+        test_user_conn.securityOperations().dropLocalUser(user);
+        if (root_conn.securityOperations().authenticateUser(user, new PasswordToken(password)))
+          throw new IllegalStateException("Should be able to delete a user");
+        break;
+      case ALTER_USER:
+        user = "__ALTER_USER_WITH_PERM_TEST__";
+        root_conn.securityOperations().createLocalUser(user, new PasswordToken(password));
+        test_user_conn.securityOperations().changeUserAuthorizations(user, new Authorizations("A", "B"));
+        if (root_conn.securityOperations().getUserAuthorizations(user).isEmpty())
+          throw new IllegalStateException("Should be able to alter a user");
+        break;
+      case SYSTEM:
+        // test for system permission would go here
+        break;
+      default:
+        throw new IllegalArgumentException("Unrecognized System Permission: " + perm);
+    }
+  }
+  
+  private static void verifyHasOnlyTheseSystemPermissions(Connector root_conn, String user, SystemPermission... perms) throws AccumuloException,
+  AccumuloSecurityException {
+    List<SystemPermission> permList = Arrays.asList(perms);
+    for (SystemPermission p : SystemPermission.values()) {
+      if (permList.contains(p)) {
+        // should have these
+        if (!root_conn.securityOperations().hasSystemPermission(user, p))
+          throw new IllegalStateException(user + " SHOULD have system permission " + p);
+      } else {
+        // should not have these
+        if (root_conn.securityOperations().hasSystemPermission(user, p))
+          throw new IllegalStateException(user + " SHOULD NOT have system permission " + p);
+      }
+    }
+  }
+  
+  private static void verifyHasNoSystemPermissions(Connector root_conn, String user, SystemPermission... perms) throws AccumuloException,
+  AccumuloSecurityException {
+    for (SystemPermission p : perms)
+      if (root_conn.securityOperations().hasSystemPermission(user, p))
+        throw new IllegalStateException(user + " SHOULD NOT have system permission " + p);
+  }
+  
+  private static final String TEST_TABLE = "__TABLE_PERMISSION_TEST__";
+  
+  @Test
+  public void tablePermissionTest() throws Exception {
+    // create the test user
+    Connector c = getConnector();
+    c.securityOperations().createLocalUser(TEST_USER, TEST_PASS);
+    Connector test_user_conn = c.getInstance().getConnector(TEST_USER, TEST_PASS);
+    
+    // check for read-only access to metadata table
+    verifyHasOnlyTheseTablePermissions(c, c.whoami(), MetadataTable.NAME, TablePermission.READ,
+        TablePermission.ALTER_TABLE);
+    verifyHasOnlyTheseTablePermissions(c, TEST_USER, MetadataTable.NAME, TablePermission.READ);
+    
+    // test each permission
+    for (TablePermission perm : TablePermission.values()) {
+      log.debug("Verifying the " + perm + " permission");
+      
+      // test permission before and after granting it
+      createTestTable(c);
+      testMissingTablePermission(c, test_user_conn, perm);
+      c.securityOperations().grantTablePermission(TEST_USER, TEST_TABLE, perm);
+      verifyHasOnlyTheseTablePermissions(c, TEST_USER, TEST_TABLE, perm);
+      testGrantedTablePermission(c, test_user_conn, perm);
+      
+      createTestTable(c);
+      c.securityOperations().revokeTablePermission(TEST_USER, TEST_TABLE, perm);
+      verifyHasNoTablePermissions(c, TEST_USER, TEST_TABLE, perm);
+    }
+  }
+  
+  private void createTestTable(Connector c) throws Exception,
+  MutationsRejectedException {
+    if (!c.tableOperations().exists(TEST_TABLE)) {
+      // create the test table
+      c.tableOperations().create(TEST_TABLE);
+      // put in some initial data
+      BatchWriter writer = c.createBatchWriter(TEST_TABLE, new BatchWriterConfig());
+      Mutation m = new Mutation(new Text("row"));
+      m.put(new Text("cf"), new Text("cq"), new Value("val".getBytes()));
+      writer.addMutation(m);
+      writer.close();
+      
+      // verify proper permissions for creator and test user
+      verifyHasOnlyTheseTablePermissions(c, c.whoami(), TEST_TABLE, TablePermission.values());
+      verifyHasNoTablePermissions(c, TEST_USER, TEST_TABLE, TablePermission.values());
+      
+    }
+  }
+  
+  private static void testMissingTablePermission(Connector root_conn, Connector test_user_conn, TablePermission perm) throws Exception {
+    Scanner scanner;
+    BatchWriter writer;
+    Mutation m;
+    log.debug("Confirming that the lack of the " + perm + " permission properly restricts the user");
+    
+    // test permission prior to granting it
+    switch (perm) {
+      case READ:
+        try {
+          scanner = test_user_conn.createScanner(TEST_TABLE, Authorizations.EMPTY);
+          int i = 0;
+          for (Entry<Key,Value> entry : scanner)
+            i += 1 + entry.getKey().getRowData().length();
+          if (i != 0)
+            throw new IllegalStateException("Should NOT be able to read from the table");
+        } catch (RuntimeException e) {
+          AccumuloSecurityException se = (AccumuloSecurityException) e.getCause();
+          if (se.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
+            throw se;
+        }
+        break;
+      case WRITE:
+        try {
+          writer = test_user_conn.createBatchWriter(TEST_TABLE, new BatchWriterConfig());
+          m = new Mutation(new Text("row"));
+          m.put(new Text("a"), new Text("b"), new Value("c".getBytes()));
+          writer.addMutation(m);
+          try {
+            writer.close();
+          } catch (MutationsRejectedException e1) {
+            if (e1.getAuthorizationFailuresMap().size() > 0)
+              throw new AccumuloSecurityException(test_user_conn.whoami(), org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode.PERMISSION_DENIED,
+                  e1);
+          }
+          throw new IllegalStateException("Should NOT be able to write to a table");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
+            throw e;
+        }
+        break;
+      case BULK_IMPORT:
+        // test for bulk import permission would go here
+        break;
+      case ALTER_TABLE:
+        Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
+        groups.put("tgroup", new HashSet<Text>(Arrays.asList(new Text("t1"), new Text("t2"))));
+        try {
+          test_user_conn.tableOperations().setLocalityGroups(TEST_TABLE, groups);
+          throw new IllegalStateException("User should not be able to set locality groups");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
+            throw e;
+        }
+        break;
+      case DROP_TABLE:
+        try {
+          test_user_conn.tableOperations().delete(TEST_TABLE);
+          throw new IllegalStateException("User should not be able to delete the table");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
+            throw e;
+        }
+        break;
+      case GRANT:
+        try {
+          test_user_conn.securityOperations().grantTablePermission("root", TEST_TABLE, TablePermission.GRANT);
+          throw new IllegalStateException("User should not be able to grant permissions");
+        } catch (AccumuloSecurityException e) {
+          if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
+            throw e;
+        }
+        break;
+      default:
+        throw new IllegalArgumentException("Unrecognized table Permission: " + perm);
+    }
+  }
+  
+  private static void testGrantedTablePermission(Connector root_conn, Connector test_user_conn, TablePermission perm) throws AccumuloException,
+  TableExistsException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
+    Scanner scanner;
+    BatchWriter writer;
+    Mutation m;
+    log.debug("Confirming that the presence of the " + perm + " permission properly permits the user");
+    
+    // test permission after granting it
+    switch (perm) {
+      case READ:
+        scanner = test_user_conn.createScanner(TEST_TABLE, Authorizations.EMPTY);
+        Iterator<Entry<Key,Value>> iter = scanner.iterator();
+        while (iter.hasNext())
+          iter.next();
+        break;
+      case WRITE:
+        writer = test_user_conn.createBatchWriter(TEST_TABLE, new BatchWriterConfig());
+        m = new Mutation(new Text("row"));
+        m.put(new Text("a"), new Text("b"), new Value("c".getBytes()));
+        writer.addMutation(m);
+        writer.close();
+        break;
+      case BULK_IMPORT:
+        // test for bulk import permission would go here
+        break;
+      case ALTER_TABLE:
+        Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
+        groups.put("tgroup", new HashSet<Text>(Arrays.asList(new Text("t1"), new Text("t2"))));
+        // actually exercise the granted permission; without this call the case verified nothing
+        test_user_conn.tableOperations().setLocalityGroups(TEST_TABLE, groups);
+        break;
+      case DROP_TABLE:
+        test_user_conn.tableOperations().delete(TEST_TABLE);
+        break;
+      case GRANT:
+        test_user_conn.securityOperations().grantTablePermission("root", TEST_TABLE, TablePermission.GRANT);
+        break;
+      default:
+        throw new IllegalArgumentException("Unrecognized table Permission: " + perm);
+    }
+  }
+  
+  private static void verifyHasOnlyTheseTablePermissions(Connector root_conn, String user, String table, TablePermission... perms) throws AccumuloException,
+  AccumuloSecurityException {
+    List<TablePermission> permList = Arrays.asList(perms);
+    for (TablePermission p : TablePermission.values()) {
+      if (permList.contains(p)) {
+        // should have these
+        if (!root_conn.securityOperations().hasTablePermission(user, table, p))
+          throw new IllegalStateException(user + " SHOULD have table permission " + p + " for table " + table);
+      } else {
+        // should not have these
+        if (root_conn.securityOperations().hasTablePermission(user, table, p))
+          throw new IllegalStateException(user + " SHOULD NOT have table permission " + p + " for table " + table);
+      }
+    }
+  }
+  
+  private static void verifyHasNoTablePermissions(Connector root_conn, String user, String table, TablePermission... perms) throws AccumuloException,
+  AccumuloSecurityException {
+    for (TablePermission p : perms)
+      if (root_conn.securityOperations().hasTablePermission(user, table, p))
+        throw new IllegalStateException(user + " SHOULD NOT have table permission " + p + " for table " + table);
+  }
+}

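For readers following the test flow: the core of systemPermissionsTest() is a grant / verify / exercise / revoke loop per permission. Stripped of the verification plumbing, the client calls involved reduce to the sequence below. This is a sketch using the same SecurityOperations API the test exercises; the user name, password, and choice of CREATE_TABLE are illustrative (GRANT itself cannot be granted, as the test checks):

    Connector root = getConnector();
    root.securityOperations().createLocalUser("test_user", new PasswordToken("test_password"));
    root.securityOperations().grantSystemPermission("test_user", SystemPermission.CREATE_TABLE);
    // ... the granted user can now create tables ...
    root.securityOperations().revokeSystemPermission("test_user", SystemPermission.CREATE_TABLE);
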
http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
index a6c7802..c4c5980 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
@@ -56,7 +56,7 @@ import org.junit.Test;
 
 public class ReadWriteIT extends MacTest {
   
-  static final int ROWS = 20000;
+  static final int ROWS = 200000;
   static final int COLS = 1;
   static final String COLF = "colf";
   
@@ -81,11 +81,11 @@ public class ReadWriteIT extends MacTest {
     monitor.destroy();
   }
   
-  public void ingest(Connector connector, int rows, int cols, int width, int offset) throws Exception {
+  public static void ingest(Connector connector, int rows, int cols, int width, int offset) throws Exception {
     ingest(connector, rows, cols, width, offset, COLF);
   }
   
-  public void ingest(Connector connector, int rows, int cols, int width, int offset, String colf) throws Exception {
+  public static void ingest(Connector connector, int rows, int cols, int width, int offset, String colf) throws Exception {
     TestIngest.Opts opts = new TestIngest.Opts();
     opts.rows = rows;
     opts.cols = cols;
@@ -96,10 +96,10 @@ public class ReadWriteIT extends MacTest {
     TestIngest.ingest(connector, opts, new BatchWriterOpts());
   }
   
-  private void verify(Connector connector, int rows, int cols, int width, int offset) throws Exception {
+  private static void verify(Connector connector, int rows, int cols, int width, int offset) throws Exception {
     verify(connector, rows, cols, width, offset, COLF);
   }
-  private void verify(Connector connector, int rows, int cols, int width, int offset, String colf) throws Exception {
+  private static void verify(Connector connector, int rows, int cols, int width, int offset, String colf) throws Exception {
     ScannerOpts scannerOpts = new ScannerOpts();
     VerifyIngest.Opts opts = new VerifyIngest.Opts();
     opts.rows = rows;
@@ -135,6 +135,10 @@ public class ReadWriteIT extends MacTest {
   public void interleaved() throws Exception {
     // read and write concurrently
     final Connector connector = getConnector();
+    interleaveTest(connector);
+  }
+ 
+  static void interleaveTest(final Connector connector) throws Exception {
     final AtomicBoolean fail = new AtomicBoolean(false);
     final int CHUNKSIZE = ROWS / 10;
     ingest(connector, CHUNKSIZE, 1, 50, 0);

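The point of turning ingest()/verify() into statics above is reuse: other integration tests in this package can drive the same load without instantiating ReadWriteIT. A sketch of the intended call pattern; the test class name and row count are illustrative, not part of the commit:

    public class SomeFeatureIT extends MacTest {
      @Test(timeout = 60 * 1000)
      public void roundTrip() throws Exception {
        Connector c = getConnector();
        // (rows, cols, width, offset) -- same arguments ReadWriteIT passes itself
        ReadWriteIT.ingest(c, 1000, 1, 50, 0);
        ReadWriteIT.interleaveTest(c);   // package-private static, visible within the package
      }
    }
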

[13/50] [abbrv] ACCUMULO-1481: Add tests for splitting/merging root table; refactor to consolidate metadata constants and structures in an organized way; begin consolidating metadata ops into a servicer interface to abstract the code that actually does

Posted by ct...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java b/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
index 293c54d..f18e5bc 100644
--- a/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
+++ b/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
@@ -61,11 +61,16 @@ import org.apache.accumulo.core.gc.thrift.GCMonitorService.Processor;
 import org.apache.accumulo.core.gc.thrift.GCStatus;
 import org.apache.accumulo.core.gc.thrift.GcCycleStats;
 import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ScanFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.SecurityUtil;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.NamingThreadFactory;
 import org.apache.accumulo.core.util.ServerServices;
 import org.apache.accumulo.core.util.ServerServices.Service;
@@ -308,6 +313,7 @@ public class SimpleGarbageCollector implements Iface {
       try {
         Connector connector = instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials));
         connector.tableOperations().compact(MetadataTable.NAME, null, null, true, true);
+        connector.tableOperations().compact(RootTable.NAME, null, null, true, true);
       } catch (Exception e) {
         log.warn(e, e);
       }
@@ -452,19 +458,24 @@ public class SimpleGarbageCollector implements Iface {
     }
     
     checkForBulkProcessingFiles = false;
-    Range range = MetadataTable.DELETED_RANGE;
-    candidates.addAll(getBatch(MetadataTable.DELETED_RANGE.getStartKey().getRow().toString(), range));
+    candidates.addAll(getBatch(RootTable.NAME));
     if (candidateMemExceeded)
       return candidates;
     
-    range = MetadataTable.DELETED_RANGE;
-    candidates.addAll(getBatch(MetadataTable.DELETED_RANGE.getStartKey().getRow().toString(), range));
+    candidates.addAll(getBatch(MetadataTable.NAME));
     return candidates;
   }
   
-  private Collection<String> getBatch(String prefix, Range range) throws Exception {
+  /**
+   * Gets a batch of delete markers from the specified table
+   * 
+   * @param tableName
+   *          the name of the system table to scan (either {@link RootTable#NAME} or {@link MetadataTable#NAME})
+   */
+  private Collection<String> getBatch(String tableName) throws Exception {
     // want to ensure GC makes progress... if the 1st N deletes are stable and we keep processing them,
     // then will never inspect deletes after N
+    Range range = MetadataSchema.DeletesSection.getRange();
     if (continueKey != null) {
       if (!range.contains(continueKey)) {
         // continue key is for some other range
@@ -474,13 +485,13 @@ public class SimpleGarbageCollector implements Iface {
       continueKey = null;
     }
     
-    Scanner scanner = instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials)).createScanner(MetadataTable.NAME,
+    Scanner scanner = instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials)).createScanner(tableName,
         Authorizations.EMPTY);
     scanner.setRange(range);
     List<String> result = new ArrayList<String>();
     // find candidates for deletion; chop off the prefix
     for (Entry<Key,Value> entry : scanner) {
-      String cand = entry.getKey().getRow().toString().substring(prefix.length());
+      String cand = entry.getKey().getRow().toString().substring(MetadataSchema.DeletesSection.getRowPrefix().length());
       result.add(cand);
       checkForBulkProcessingFiles |= cand.toLowerCase(Locale.ENGLISH).contains(Constants.BULK_PREFIX);
       if (almostOutOfMemory()) {
@@ -504,7 +515,11 @@ public class SimpleGarbageCollector implements Iface {
    * selected 2. They are still in use in the file column family in the METADATA table
    */
   public void confirmDeletes(SortedSet<String> candidates) throws AccumuloException {
-    
+    confirmDeletes(RootTable.NAME, candidates);
+    confirmDeletes(MetadataTable.NAME, candidates);
+  }
+  
+  private void confirmDeletes(String tableName, SortedSet<String> candidates) throws AccumuloException {
     Scanner scanner;
     if (offline) {
       // TODO
@@ -516,8 +531,8 @@ public class SimpleGarbageCollector implements Iface {
       // }
     } else {
       try {
-        scanner = new IsolatedScanner(instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials)).createScanner(
-            MetadataTable.NAME, Authorizations.EMPTY));
+        scanner = new IsolatedScanner(instance.getConnector(credentials.getPrincipal(), CredentialHelper.extractToken(credentials)).createScanner(tableName,
+            Authorizations.EMPTY));
       } catch (AccumuloSecurityException ex) {
         throw new AccumuloException(ex);
       } catch (TableNotFoundException ex) {
@@ -530,14 +545,14 @@ public class SimpleGarbageCollector implements Iface {
       
       log.debug("Checking for bulk processing flags");
       
-      scanner.setRange(MetadataTable.BLIP_KEYSPACE);
+      scanner.setRange(MetadataSchema.BlipSection.getRange());
       
       // WARNING: This block is IMPORTANT
       // You MUST REMOVE candidates that are in the same folder as a bulk
       // processing flag!
       
       for (Entry<Key,Value> entry : scanner) {
-        String blipPath = entry.getKey().getRow().toString().substring(MetadataTable.BLIP_FLAG_PREFIX.length());
+        String blipPath = entry.getKey().getRow().toString().substring(MetadataSchema.BlipSection.getRowPrefix().length());
         Iterator<String> tailIter = candidates.tailSet(blipPath).iterator();
         int count = 0;
         while (tailIter.hasNext()) {
@@ -558,17 +573,17 @@ public class SimpleGarbageCollector implements Iface {
     // skip candidates that are still in use in the file column family in
     // the metadata table
     scanner.clearColumns();
-    scanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
-    scanner.fetchColumnFamily(MetadataTable.SCANFILE_COLUMN_FAMILY);
-    MetadataTable.DIRECTORY_COLUMN.fetch(scanner);
-    TabletIterator tabletIterator = new TabletIterator(scanner, MetadataTable.KEYSPACE, false, true);
+    scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+    scanner.fetchColumnFamily(ScanFileColumnFamily.NAME);
+    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
+    TabletIterator tabletIterator = new TabletIterator(scanner, MetadataSchema.TabletsSection.getRange(), false, true);
     
     while (tabletIterator.hasNext()) {
       Map<Key,Value> tabletKeyValues = tabletIterator.next();
       
       for (Entry<Key,Value> entry : tabletKeyValues.entrySet()) {
-        if (entry.getKey().getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)
-            || entry.getKey().getColumnFamily().equals(MetadataTable.SCANFILE_COLUMN_FAMILY)) {
+        if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)
+            || entry.getKey().getColumnFamily().equals(ScanFileColumnFamily.NAME)) {
           
           String cf = entry.getKey().getColumnQualifier().toString();
           String delete = cf;
@@ -586,16 +601,16 @@ public class SimpleGarbageCollector implements Iface {
           // WARNING: This line is EXTREMELY IMPORTANT.
           // You MUST REMOVE candidates that are still in use
           if (candidates.remove(delete))
-            log.debug("Candidate was still in use in the METADATA table: " + delete);
+            log.debug("Candidate was still in use in the " + tableName + " table: " + delete);
           
           String path = delete.substring(0, delete.lastIndexOf('/'));
           if (candidates.remove(path))
-            log.debug("Candidate was still in use in the METADATA table: " + path);
-        } else if (MetadataTable.DIRECTORY_COLUMN.hasColumns(entry.getKey())) {
+            log.debug("Candidate was still in use in the " + tableName + " table: " + path);
+        } else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(entry.getKey())) {
           String table = new String(KeyExtent.tableOfMetadataRow(entry.getKey().getRow()));
           String delete = "/" + table + entry.getValue().toString();
           if (candidates.remove(delete))
-            log.debug("Candidate was still in use in the METADATA table: " + delete);
+            log.debug("Candidate was still in use in the " + tableName + " table: " + delete);
         } else
           throw new AccumuloException("Scanner over metadata table returned unexpected column : " + entry.getKey());
       }
@@ -604,16 +619,12 @@ public class SimpleGarbageCollector implements Iface {
   
   final static String METADATA_TABLE_DIR = "/" + MetadataTable.ID;
   
-  private static void putMarkerDeleteMutation(final String delete, final BatchWriter writer, final BatchWriter rootWriter) throws MutationsRejectedException {
-    if (delete.contains(METADATA_TABLE_DIR)) {
-      Mutation m = new Mutation(new Text(MetadataTable.DELETED_RANGE.getStartKey().getRow().toString() + delete));
-      m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
-      rootWriter.addMutation(m);
-    } else {
-      Mutation m = new Mutation(new Text(MetadataTable.DELETED_RANGE.getStartKey().getRow().toString() + delete));
-      m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
-      writer.addMutation(m);
-    }
+  private static void putMarkerDeleteMutation(final String delete, final BatchWriter metadataWriter, final BatchWriter rootWriter)
+      throws MutationsRejectedException {
+    BatchWriter writer = delete.contains(METADATA_TABLE_DIR) ? rootWriter : metadataWriter;
+    Mutation m = new Mutation(MetadataSchema.DeletesSection.getRowPrefix() + delete);
+    m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
+    writer.addMutation(m);
   }
   
   /**
@@ -629,9 +640,13 @@ public class SimpleGarbageCollector implements Iface {
       try {
         c = instance.getConnector(SecurityConstants.SYSTEM_PRINCIPAL, SecurityConstants.getSystemToken());
         writer = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-        rootWriter = c.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
-      } catch (Exception e) {
-        log.error("Unable to create writer to remove file from the " + MetadataTable.NAME + " table", e);
+        rootWriter = c.createBatchWriter(RootTable.NAME, new BatchWriterConfig());
+      } catch (AccumuloException e) {
+        log.error("Unable to connect to Accumulo to write deletes", e);
+      } catch (AccumuloSecurityException e) {
+        log.error("Unable to connect to Accumulo to write deletes", e);
+      } catch (TableNotFoundException e) {
+        log.error("Unable to create writer to remove file from the " + e.getTableName() + " table", e);
       }
     }
     // when deleting a dir and all files in that dir, only need to delete the dir

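The refactor above replaces the old DELETED_RANGE string math with MetadataSchema.DeletesSection accessors, so the row prefix for delete markers is defined in exactly one place and shared by the writer and the scanner. Roughly, the two sides look like this (a sketch only; the file path is illustrative, and EMPTY_TEXT stands for the empty Text constant the GC already uses):

    // clearing a processed marker, mirroring putMarkerDeleteMutation() above
    String delete = "/1/default_tablet/F0000070.rf";
    Mutation m = new Mutation(MetadataSchema.DeletesSection.getRowPrefix() + delete);
    m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
    writer.addMutation(m);

    // and when scanning candidates back in getBatch(), the same prefix is stripped:
    String cand = entry.getKey().getRow().toString()
        .substring(MetadataSchema.DeletesSection.getRowPrefix().length());
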
http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java b/server/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java
index 4e8fefd..d8bcebe 100644
--- a/server/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java
+++ b/server/src/main/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilter.java
@@ -27,7 +27,7 @@ import org.apache.accumulo.core.iterators.Filter;
 import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
 import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
 import org.apache.log4j.Logger;
@@ -48,7 +48,7 @@ public class MetadataBulkLoadFilter extends Filter {
   
   @Override
   public boolean accept(Key k, Value v) {
-    if (!k.isDeleted() && k.compareColumnFamily(MetadataTable.BULKFILE_COLUMN_FAMILY) == 0) {
+    if (!k.isDeleted() && k.compareColumnFamily(TabletsSection.BulkFileColumnFamily.NAME) == 0) {
       long txid = Long.valueOf(v.toString());
       
       Status status = bulkTxStatusCache.get(txid);
@@ -69,10 +69,10 @@ public class MetadataBulkLoadFilter extends Filter {
       
       return status == Status.ACTIVE;
     }
-
+    
     return true;
   }
-
+  
   @Override
   public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
     super.init(source, options, env);
@@ -80,7 +80,7 @@ public class MetadataBulkLoadFilter extends Filter {
     if (env.getIteratorScope() == IteratorScope.scan) {
       throw new IOException("This iterator not intended for use at scan time");
     }
-
+    
     bulkTxStatusCache = new HashMap<Long,MetadataBulkLoadFilter.Status>();
     arbitrator = getArbitrator();
   }

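For context, MetadataBulkLoadFilter follows the standard Filter contract: accept(Key, Value) returns false to suppress an entry during compaction, here based on whether the bulk-load transaction is still active. A minimal filter of the same shape, purely illustrative and not part of this commit:

    public class KeepOnlyFamilyFilter extends Filter {
      private Text family = new Text("loaded");   // illustrative column family

      @Override
      public boolean accept(Key k, Value v) {
        // keep only entries in the chosen column family
        return k.compareColumnFamily(family) == 0;
      }
    }
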
http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/master/Master.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/Master.java b/server/src/main/java/org/apache/accumulo/server/master/Master.java
index 4aab0df..b5ffd0a 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/Master.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/Master.java
@@ -69,12 +69,15 @@ import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletLoadState;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
 import org.apache.accumulo.core.master.thrift.TabletSplit;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.SecurityUtil;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.util.ByteBufferUtil;
 import org.apache.accumulo.core.util.Daemon;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.AgeOffStore;
@@ -131,7 +134,7 @@ import org.apache.accumulo.server.security.SecurityOperation;
 import org.apache.accumulo.server.util.AddressUtil;
 import org.apache.accumulo.server.util.DefaultMap;
 import org.apache.accumulo.server.util.Halt;
-import org.apache.accumulo.server.util.MetadataTable;
+import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.util.SystemPropUtil;
 import org.apache.accumulo.server.util.TServerUtils;
 import org.apache.accumulo.server.util.TablePropUtil;
@@ -288,7 +291,7 @@ public class Master implements LiveTServerSet.Listener, TableObserver, CurrentSt
           @Override
           public void run() {
             try {
-              MetadataTable.moveMetaDeleteMarkers(instance, SecurityConstants.getSystemCredentials());
+              MetadataTableUtil.moveMetaDeleteMarkers(instance, SecurityConstants.getSystemCredentials());
               Accumulo.updateAccumuloVersion(fs);
               
               log.info("Upgrade complete");
@@ -379,8 +382,8 @@ public class Master implements LiveTServerSet.Listener, TableObserver, CurrentSt
   }
   
   private void checkNotMetadataTable(String tableName, TableOperation operation) throws ThriftTableOperationException {
-    if (tableName.compareTo(MetadataTable.NAME) == 0) {
-      String why = "Table names cannot be == " + MetadataTable.NAME;
+    if (MetadataTable.NAME.equals(tableName) || RootTable.NAME.equals(tableName)) {
+      String why = "Table names cannot be == " + RootTable.NAME + " or " + MetadataTable.NAME;
       log.warn(why);
       throw new ThriftTableOperationException(null, tableName, operation, TableOperationExceptionType.OTHER, why);
     }
@@ -527,10 +530,10 @@ public class Master implements LiveTServerSet.Listener, TableObserver, CurrentSt
         try {
           Connector conn = getConnector();
           Scanner scanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
-          MetadataTable.FLUSH_COLUMN.fetch(scanner);
-          MetadataTable.DIRECTORY_COLUMN.fetch(scanner);
-          scanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
-          scanner.fetchColumnFamily(MetadataTable.LOG_COLUMN_FAMILY);
+          TabletsSection.ServerColumnFamily.FLUSH_COLUMN.fetch(scanner);
+          TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
+          scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
+          scanner.fetchColumnFamily(LogColumnFamily.NAME);
           scanner.setRange(new KeyExtent(new Text(tableId), null, ByteBufferUtil.toText(startRow)).toMetadataRange());
           
           RowIterator ri = new RowIterator(scanner);
@@ -553,14 +556,14 @@ public class Master implements LiveTServerSet.Listener, TableObserver, CurrentSt
               entry = row.next();
               Key key = entry.getKey();
               
-              if (MetadataTable.FLUSH_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier())) {
+              if (TabletsSection.ServerColumnFamily.FLUSH_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier())) {
                 tabletFlushID = Long.parseLong(entry.getValue().toString());
               }
               
-              if (MetadataTable.LOG_COLUMN_FAMILY.equals(key.getColumnFamily()))
+              if (LogColumnFamily.NAME.equals(key.getColumnFamily()))
                 logs++;
               
-              if (MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY.equals(key.getColumnFamily())) {
+              if (TabletsSection.CurrentLocationColumnFamily.NAME.equals(key.getColumnFamily())) {
                 online = true;
                 server = new TServerInstance(entry.getValue(), key.getColumnQualifier());
               }
@@ -899,10 +902,6 @@ public class Master implements LiveTServerSet.Listener, TableObserver, CurrentSt
           Text startRow = ByteBufferUtil.toText(arguments.get(1));
           Text endRow = ByteBufferUtil.toText(arguments.get(2));
           final String tableId = checkTableId(tableName, TableOperation.MERGE);
-          if (tableId.equals(RootTable.ID)) {
-            throw new ThriftTableOperationException(null, tableName, TableOperation.MERGE, TableOperationExceptionType.OTHER,
-                "cannot merge or split the root table");
-          }
           log.debug("Creating merge op: " + tableId + " " + startRow + " " + endRow);
           
           if (!security.canMerge(c, tableId))
@@ -1027,8 +1026,6 @@ public class Master implements LiveTServerSet.Listener, TableObserver, CurrentSt
   }
   
   public MergeInfo getMergeInfo(KeyExtent tablet) {
-    if (tablet.isRootTablet())
-      return new MergeInfo();
     return getMergeInfo(tablet.getTableId());
   }
   
@@ -1251,7 +1248,7 @@ public class Master implements LiveTServerSet.Listener, TableObserver, CurrentSt
     private void cleanupMutations() throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
       Connector connector = getConnector();
       Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-      MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
+      TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
       Set<KeyExtent> found = new HashSet<KeyExtent>();
       for (Entry<Key,Value> entry : scanner) {
         KeyExtent extent = new KeyExtent(entry.getKey().getRow(), entry.getValue());

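One small idiom worth noting in the checkNotMetadataTable() change: putting the constant on the left (MetadataTable.NAME.equals(tableName)) is null-safe, unlike the old tableName.compareTo(...) == 0 form. The guard in isolation, as a sketch:

    private static boolean isSystemTableName(String tableName) {
      // constant-first equals avoids an NPE if tableName is null
      return MetadataTable.NAME.equals(tableName) || RootTable.NAME.equals(tableName);
    }
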
http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/master/TabletGroupWatcher.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/TabletGroupWatcher.java b/server/src/main/java/org/apache/accumulo/server/master/TabletGroupWatcher.java
index 37f519e..c0479dd 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/TabletGroupWatcher.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/TabletGroupWatcher.java
@@ -24,11 +24,11 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.TreeSet;
-import java.util.Map.Entry;
 
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
@@ -46,10 +46,14 @@ import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ChoppedColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.tabletserver.thrift.NotServingTabletException;
 import org.apache.accumulo.core.util.Daemon;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.master.LiveTServerSet.TServerConnection;
@@ -68,7 +72,7 @@ import org.apache.accumulo.server.master.state.TabletStateStore;
 import org.apache.accumulo.server.master.state.tables.TableManager;
 import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.tabletserver.TabletTime;
-import org.apache.accumulo.server.util.MetadataTable;
+import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.hadoop.io.Text;
 import org.apache.thrift.TException;
 
@@ -396,32 +400,32 @@ class TabletGroupWatcher extends Daemon {
           extent.getEndRow()), true);
       Scanner scanner = conn.createScanner(targetSystemTable, Authorizations.EMPTY);
       scanner.setRange(deleteRange);
-      MetadataTable.DIRECTORY_COLUMN.fetch(scanner);
-      MetadataTable.TIME_COLUMN.fetch(scanner);
-      scanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
-      scanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
+      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
+      TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(scanner);
+      scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+      scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
       Set<FileRef> datafiles = new TreeSet<FileRef>();
       for (Entry<Key,Value> entry : scanner) {
         Key key = entry.getKey();
-        if (key.compareColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY) == 0) {
+        if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
           datafiles.add(new FileRef(this.master.fs, key));
           if (datafiles.size() > 1000) {
-            MetadataTable.addDeleteEntries(extent, datafiles, SecurityConstants.getSystemCredentials());
+            MetadataTableUtil.addDeleteEntries(extent, datafiles, SecurityConstants.getSystemCredentials());
             datafiles.clear();
           }
-        } else if (MetadataTable.TIME_COLUMN.hasColumns(key)) {
+        } else if (TabletsSection.ServerColumnFamily.TIME_COLUMN.hasColumns(key)) {
           timeType = entry.getValue().toString().charAt(0);
-        } else if (key.compareColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY) == 0) {
+        } else if (key.compareColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME) == 0) {
           throw new IllegalStateException("Tablet " + key.getRow() + " is assigned during a merge!");
-        } else if (MetadataTable.DIRECTORY_COLUMN.hasColumns(key)) {
+        } else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
           datafiles.add(new FileRef(this.master.fs, key));
           if (datafiles.size() > 1000) {
-            MetadataTable.addDeleteEntries(extent, datafiles, SecurityConstants.getSystemCredentials());
+            MetadataTableUtil.addDeleteEntries(extent, datafiles, SecurityConstants.getSystemCredentials());
             datafiles.clear();
           }
         }
       }
-      MetadataTable.addDeleteEntries(extent, datafiles, SecurityConstants.getSystemCredentials());
+      MetadataTableUtil.addDeleteEntries(extent, datafiles, SecurityConstants.getSystemCredentials());
       BatchWriter bw = conn.createBatchWriter(targetSystemTable, new BatchWriterConfig());
       try {
         deleteTablets(info, deleteRange, bw, conn);
@@ -434,8 +438,8 @@ class TabletGroupWatcher extends Daemon {
         bw = conn.createBatchWriter(targetSystemTable, new BatchWriterConfig());
         try {
           Mutation m = new Mutation(followingTablet.getMetadataEntry());
-          MetadataTable.PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(extent.getPrevEndRow()));
-          MetadataTable.CHOPPED_COLUMN.putDelete(m);
+          TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(extent.getPrevEndRow()));
+          ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);
           bw.addMutation(m);
           bw.flush();
         } finally {
@@ -444,7 +448,7 @@ class TabletGroupWatcher extends Daemon {
       } else {
         // Recreate the default tablet to hold the end of the table
         Master.log.debug("Recreating the last tablet to point to " + extent.getPrevEndRow());
-        MetadataTable.addTablet(new KeyExtent(extent.getTableId(), null, extent.getPrevEndRow()), Constants.DEFAULT_TABLET_LOCATION,
+        MetadataTableUtil.addTablet(new KeyExtent(extent.getTableId(), null, extent.getPrevEndRow()), Constants.DEFAULT_TABLET_LOCATION,
             SecurityConstants.getSystemCredentials(), timeType, this.master.masterLock);
       }
     } catch (Exception ex) {
@@ -477,25 +481,25 @@ class TabletGroupWatcher extends Daemon {
       bw = conn.createBatchWriter(targetSystemTable, new BatchWriterConfig());
       Scanner scanner = conn.createScanner(targetSystemTable, Authorizations.EMPTY);
       scanner.setRange(scanRange);
-      MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
-      MetadataTable.TIME_COLUMN.fetch(scanner);
-      MetadataTable.DIRECTORY_COLUMN.fetch(scanner);
-      scanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
+      TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
+      TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(scanner);
+      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
+      scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
       Mutation m = new Mutation(stopRow);
       String maxLogicalTime = null;
       for (Entry<Key,Value> entry : scanner) {
         Key key = entry.getKey();
         Value value = entry.getValue();
-        if (key.getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
+        if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
           m.put(key.getColumnFamily(), key.getColumnQualifier(), value);
           fileCount++;
-        } else if (MetadataTable.PREV_ROW_COLUMN.hasColumns(key) && firstPrevRowValue == null) {
+        } else if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key) && firstPrevRowValue == null) {
           Master.log.debug("prevRow entry for lowest tablet is " + value);
           firstPrevRowValue = new Value(value);
-        } else if (MetadataTable.TIME_COLUMN.hasColumns(key)) {
+        } else if (TabletsSection.ServerColumnFamily.TIME_COLUMN.hasColumns(key)) {
           maxLogicalTime = TabletTime.maxMetadataTime(maxLogicalTime, value.toString());
-        } else if (MetadataTable.DIRECTORY_COLUMN.hasColumns(key)) {
-          bw.addMutation(MetadataTable.createDeleteMutation(range.getTableId().toString(), entry.getValue().toString()));
+        } else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
+          bw.addMutation(MetadataTableUtil.createDeleteMutation(range.getTableId().toString(), entry.getValue().toString()));
         }
       }
       
@@ -503,15 +507,15 @@ class TabletGroupWatcher extends Daemon {
       // the loop above
       scanner = conn.createScanner(targetSystemTable, Authorizations.EMPTY);
       scanner.setRange(new Range(stopRow));
-      MetadataTable.TIME_COLUMN.fetch(scanner);
+      TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(scanner);
       for (Entry<Key,Value> entry : scanner) {
-        if (MetadataTable.TIME_COLUMN.hasColumns(entry.getKey())) {
+        if (TabletsSection.ServerColumnFamily.TIME_COLUMN.hasColumns(entry.getKey())) {
           maxLogicalTime = TabletTime.maxMetadataTime(maxLogicalTime, entry.getValue().toString());
         }
       }
       
       if (maxLogicalTime != null)
-        MetadataTable.TIME_COLUMN.put(m, new Value(maxLogicalTime.getBytes()));
+        TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(maxLogicalTime.getBytes()));
       
       if (!m.getUpdates().isEmpty()) {
         bw.addMutation(m);
@@ -536,7 +540,7 @@ class TabletGroupWatcher extends Daemon {
       
       // Clean-up the last chopped marker
       m = new Mutation(stopRow);
-      MetadataTable.CHOPPED_COLUMN.putDelete(m);
+      ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);
       bw.addMutation(m);
       bw.flush();
       
@@ -586,7 +590,7 @@ class TabletGroupWatcher extends Daemon {
     try {
       Connector conn = this.master.getConnector();
       Scanner scanner = conn.createScanner(range.isMeta() ? RootTable.NAME : MetadataTable.NAME, Authorizations.EMPTY);
-      MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
+      TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
       KeyExtent start = new KeyExtent(range.getTableId(), range.getEndRow(), null);
       scanner.setRange(new Range(start.getMetadataEntry(), null));
       Iterator<Entry<Key,Value>> iterator = scanner.iterator();
@@ -645,4 +649,4 @@ class TabletGroupWatcher extends Daemon {
     }
   }
   
-}
\ No newline at end of file
+}
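
For readers tracking the schema move: the merge code above now selects the prev-row, time, and directory columns plus the data-file family through the nested MetadataSchema classes instead of the old flat MetadataTable constants. A minimal standalone sketch of that column selection (the class and method names here are hypothetical, and the merge bookkeeping around the loop is elided):

    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.MetadataTable;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
    import org.apache.accumulo.core.security.Authorizations;

    public class MergeScanSketch {
      // Counts file entries and reports logical time over a metadata range,
      // using the same column selection as the merge code above.
      static void scanMergeRange(Connector conn, Range scanRange) throws Exception {
        Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
        scanner.setRange(scanRange);
        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
        TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(scanner);
        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
        scanner.fetchColumnFamily(DataFileColumnFamily.NAME);

        int fileCount = 0;
        for (Entry<Key,Value> entry : scanner) {
          Key key = entry.getKey();
          if (key.getColumnFamily().equals(DataFileColumnFamily.NAME))
            fileCount++;
          else if (TabletsSection.ServerColumnFamily.TIME_COLUMN.hasColumns(key))
            System.out.println("logical time entry: " + entry.getValue());
        }
        System.out.println(fileCount + " file entries in range");
      }
    }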

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/master/balancer/ChaoticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/balancer/ChaoticLoadBalancer.java b/server/src/main/java/org/apache/accumulo/server/master/balancer/ChaoticLoadBalancer.java
index 14566e1..22fb17b 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/balancer/ChaoticLoadBalancer.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/balancer/ChaoticLoadBalancer.java
@@ -29,8 +29,8 @@ import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.tabletserver.thrift.TabletStats;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.master.state.TabletMigration;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/master/state/MergeStats.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/state/MergeStats.java b/server/src/main/java/org/apache/accumulo/server/master/state/MergeStats.java
index 64172ee..ca1529e 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/state/MergeStats.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/state/MergeStats.java
@@ -28,9 +28,9 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.master.state.TabletLocationState.BadLocationStateException;
@@ -66,8 +66,6 @@ public class MergeStats {
   }
   
   public void update(KeyExtent ke, TabletState state, boolean chopped, boolean hasWALs) {
-    if (ke.isRootTablet())
-      return;
     if (info.getState().equals(MergeState.NONE))
       return;
     if (!upperSplit && info.getExtent().getEndRow().equals(ke.getPrevEndRow())) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java b/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java
index ec25f85..b58e618 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataStateStore.java
@@ -26,10 +26,11 @@ import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.MutationsRejectedException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.hadoop.io.Text;
@@ -67,7 +68,7 @@ public class MetaDataStateStore extends TabletStateStore {
   
   @Override
   public Iterator<TabletLocationState> iterator() {
-    return new MetaDataTableScanner(instance, auths, RootTable.METADATA_TABLETS_RANGE, state);
+    return new MetaDataTableScanner(instance, auths, MetadataSchema.TabletsSection.getRange(), state);
   }
   
   @Override
@@ -77,8 +78,8 @@ public class MetaDataStateStore extends TabletStateStore {
       for (Assignment assignment : assignments) {
         Mutation m = new Mutation(assignment.tablet.getMetadataEntry());
         Text cq = assignment.server.asColumnQualifier();
-        m.put(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, cq, assignment.server.asMutationValue());
-        m.putDelete(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, cq);
+        m.put(TabletsSection.CurrentLocationColumnFamily.NAME, cq, assignment.server.asMutationValue());
+        m.putDelete(TabletsSection.FutureLocationColumnFamily.NAME, cq);
         writer.addMutation(m);
       }
     } catch (Exception ex) {
@@ -110,7 +111,7 @@ public class MetaDataStateStore extends TabletStateStore {
     try {
       for (Assignment assignment : assignments) {
         Mutation m = new Mutation(assignment.tablet.getMetadataEntry());
-        m.put(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, assignment.server.asColumnQualifier(), assignment.server.asMutationValue());
+        m.put(TabletsSection.FutureLocationColumnFamily.NAME, assignment.server.asColumnQualifier(), assignment.server.asMutationValue());
         writer.addMutation(m);
       }
     } catch (Exception ex) {
@@ -132,10 +133,10 @@ public class MetaDataStateStore extends TabletStateStore {
       for (TabletLocationState tls : tablets) {
         Mutation m = new Mutation(tls.extent.getMetadataEntry());
         if (tls.current != null) {
-          m.putDelete(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, tls.current.asColumnQualifier());
+          m.putDelete(TabletsSection.CurrentLocationColumnFamily.NAME, tls.current.asColumnQualifier());
         }
         if (tls.future != null) {
-          m.putDelete(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, tls.future.asColumnQualifier());
+          m.putDelete(TabletsSection.FutureLocationColumnFamily.NAME, tls.future.asColumnQualifier());
         }
         writer.addMutation(m);
       }
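
The assignment path above now writes the current/future location pair through TabletsSection column families. A minimal sketch of that mutation shape, assuming a live Connector and a server column qualifier/value of the kind TServerInstance produces (class and method names are hypothetical):

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.MetadataTable;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
    import org.apache.hadoop.io.Text;

    public class LocationMutationSketch {
      // Promotes a tablet's location from "future" to "current" in a single
      // mutation, the same put/putDelete pair the state store issues above.
      static void setLocation(Connector conn, Text metadataRow, Text serverCq, Value serverValue) throws Exception {
        BatchWriter writer = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
        try {
          Mutation m = new Mutation(metadataRow);
          m.put(TabletsSection.CurrentLocationColumnFamily.NAME, serverCq, serverValue);
          m.putDelete(TabletsSection.FutureLocationColumnFamily.NAME, serverCq);
          writer.addMutation(m);
        } finally {
          writer.close();
        }
      }
    }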

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataTableScanner.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataTableScanner.java b/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataTableScanner.java
index f58997f..8ca79a8 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataTableScanner.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/state/MetaDataTableScanner.java
@@ -36,10 +36,13 @@ import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.WholeRowIterator;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ChoppedColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.master.state.TabletLocationState.BadLocationStateException;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
@@ -69,11 +72,11 @@ public class MetaDataTableScanner implements Iterator<TabletLocationState> {
   }
   
   static public void configureScanner(ScannerBase scanner, CurrentState state) {
-    MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
-    scanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
-    scanner.fetchColumnFamily(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY);
-    scanner.fetchColumnFamily(MetadataTable.LOG_COLUMN_FAMILY);
-    scanner.fetchColumnFamily(MetadataTable.CHOPPED_COLUMN_FAMILY);
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
+    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
+    scanner.fetchColumnFamily(TabletsSection.FutureLocationColumnFamily.NAME);
+    scanner.fetchColumnFamily(LogColumnFamily.NAME);
+    scanner.fetchColumnFamily(ChoppedColumnFamily.NAME);
     scanner.addScanIterator(new IteratorSetting(1000, "wholeRows", WholeRowIterator.class));
     IteratorSetting tabletChange = new IteratorSetting(1001, "tabletChange", TabletStateChangeIterator.class);
     if (state != null) {
@@ -142,30 +145,30 @@ public class MetaDataTableScanner implements Iterator<TabletLocationState> {
       Text cf = key.getColumnFamily();
       Text cq = key.getColumnQualifier();
       
-      if (cf.compareTo(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY) == 0) {
+      if (cf.compareTo(TabletsSection.FutureLocationColumnFamily.NAME) == 0) {
         TServerInstance location = new TServerInstance(entry.getValue(), cq);
         if (future != null) {
           throw new BadLocationStateException("found two assignments for the same extent " + key.getRow() + ": " + future + " and " + location);
         }
         future = location;
-      } else if (cf.compareTo(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY) == 0) {
+      } else if (cf.compareTo(TabletsSection.CurrentLocationColumnFamily.NAME) == 0) {
         TServerInstance location = new TServerInstance(entry.getValue(), cq);
         if (current != null) {
           throw new BadLocationStateException("found two locations for the same extent " + key.getRow() + ": " + current + " and " + location);
         }
         current = location;
-      } else if (cf.compareTo(MetadataTable.LOG_COLUMN_FAMILY) == 0) {
+      } else if (cf.compareTo(LogColumnFamily.NAME) == 0) {
         String[] split = entry.getValue().toString().split("\\|")[0].split(";");
         walogs.add(Arrays.asList(split));
-      } else if (cf.compareTo(MetadataTable.LAST_LOCATION_COLUMN_FAMILY) == 0) {
+      } else if (cf.compareTo(TabletsSection.LastLocationColumnFamily.NAME) == 0) {
         TServerInstance location = new TServerInstance(entry.getValue(), cq);
         if (last != null) {
           throw new BadLocationStateException("found two last locations for the same extent " + key.getRow() + ": " + last + " and " + location);
         }
         last = new TServerInstance(entry.getValue(), cq);
-      } else if (cf.compareTo(MetadataTable.CHOPPED_COLUMN_FAMILY) == 0) {
+      } else if (cf.compareTo(ChoppedColumnFamily.NAME) == 0) {
         chopped = true;
-      } else if (MetadataTable.PREV_ROW_COLUMN.equals(cf, cq)) {
+      } else if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(cf, cq)) {
         extent = new KeyExtent(row, entry.getValue());
       }
     }
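
The parse loop above dispatches on the relocated family constants. In isolation that dispatch reduces to a comparison chain like the following sketch (hypothetical class name; it returns a label instead of building a TabletLocationState):

    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ChoppedColumnFamily;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
    import org.apache.hadoop.io.Text;

    public class ColumnDispatchSketch {
      // Classifies a metadata key by column family, mirroring the branches in
      // MetaDataTableScanner above.
      static String classify(Key key) {
        Text cf = key.getColumnFamily();
        Text cq = key.getColumnQualifier();
        if (cf.compareTo(TabletsSection.FutureLocationColumnFamily.NAME) == 0)
          return "future location";
        if (cf.compareTo(TabletsSection.CurrentLocationColumnFamily.NAME) == 0)
          return "current location";
        if (cf.compareTo(LogColumnFamily.NAME) == 0)
          return "write-ahead log";
        if (cf.compareTo(TabletsSection.LastLocationColumnFamily.NAME) == 0)
          return "last location";
        if (cf.compareTo(ChoppedColumnFamily.NAME) == 0)
          return "chopped marker";
        if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(cf, cq))
          return "prev row";
        return "other";
      }
    }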

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/master/state/RootTabletStateStore.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/state/RootTabletStateStore.java b/server/src/main/java/org/apache/accumulo/server/master/state/RootTabletStateStore.java
index 52a094c..1aa4b4e 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/state/RootTabletStateStore.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/state/RootTabletStateStore.java
@@ -19,8 +19,9 @@ package org.apache.accumulo.server.master.state;
 import java.util.Iterator;
 
 import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.RootTable;
 
 public class RootTabletStateStore extends MetaDataStateStore {
   
@@ -34,7 +35,7 @@ public class RootTabletStateStore extends MetaDataStateStore {
   
   @Override
   public Iterator<TabletLocationState> iterator() {
-    return new MetaDataTableScanner(instance, auths, RootTable.METADATA_TABLETS_RANGE, state, RootTable.NAME);
+    return new MetaDataTableScanner(instance, auths, MetadataSchema.TabletsSection.getRange(), state, RootTable.NAME);
   }
   
   @Override

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/master/state/TServerInstance.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/state/TServerInstance.java b/server/src/main/java/org/apache/accumulo/server/master/state/TServerInstance.java
index 7b3e6ee..ab3d69f 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/state/TServerInstance.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/state/TServerInstance.java
@@ -22,7 +22,7 @@ import java.net.InetSocketAddress;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.server.util.AddressUtil;
 import org.apache.hadoop.io.Text;
 
@@ -58,19 +58,19 @@ public class TServerInstance implements Comparable<TServerInstance>, Serializabl
   }
   
   public void putLocation(Mutation m) {
-    m.put(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, asColumnQualifier(), asMutationValue());
+    m.put(TabletsSection.CurrentLocationColumnFamily.NAME, asColumnQualifier(), asMutationValue());
   }
   
   public void putFutureLocation(Mutation m) {
-    m.put(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY, asColumnQualifier(), asMutationValue());
+    m.put(TabletsSection.FutureLocationColumnFamily.NAME, asColumnQualifier(), asMutationValue());
   }
   
   public void putLastLocation(Mutation m) {
-    m.put(MetadataTable.LAST_LOCATION_COLUMN_FAMILY, asColumnQualifier(), asMutationValue());
+    m.put(TabletsSection.LastLocationColumnFamily.NAME, asColumnQualifier(), asMutationValue());
   }
   
   public void clearLastLocation(Mutation m) {
-    m.putDelete(MetadataTable.LAST_LOCATION_COLUMN_FAMILY, asColumnQualifier());
+    m.putDelete(TabletsSection.LastLocationColumnFamily.NAME, asColumnQualifier());
   }
   
   @Override

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java b/server/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java
index bcc342c..09a316c 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/state/ZooTabletStateStore.java
@@ -23,9 +23,9 @@ import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
 
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.util.AddressUtil;
-import org.apache.accumulo.core.util.RootTable;
-import org.apache.accumulo.server.util.MetadataTable;
+import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.log4j.Logger;
 
@@ -82,7 +82,7 @@ public class ZooTabletStateStore extends TabletStateStore {
           for (String entry : store.getChildren(RootTable.ZROOT_TABLET_WALOGS)) {
             byte[] logInfo = store.get(RootTable.ZROOT_TABLET_WALOGS + "/" + entry);
             if (logInfo != null) {
-              MetadataTable.LogEntry logEntry = new MetadataTable.LogEntry();
+              MetadataTableUtil.LogEntry logEntry = new MetadataTableUtil.LogEntry();
               logEntry.fromBytes(logInfo);
               logs.add(logEntry.logSet);
               log.debug("root tablet logSet " + logEntry.logSet);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java
index e8146f9..4f44d79 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/tableOps/BulkImport.java
@@ -54,6 +54,8 @@ import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.util.SimpleThreadPool;
@@ -68,7 +70,7 @@ import org.apache.accumulo.server.master.Master;
 import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.tabletserver.UniqueNameAllocator;
-import org.apache.accumulo.server.util.MetadataTable;
+import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.zookeeper.DistributedWorkQueue;
 import org.apache.accumulo.server.zookeeper.TransactionWatcher.ZooArbitrator;
 import org.apache.accumulo.trace.instrument.TraceExecutorService;
@@ -181,8 +183,7 @@ public class BulkImport extends MasterRepo {
   
   private Path createNewBulkDir(VolumeManager fs, String tableId) throws IOException {
     String tableDir = null;
-    loop:
-    for (String dir : fs.getFileSystems().keySet()) {
+    loop: for (String dir : fs.getFileSystems().keySet()) {
       if (this.sourceDir.startsWith(dir)) {
         for (String path : ServerConstants.getTablesDirs()) {
           if (path.startsWith(dir)) {
@@ -221,7 +222,7 @@ public class BulkImport extends MasterRepo {
   private String prepareBulkImport(VolumeManager fs, String dir, String tableId) throws IOException {
     Path bulkDir = createNewBulkDir(fs, tableId);
     
-    MetadataTable.addBulkLoadInProgressFlag("/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
+    MetadataTableUtil.addBulkLoadInProgressFlag("/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
     
     Path dirPath = new Path(dir);
     FileStatus[] mapFiles = fs.listStatus(dirPath);
@@ -308,11 +309,11 @@ class CleanUpBulkImport extends MasterRepo {
   public Repo<Master> call(long tid, Master master) throws Exception {
     log.debug("removing the bulk processing flag file in " + bulk);
     Path bulkDir = new Path(bulk);
-    MetadataTable.removeBulkLoadInProgressFlag("/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
-    MetadataTable.addDeleteEntry(tableId, "/" + bulkDir.getName());
+    MetadataTableUtil.removeBulkLoadInProgressFlag("/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
+    MetadataTableUtil.addDeleteEntry(tableId, "/" + bulkDir.getName());
     log.debug("removing the metadata table markers for loaded files");
     Connector conn = master.getConnector();
-    MetadataTable.removeBulkLoadEntries(conn, tableId, tid);
+    MetadataTableUtil.removeBulkLoadEntries(conn, tableId, tid);
     log.debug("releasing HDFS reservations for " + source + " and " + error);
     Utils.unreserveHdfsDirectory(source, tid);
     Utils.unreserveHdfsDirectory(error, tid);
@@ -414,7 +415,7 @@ class CopyFailed extends MasterRepo {
     Connector conn = master.getConnector();
     Scanner mscanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
     mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
-    mscanner.fetchColumnFamily(MetadataTable.BULKFILE_COLUMN_FAMILY);
+    mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
     
     for (Entry<Key,Value> entry : mscanner) {
       if (Long.parseLong(entry.getValue().toString()) == tid) {
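
CopyFailed's scan above finds the per-file bulk-load markers for one fate transaction via the new BulkFileColumnFamily. A minimal sketch of that lookup under the same assumptions (hypothetical class and method names):

    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.IsolatedScanner;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.KeyExtent;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.MetadataTable;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.hadoop.io.Text;

    public class BulkMarkerScanSketch {
      // Prints the files whose bulk-load marker carries the given transaction id,
      // scanning the table's full metadata range as CopyFailed does above.
      static void listMarkers(Connector conn, String tableId, long tid) throws Exception {
        Scanner mscanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
        mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
        mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
        for (Entry<Key,Value> entry : mscanner) {
          if (Long.parseLong(entry.getValue().toString()) == tid)
            System.out.println(entry.getKey().getColumnQualifier());
        }
      }
    }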

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/master/tableOps/CloneTable.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/CloneTable.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/CloneTable.java
index dc36087..8bf437d 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/CloneTable.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/tableOps/CloneTable.java
@@ -33,7 +33,7 @@ import org.apache.accumulo.server.master.Master;
 import org.apache.accumulo.server.master.state.tables.TableManager;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
 import org.apache.accumulo.server.security.SecurityConstants;
-import org.apache.accumulo.server.util.MetadataTable;
+import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.log4j.Logger;
 
 class CloneInfo implements Serializable {
@@ -108,14 +108,14 @@ class CloneMetadata extends MasterRepo {
     Instance instance = HdfsZooInstance.getInstance();
     // need to clear out any metadata entries for tableId just in case this
     // died before and is executing again
-    MetadataTable.deleteTable(cloneInfo.tableId, false, SecurityConstants.getSystemCredentials(), environment.getMasterLock());
-    MetadataTable.cloneTable(instance, cloneInfo.srcTableId, cloneInfo.tableId);
+    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, SecurityConstants.getSystemCredentials(), environment.getMasterLock());
+    MetadataTableUtil.cloneTable(instance, cloneInfo.srcTableId, cloneInfo.tableId);
     return new FinishCloneTable(cloneInfo);
   }
   
   @Override
   public void undo(long tid, Master environment) throws Exception {
-    MetadataTable.deleteTable(cloneInfo.tableId, false, SecurityConstants.getSystemCredentials(), environment.getMasterLock());
+    MetadataTableUtil.deleteTable(cloneInfo.tableId, false, SecurityConstants.getSystemCredentials(), environment.getMasterLock());
   }
   
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/master/tableOps/CompactRange.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/CompactRange.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/CompactRange.java
index 5b0f489..09046f7 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/CompactRange.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/tableOps/CompactRange.java
@@ -41,9 +41,10 @@ import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooReaderWriter.Mutator;
@@ -101,9 +102,9 @@ class CompactionDriver extends MasterRepo {
       range = range.clip(new Range(RootTable.EXTENT.getMetadataEntry(), false, null, true));
     
     scanner.setRange(range);
-    MetadataTable.COMPACT_COLUMN.fetch(scanner);
-    MetadataTable.DIRECTORY_COLUMN.fetch(scanner);
-    scanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
+    TabletsSection.ServerColumnFamily.COMPACT_COLUMN.fetch(scanner);
+    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
+    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
     
     long t1 = System.currentTimeMillis();
     RowIterator ri = new RowIterator(scanner);
@@ -122,10 +123,10 @@ class CompactionDriver extends MasterRepo {
         entry = row.next();
         Key key = entry.getKey();
         
-        if (MetadataTable.COMPACT_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier()))
+        if (TabletsSection.ServerColumnFamily.COMPACT_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier()))
           tabletCompactID = Long.parseLong(entry.getValue().toString());
         
-        if (MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY.equals(key.getColumnFamily()))
+        if (TabletsSection.CurrentLocationColumnFamily.NAME.equals(key.getColumnFamily()))
           server = new TServerInstance(entry.getValue(), key.getColumnQualifier());
       }
       

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/master/tableOps/CreateTable.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/CreateTable.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/CreateTable.java
index f69126a..d9acd8d 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/CreateTable.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/tableOps/CreateTable.java
@@ -39,7 +39,7 @@ import org.apache.accumulo.server.security.AuditedSecurityOperation;
 import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.security.SecurityOperation;
 import org.apache.accumulo.server.tabletserver.TabletTime;
-import org.apache.accumulo.server.util.MetadataTable;
+import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.util.TablePropUtil;
 import org.apache.accumulo.server.util.TabletOperations;
 import org.apache.hadoop.fs.Path;
@@ -115,7 +115,7 @@ class PopulateMetadata extends MasterRepo {
   public Repo<Master> call(long tid, Master environment) throws Exception {
     
     KeyExtent extent = new KeyExtent(new Text(tableInfo.tableId), null, null);
-    MetadataTable.addTablet(extent, Constants.DEFAULT_TABLET_LOCATION, SecurityConstants.getSystemCredentials(), tableInfo.timeType,
+    MetadataTableUtil.addTablet(extent, Constants.DEFAULT_TABLET_LOCATION, SecurityConstants.getSystemCredentials(), tableInfo.timeType,
         environment.getMasterLock());
     
     return new FinishCreateTable(tableInfo);
@@ -124,7 +124,7 @@ class PopulateMetadata extends MasterRepo {
   
   @Override
   public void undo(long tid, Master environment) throws Exception {
-    MetadataTable.deleteTable(tableInfo.tableId, false, SecurityConstants.getSystemCredentials(), environment.getMasterLock());
+    MetadataTableUtil.deleteTable(tableInfo.tableId, false, SecurityConstants.getSystemCredentials(), environment.getMasterLock());
   }
   
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java
index 7b4c69b..7d6186e 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/tableOps/DeleteTable.java
@@ -33,6 +33,9 @@ import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.user.GrepIterator;
 import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.server.ServerConstants;
@@ -45,7 +48,7 @@ import org.apache.accumulo.server.master.state.tables.TableManager;
 import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.security.AuditedSecurityOperation;
 import org.apache.accumulo.server.security.SecurityConstants;
-import org.apache.accumulo.server.util.MetadataTable;
+import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
@@ -127,8 +130,8 @@ class CleanUp extends MasterRepo {
       Connector conn = master.getConnector();
       BatchScanner bs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8);
       try {
-        bs.setRanges(Collections.singleton(MetadataTable.NON_ROOT_KEYSPACE));
-        bs.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
+        bs.setRanges(Collections.singleton(MetadataSchema.TabletsSection.getRange()));
+        bs.fetchColumnFamily(DataFileColumnFamily.NAME);
         IteratorSetting cfg = new IteratorSetting(40, "grep", GrepIterator.class);
         GrepIterator.setTerm(cfg, "../" + tableId + "/");
         bs.addScanIterator(cfg);
@@ -152,7 +155,7 @@ class CleanUp extends MasterRepo {
       // Intentionally do not pass master lock. If master loses lock, this operation may complete before master can kill itself.
       // If the master lock passed to deleteTable, it is possible that the delete mutations will be dropped. If the delete operations
       // are dropped and the operation completes, then the deletes will not be repeated.
-      MetadataTable.deleteTable(tableId, refCount != 0, SecurityConstants.getSystemCredentials(), null);
+      MetadataTableUtil.deleteTable(tableId, refCount != 0, SecurityConstants.getSystemCredentials(), null);
     } catch (Exception e) {
       log.error("error deleting " + tableId + " from metadata table", e);
     }
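
The reference check above greps the tablets section for "../<tableId>/" paths before the table's files are deleted. Pulled out on its own, it looks roughly like this sketch (hypothetical names; same iterator stack as the hunk):

    import java.util.Collections;
    import java.util.Iterator;

    import org.apache.accumulo.core.client.BatchScanner;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.iterators.user.GrepIterator;
    import org.apache.accumulo.core.metadata.MetadataTable;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
    import org.apache.accumulo.core.security.Authorizations;

    public class RefCountSketch {
      // Counts data-file entries that still reference the given table's directory.
      static int countReferences(Connector conn, String tableId) throws Exception {
        BatchScanner bs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8);
        try {
          bs.setRanges(Collections.singleton(MetadataSchema.TabletsSection.getRange()));
          bs.fetchColumnFamily(DataFileColumnFamily.NAME);
          IteratorSetting cfg = new IteratorSetting(40, "grep", GrepIterator.class);
          GrepIterator.setTerm(cfg, "../" + tableId + "/");
          bs.addScanIterator(cfg);
          int refCount = 0;
          for (Iterator<?> i = bs.iterator(); i.hasNext(); i.next())
            refCount++;
          return refCount;
        } finally {
          bs.close();
        }
      }
    }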

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/master/tableOps/ExportTable.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/ExportTable.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/ExportTable.java
index 5ec1eb9..a59f9a9 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/ExportTable.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/tableOps/ExportTable.java
@@ -45,8 +45,11 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.conf.ServerConfiguration;
@@ -100,8 +103,8 @@ class WriteExportFiles extends MasterRepo {
     metaScanner.setRange(new KeyExtent(new Text(tableInfo.tableID), null, null).toMetadataRange());
     
     // scan for locations
-    metaScanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
-    metaScanner.fetchColumnFamily(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY);
+    metaScanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
+    metaScanner.fetchColumnFamily(TabletsSection.FutureLocationColumnFamily.NAME);
     
     if (metaScanner.iterator().hasNext()) {
       return 500;
@@ -110,7 +113,7 @@ class WriteExportFiles extends MasterRepo {
     // use the same range to check for walogs that we used to check for hosted (or future hosted) tablets
     // this is done as a separate scan after we check for locations, because walogs are okay only if there is no location
     metaScanner.clearColumns();
-    metaScanner.fetchColumnFamily(MetadataTable.LOG_COLUMN_FAMILY);
+    metaScanner.fetchColumnFamily(LogColumnFamily.NAME);
     
     if (metaScanner.iterator().hasNext()) {
       throw new ThriftTableOperationException(tableInfo.tableID, tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
@@ -204,23 +207,23 @@ class WriteExportFiles extends MasterRepo {
     }
   }
   
-  private static Map<String,String> exportMetadata(VolumeManager fs, Connector conn, String tableID, ZipOutputStream zipOut, DataOutputStream dataOut) throws IOException,
-      TableNotFoundException {
+  private static Map<String,String> exportMetadata(VolumeManager fs, Connector conn, String tableID, ZipOutputStream zipOut, DataOutputStream dataOut)
+      throws IOException, TableNotFoundException {
     zipOut.putNextEntry(new ZipEntry(Constants.EXPORT_METADATA_FILE));
     
     Map<String,String> uniqueFiles = new HashMap<String,String>();
     
     Scanner metaScanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    metaScanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
-    MetadataTable.PREV_ROW_COLUMN.fetch(metaScanner);
-    MetadataTable.TIME_COLUMN.fetch(metaScanner);
+    metaScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(metaScanner);
+    TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(metaScanner);
     metaScanner.setRange(new KeyExtent(new Text(tableID), null, null).toMetadataRange());
     
     for (Entry<Key,Value> entry : metaScanner) {
       entry.getKey().write(dataOut);
       entry.getValue().write(dataOut);
       
-      if (entry.getKey().getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
+      if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
         String path = fs.getFullPath(entry.getKey()).toString();
         String tokens[] = path.split("/");
         if (tokens.length < 1) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java
index f4f8cd0..ae6930b 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/tableOps/ImportTable.java
@@ -46,6 +46,9 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.FileOperations;
 import org.apache.accumulo.core.master.state.tables.TableState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.util.FastFormat;
 import org.apache.accumulo.fate.Repo;
@@ -59,7 +62,7 @@ import org.apache.accumulo.server.security.AuditedSecurityOperation;
 import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.security.SecurityOperation;
 import org.apache.accumulo.server.tabletserver.UniqueNameAllocator;
-import org.apache.accumulo.server.util.MetadataTable;
+import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.util.TablePropUtil;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -235,7 +238,7 @@ class PopulateMetadataTable extends MasterRepo {
             
             Text cq;
             
-            if (key.getColumnFamily().equals(MetadataTable.DATAFILE_COLUMN_FAMILY)) {
+            if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
               String oldName = new Path(key.getColumnQualifier().toString()).getName();
               String newName = fileNameMappings.get(oldName);
               
@@ -246,19 +249,19 @@ class PopulateMetadataTable extends MasterRepo {
             
             if (m == null) {
               m = new Mutation(metadataRow);
-              MetadataTable.DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
+              TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
               currentRow = metadataRow;
             }
             
             if (!currentRow.equals(metadataRow)) {
               mbw.addMutation(m);
               m = new Mutation(metadataRow);
-              MetadataTable.DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
+              TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
             }
             
             m.put(key.getColumnFamily(), cq, val);
             
-            if (endRow == null && MetadataTable.PREV_ROW_COLUMN.hasColumns(key)) {
+            if (endRow == null && TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
               mbw.addMutation(m);
               break; // its the last column in the last row
             }
@@ -290,7 +293,7 @@ class PopulateMetadataTable extends MasterRepo {
   
   @Override
   public void undo(long tid, Master environment) throws Exception {
-    MetadataTable.deleteTable(tableInfo.tableId, false, SecurityConstants.getSystemCredentials(), environment.getMasterLock());
+    MetadataTableUtil.deleteTable(tableInfo.tableId, false, SecurityConstants.getSystemCredentials(), environment.getMasterLock());
   }
 }
 
@@ -315,7 +318,7 @@ class MapImportFileNames extends MasterRepo {
       VolumeManager fs = environment.getFileSystem();
       
       fs.mkdirs(new Path(tableInfo.importDir));
-
+      
       FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
       
       UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
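
Each new metadata row the import writes gets a fresh zero-padded directory via the relocated DIRECTORY_COLUMN, as in the two hunks above. The directory assignment in isolation (hypothetical class name; dirCount is the running counter from the surrounding loop):

    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
    import org.apache.accumulo.core.util.FastFormat;
    import org.apache.hadoop.io.Text;

    public class DirectoryColumnSketch {
      // Starts a mutation for a tablet row and assigns it a directory name
      // like "/c-00000001", matching the FastFormat call above.
      static Mutation newTabletDir(Text metadataRow, int dirCount) {
        Mutation m = new Mutation(metadataRow);
        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m,
            new Value(FastFormat.toZeroPaddedString(dirCount, 8, 16, "/c-".getBytes())));
        return m;
      }
    }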

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java b/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java
index 2d93084..a7ceec1 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/tableOps/TableRangeOp.java
@@ -20,6 +20,7 @@ import org.apache.accumulo.core.client.impl.thrift.TableOperation;
 import org.apache.accumulo.core.client.impl.thrift.TableOperationExceptionType;
 import org.apache.accumulo.core.client.impl.thrift.ThriftTableOperationException;
 import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.fate.Repo;
 import org.apache.accumulo.server.master.Master;
@@ -96,6 +97,10 @@ public class TableRangeOp extends MasterRepo {
   @Override
   public Repo<Master> call(long tid, Master env) throws Exception {
     
+    if (RootTable.ID.equals(tableId) && TableOperation.MERGE.equals(op)) {
+      log.warn("Attempt to merge tablets for " + RootTable.NAME + " does nothing. It is not splittable.");
+    }
+    
     Text start = startRow.length == 0 ? null : new Text(startRow);
     Text end = endRow.length == 0 ? null : new Text(endRow);
     Text tableIdText = new Text(tableId);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java b/server/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java
index f1c2678..9e55e08 100644
--- a/server/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java
+++ b/server/src/main/java/org/apache/accumulo/server/metanalysis/FilterMeta.java
@@ -20,8 +20,8 @@ import java.io.IOException;
 import java.util.HashSet;
 import java.util.Set;
 
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.logger.LogEvents;
 import org.apache.accumulo.server.logger.LogFileKey;
 import org.apache.accumulo.server.logger.LogFileValue;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java b/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java
index 958b103..6f9f96f 100644
--- a/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java
+++ b/server/src/main/java/org/apache/accumulo/server/metanalysis/IndexMeta.java
@@ -30,8 +30,9 @@ import org.apache.accumulo.core.data.ColumnUpdate;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.logger.LogEvents;
 import org.apache.accumulo.server.logger.LogFileKey;
@@ -93,7 +94,7 @@ public class IndexMeta extends Configured implements Tool {
       }
       
       for (ColumnUpdate cu : columnsUpdates) {
-        if (MetadataTable.PREV_ROW_COLUMN.equals(new Text(cu.getColumnFamily()), new Text(cu.getColumnQualifier())) && !cu.isDeleted()) {
+        if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(new Text(cu.getColumnFamily()), new Text(cu.getColumnQualifier())) && !cu.isDeleted()) {
           prevRow = new Text(cu.getValue());
         }
         

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java b/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java
index 281c8f5..9de5d2e 100644
--- a/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java
+++ b/server/src/main/java/org/apache/accumulo/server/metanalysis/PrintEvents.java
@@ -22,7 +22,6 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map.Entry;
 
-import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.ColumnUpdate;
@@ -31,8 +30,9 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.logger.LogFileValue;
 import org.apache.hadoop.io.Text;
 
@@ -44,11 +44,11 @@ import com.beust.jcommander.Parameter;
 public class PrintEvents {
   
   static class Opts extends ClientOpts {
-    @Parameter(names={"-t", "--tableId"}, description="table id", required=true)
+    @Parameter(names = {"-t", "--tableId"}, description = "table id", required = true)
     String tableId;
-    @Parameter(names={"-e", "--endRow"}, description="end row")
+    @Parameter(names = {"-e", "--endRow"}, description = "end row")
     String endRow;
-    @Parameter(names={"-t", "--time"}, description="time, in milliseconds", required=true)
+    @Parameter(names = {"-t", "--time"}, description = "time, in milliseconds", required = true)
     long time;
   }
   
@@ -78,7 +78,7 @@ public class PrintEvents {
     int count = 0;
     
     String lastLog = null;
-
+    
     loop1: for (Entry<Key,Value> entry : scanner) {
       if (entry.getKey().getColumnQualifier().toString().equals("log")) {
         if (lastLog == null || !lastLog.equals(entry.getValue().toString()))
@@ -96,7 +96,7 @@ public class PrintEvents {
         
         List<ColumnUpdate> columnsUpdates = m.getUpdates();
         for (ColumnUpdate cu : columnsUpdates) {
-          if (MetadataTable.PREV_ROW_COLUMN.equals(new Text(cu.getColumnFamily()), new Text(cu.getColumnQualifier())) && count > 0) {
+          if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(new Text(cu.getColumnFamily()), new Text(cu.getColumnQualifier())) && count > 0) {
             System.out.println("Saw change to prevrow, stopping printing events.");
             break loop1;
           }
@@ -104,6 +104,6 @@ public class PrintEvents {
         count++;
       }
     }
-
+    
   }
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TablesServlet.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TablesServlet.java b/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TablesServlet.java
index 2419803..127989c 100644
--- a/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TablesServlet.java
+++ b/server/src/main/java/org/apache/accumulo/server/monitor/servlets/TablesServlet.java
@@ -33,8 +33,8 @@ import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.master.state.MetaDataTableScanner;
 import org.apache.accumulo.server.master.state.TabletLocationState;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/problems/ProblemReport.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/problems/ProblemReport.java b/server/src/main/java/org/apache/accumulo/server/problems/ProblemReport.java
index 6b32117..a34de9f 100644
--- a/server/src/main/java/org/apache/accumulo/server/problems/ProblemReport.java
+++ b/server/src/main/java/org/apache/accumulo/server/problems/ProblemReport.java
@@ -35,7 +35,7 @@ import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.security.SecurityConstants;
-import org.apache.accumulo.server.util.MetadataTable;
+import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.hadoop.io.Text;
 
@@ -125,13 +125,13 @@ public class ProblemReport {
   void removeFromMetadataTable() throws Exception {
     Mutation m = new Mutation(new Text("~err_" + tableName));
     m.putDelete(new Text(problemType.name()), new Text(resource));
-    MetadataTable.getMetadataTable(SecurityConstants.getSystemCredentials()).update(m);
+    MetadataTableUtil.getMetadataTable(SecurityConstants.getSystemCredentials()).update(m);
   }
   
   void saveToMetadataTable() throws Exception {
     Mutation m = new Mutation(new Text("~err_" + tableName));
     m.put(new Text(problemType.name()), new Text(resource), new Value(encode()));
-    MetadataTable.getMetadataTable(SecurityConstants.getSystemCredentials()).update(m);
+    MetadataTableUtil.getMetadataTable(SecurityConstants.getSystemCredentials()).update(m);
   }
   
   void removeFromZooKeeper() throws Exception {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java b/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
index 6fbd377..5b82621 100644
--- a/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
+++ b/server/src/main/java/org/apache/accumulo/server/problems/ProblemReports.java
@@ -39,15 +39,16 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.SortedKeyIterator;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.LoggingRunnable;
 import org.apache.accumulo.core.util.NamingThreadFactory;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.security.SecurityConstants;
-import org.apache.accumulo.server.util.MetadataTable;
+import org.apache.accumulo.server.util.MetadataTableUtil;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.commons.collections.map.LRUMap;
 import org.apache.hadoop.io.Text;
@@ -173,7 +174,7 @@ public class ProblemReports implements Iterable<ProblemReport> {
     }
     
     if (hasProblems)
-      MetadataTable.getMetadataTable(SecurityConstants.getSystemCredentials()).update(delMut);
+      MetadataTableUtil.getMetadataTable(SecurityConstants.getSystemCredentials()).update(delMut);
   }
   
   public Iterator<ProblemReport> iterator(final String table) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java b/server/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
index b858dcd..125915b 100644
--- a/server/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
+++ b/server/src/main/java/org/apache/accumulo/server/security/AuditedSecurityOperation.java
@@ -32,12 +32,12 @@ import org.apache.accumulo.core.data.thrift.TColumn;
 import org.apache.accumulo.core.data.thrift.TKeyExtent;
 import org.apache.accumulo.core.data.thrift.TRange;
 import org.apache.accumulo.core.master.thrift.TableOperation;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.SystemPermission;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.util.ByteBufferUtil;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.security.handler.Authenticator;
 import org.apache.accumulo.server.security.handler.Authorizor;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java b/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
index b2330c2..e948894 100644
--- a/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
+++ b/server/src/main/java/org/apache/accumulo/server/security/SecurityOperation.java
@@ -34,13 +34,13 @@ import org.apache.accumulo.core.data.thrift.TColumn;
 import org.apache.accumulo.core.data.thrift.TKeyExtent;
 import org.apache.accumulo.core.data.thrift.TRange;
 import org.apache.accumulo.core.master.thrift.TableOperation;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.SystemPermission;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.master.Master;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java b/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java
index 848dd80..6fe0115 100644
--- a/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java
+++ b/server/src/main/java/org/apache/accumulo/server/security/handler/ZKAuthorizor.java
@@ -24,11 +24,11 @@ import java.util.TreeSet;
 
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.SystemPermission;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;


[12/50] [abbrv] ACCUMULO-1481 : Add tests for splitting/merging root table; refactor to consolidate metadata constants and structures in an organized way; begin consolidating metadata ops into a servicer interface to abstract the code that actually does

Posted by ct...@apache.org.
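
[Editorial note] The recurring change in the hunks below is a package move: the metadata-table constants leave the util package for a dedicated core.metadata package, and the server-side mutation helpers are renamed from MetadataTable to MetadataTableUtil. A minimal sketch of code written against the new names; only constants that actually appear in these diffs are used:

    import org.apache.accumulo.core.metadata.MetadataTable;
    import org.apache.accumulo.core.metadata.RootTable;

    public class PackageMoveSketch {
      public static void main(String[] args) {
        // Same table-name constants as before, now resolved from core.metadata
        // rather than the old org.apache.accumulo.core.util package.
        System.out.println(MetadataTable.NAME + " / " + RootTable.NAME);
      }
    }
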
http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/security/handler/ZKPermHandler.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/security/handler/ZKPermHandler.java b/server/src/main/java/org/apache/accumulo/server/security/handler/ZKPermHandler.java
index 9201f1c..f219603 100644
--- a/server/src/main/java/org/apache/accumulo/server/security/handler/ZKPermHandler.java
+++ b/server/src/main/java/org/apache/accumulo/server/security/handler/ZKPermHandler.java
@@ -26,11 +26,11 @@ import java.util.TreeSet;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
 import org.apache.accumulo.core.security.SystemPermission;
 import org.apache.accumulo.core.security.TablePermission;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/tabletserver/Compactor.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/Compactor.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/Compactor.java
index 03da11e..60ec7c0 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/Compactor.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/Compactor.java
@@ -46,12 +46,12 @@ import org.apache.accumulo.core.iterators.system.ColumnFamilySkippingIterator;
 import org.apache.accumulo.core.iterators.system.DeletingIterator;
 import org.apache.accumulo.core.iterators.system.MultiIterator;
 import org.apache.accumulo.core.iterators.system.TimeSettingIterator;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
 import org.apache.accumulo.core.tabletserver.thrift.ActiveCompaction;
 import org.apache.accumulo.core.tabletserver.thrift.CompactionReason;
 import org.apache.accumulo.core.tabletserver.thrift.CompactionType;
 import org.apache.accumulo.core.util.LocalityGroupUtil;
 import org.apache.accumulo.core.util.LocalityGroupUtil.LocalityGroupConfigurationError;
-import org.apache.accumulo.core.util.MetadataTable.DataFileValue;
 import org.apache.accumulo.server.conf.TableConfiguration;
 import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.fs.VolumeManager;
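
[Editorial note] DataFileValue, formerly an inner class of the old util MetadataTable, is now a top-level schema class. A minimal sketch of use under the new name; the (size, entries) constructor and accessor names are assumptions inferred from how the class is used elsewhere in this patch, not taken from this diff:

    import org.apache.accumulo.core.metadata.schema.DataFileValue;

    public class DataFileValueSketch {
      public static void main(String[] args) {
        // Assumed constructor: file size in bytes, then entry count. The
        // "<size>,<entries>" value format appears in the
        // AddFilesWithMissingEntries diff further down.
        DataFileValue dfv = new DataFileValue(1024L, 500L);
        System.out.println(dfv.getSize() + "," + dfv.getNumEntries());
      }
    }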

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/tabletserver/FileManager.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/FileManager.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/FileManager.java
index 2d7989c..7349d51 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/FileManager.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/FileManager.java
@@ -41,7 +41,7 @@ import org.apache.accumulo.core.iterators.system.InterruptibleIterator;
 import org.apache.accumulo.core.iterators.system.SourceSwitchingIterator;
 import org.apache.accumulo.core.iterators.system.SourceSwitchingIterator.DataSource;
 import org.apache.accumulo.core.iterators.system.TimeSettingIterator;
-import org.apache.accumulo.core.util.MetadataTable.DataFileValue;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.fs.FileRef;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/tabletserver/MinorCompactor.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/MinorCompactor.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/MinorCompactor.java
index 4f8f398..49ffc65 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/MinorCompactor.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/MinorCompactor.java
@@ -25,7 +25,7 @@ import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
 import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.util.MetadataTable.DataFileValue;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.TableConfiguration;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
index 1f6f38b..1305be6 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/Tablet.java
@@ -78,15 +78,20 @@ import org.apache.accumulo.core.iterators.system.SourceSwitchingIterator.DataSou
 import org.apache.accumulo.core.iterators.system.StatsIterator;
 import org.apache.accumulo.core.iterators.system.VisibilityFilter;
 import org.apache.accumulo.core.master.thrift.TabletLoadState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ScanFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.core.util.LocalityGroupUtil;
-import org.apache.accumulo.core.util.MetadataTable.DataFileValue;
 import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.server.ServerConstants;
@@ -115,8 +120,8 @@ import org.apache.accumulo.server.tabletserver.mastermessage.TabletStatusMessage
 import org.apache.accumulo.server.tabletserver.metrics.TabletServerMinCMetrics;
 import org.apache.accumulo.server.util.FileUtil;
 import org.apache.accumulo.server.util.MapCounter;
-import org.apache.accumulo.server.util.MetadataTable;
-import org.apache.accumulo.server.util.MetadataTable.LogEntry;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.accumulo.server.util.MetadataTableUtil.LogEntry;
 import org.apache.accumulo.server.util.TabletOperations;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;
@@ -540,7 +545,7 @@ public class Tablet {
         
         scanFileReservations.put(rid, absFilePaths);
         
-        Map<FileRef,DataFileValue> ret = new HashMap<FileRef,MetadataTable.DataFileValue>();
+        Map<FileRef,DataFileValue> ret = new HashMap<FileRef,DataFileValue>();
         
         for (FileRef path : absFilePaths) {
           fileScanReferenceCounts.increment(path, 1);
@@ -578,7 +583,7 @@ public class Tablet {
       
       if (filesToDelete.size() > 0) {
         log.debug("Removing scan refs from metadata " + extent + " " + filesToDelete);
-        MetadataTable.removeScanFiles(extent, filesToDelete, SecurityConstants.getSystemCredentials(), tabletServer.getLock());
+        MetadataTableUtil.removeScanFiles(extent, filesToDelete, SecurityConstants.getSystemCredentials(), tabletServer.getLock());
       }
     }
     
@@ -599,7 +604,7 @@ public class Tablet {
       
       if (filesToDelete.size() > 0) {
         log.debug("Removing scan refs from metadata " + extent + " " + filesToDelete);
-        MetadataTable.removeScanFiles(extent, filesToDelete, SecurityConstants.getSystemCredentials(), tabletServer.getLock());
+        MetadataTableUtil.removeScanFiles(extent, filesToDelete, SecurityConstants.getSystemCredentials(), tabletServer.getLock());
       }
     }
     
@@ -645,7 +650,7 @@ public class Tablet {
       
       String bulkDir = null;
       
-      Map<FileRef,DataFileValue> paths = new HashMap<FileRef,MetadataTable.DataFileValue>();
+      Map<FileRef,DataFileValue> paths = new HashMap<FileRef,DataFileValue>();
       for (Entry<FileRef,DataFileValue> entry : pathsString.entrySet())
         paths.put(entry.getKey(), entry.getValue());
       
@@ -660,7 +665,7 @@ public class Tablet {
           }
         }
         if (!inTheRightDirectory) {
-          throw new IOException("Map file " + tpath + " not in table dirs");
+          throw new IOException("Data file " + tpath + " not in table dirs");
         }
         
         if (bulkDir == null)
@@ -683,7 +688,7 @@ public class Tablet {
           throw new IOException(ex);
         }
         // Remove any bulk files we've previously loaded and compacted away
-        List<FileRef> files = MetadataTable.getBulkFilesLoaded(conn, extent, tid);
+        List<FileRef> files = MetadataTableUtil.getBulkFilesLoaded(conn, extent, tid);
         
         for (FileRef file : files)
           if (paths.keySet().remove(file.path()))
@@ -705,7 +710,7 @@ public class Tablet {
             if (bulkTime > persistedTime)
               persistedTime = bulkTime;
             
-            MetadataTable.updateTabletDataFile(tid, extent, paths, tabletTime.getMetadataValue(persistedTime), auths, tabletServer.getLock());
+            MetadataTableUtil.updateTabletDataFile(tid, extent, paths, tabletTime.getMetadataValue(persistedTime), auths, tabletServer.getLock());
           }
         }
       }
@@ -833,7 +838,7 @@ public class Tablet {
       // very important to write delete entries outside of log lock, because
       // this !METADATA write does not go up... it goes sideways or to itself
       if (absMergeFile != null)
-        MetadataTable.addDeleteEntries(extent, Collections.singleton(absMergeFile), SecurityConstants.getSystemCredentials());
+        MetadataTableUtil.addDeleteEntries(extent, Collections.singleton(absMergeFile), SecurityConstants.getSystemCredentials());
       
       Set<String> unusedWalLogs = beginClearingUnusedLogs();
       try {
@@ -848,7 +853,7 @@ public class Tablet {
             persistedTime = commitSession.getMaxCommittedTime();
           
           String time = tabletTime.getMetadataValue(persistedTime);
-          MetadataTable.updateTabletDataFile(extent, newDatafile, absMergeFile, dfv, time, creds, filesInUseByScans, tabletServer.getClientAddressString(),
+          MetadataTableUtil.updateTabletDataFile(extent, newDatafile, absMergeFile, dfv, time, creds, filesInUseByScans, tabletServer.getClientAddressString(),
               tabletServer.getLock(), unusedWalLogs, lastLocation, flushId);
         }
         
@@ -1032,7 +1037,7 @@ public class Tablet {
         Set<FileRef> filesInUseByScans = waitForScansToFinish(oldDatafiles, false, 10000);
         if (filesInUseByScans.size() > 0)
           log.debug("Adding scan refs to metadata " + extent + " " + filesInUseByScans);
-        MetadataTable.replaceDatafiles(extent, oldDatafiles, filesInUseByScans, newDatafile, compactionId, dfv, SecurityConstants.getSystemCredentials(),
+        MetadataTableUtil.replaceDatafiles(extent, oldDatafiles, filesInUseByScans, newDatafile, compactionId, dfv, SecurityConstants.getSystemCredentials(),
             tabletServer.getClientAddressString(), lastLocation, tabletServer.getLock());
         removeFilesAfterScan(filesInUseByScans);
       }
@@ -1043,7 +1048,7 @@ public class Tablet {
     
     public SortedMap<FileRef,DataFileValue> getDatafileSizes() {
       synchronized (Tablet.this) {
-        TreeMap<FileRef,DataFileValue> copy = new TreeMap<FileRef,MetadataTable.DataFileValue>(datafileSizes);
+        TreeMap<FileRef,DataFileValue> copy = new TreeMap<FileRef,DataFileValue>(datafileSizes);
         return Collections.unmodifiableSortedMap(copy);
       }
     }
@@ -1090,7 +1095,7 @@ public class Tablet {
       entries = new TreeMap<Key,Value>();
       Text rowName = extent.getMetadataEntry();
       for (Entry<Key,Value> entry : tabletsKeyValues.entrySet()) {
-        if (entry.getKey().compareRow(rowName) == 0 && MetadataTable.TIME_COLUMN.hasColumns(entry.getKey())) {
+        if (entry.getKey().compareRow(rowName) == 0 && TabletsSection.ServerColumnFamily.TIME_COLUMN.hasColumns(entry.getKey())) {
           entries.put(new Key(entry.getKey()), new Value(entry.getValue()));
         }
       }
@@ -1103,7 +1108,7 @@ public class Tablet {
     return null;
   }
   
-  private static SortedMap<FileRef,DataFileValue> lookupDatafiles(AccumuloConfiguration conf, Text locText, VolumeManager fs, KeyExtent extent,
+  private static SortedMap<FileRef,DataFileValue> lookupDatafiles(AccumuloConfiguration conf, VolumeManager fs, KeyExtent extent,
       SortedMap<Key,Value> tabletsKeyValues) throws IOException {
     
     TreeMap<FileRef,DataFileValue> datafiles = new TreeMap<FileRef,DataFileValue>();
@@ -1134,7 +1139,7 @@ public class Tablet {
       mdScanner.setBatchSize(1000);
       
       // leave these in, again, now using endKey for safety
-      mdScanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
+      mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
       
       mdScanner.setRange(new Range(rowName));
       
@@ -1156,7 +1161,7 @@ public class Tablet {
     
     if (ke.isMeta()) {
       try {
-        logEntries = MetadataTable.getLogEntries(SecurityConstants.getSystemCredentials(), ke);
+        logEntries = MetadataTableUtil.getLogEntries(SecurityConstants.getSystemCredentials(), ke);
       } catch (Exception ex) {
         throw new RuntimeException("Unable to read tablet log entries", ex);
       }
@@ -1166,8 +1171,8 @@ public class Tablet {
       for (Entry<Key,Value> entry : tabletsKeyValues.entrySet()) {
         Key key = entry.getKey();
         if (key.getRow().equals(row)) {
-          if (key.getColumnFamily().equals(MetadataTable.LOG_COLUMN_FAMILY)) {
-            logEntries.add(MetadataTable.entryFromKeyValue(key, entry.getValue()));
+          if (key.getColumnFamily().equals(LogColumnFamily.NAME)) {
+            logEntries.add(MetadataTableUtil.entryFromKeyValue(key, entry.getValue()));
           }
         }
       }
@@ -1183,7 +1188,7 @@ public class Tablet {
     Text row = extent.getMetadataEntry();
     for (Entry<Key,Value> entry : tabletsKeyValues.entrySet()) {
       Key key = entry.getKey();
-      if (key.getRow().equals(row) && key.getColumnFamily().equals(MetadataTable.SCANFILE_COLUMN_FAMILY)) {
+      if (key.getRow().equals(row) && key.getColumnFamily().equals(ScanFileColumnFamily.NAME)) {
         String meta = key.getColumnQualifier().toString();
         Path path = fs.getFullPath(ServerConstants.getTablesDirs(), meta);
         scanFiles.add(new FileRef(meta, path));
@@ -1197,7 +1202,7 @@ public class Tablet {
     Text row = extent.getMetadataEntry();
     for (Entry<Key,Value> entry : tabletsKeyValues.entrySet()) {
       Key key = entry.getKey();
-      if (key.getRow().equals(row) && MetadataTable.FLUSH_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier()))
+      if (key.getRow().equals(row) && TabletsSection.ServerColumnFamily.FLUSH_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier()))
         return Long.parseLong(entry.getValue().toString());
     }
     
@@ -1208,7 +1213,7 @@ public class Tablet {
     Text row = extent.getMetadataEntry();
     for (Entry<Key,Value> entry : tabletsKeyValues.entrySet()) {
       Key key = entry.getKey();
-      if (key.getRow().equals(row) && MetadataTable.COMPACT_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier()))
+      if (key.getRow().equals(row) && TabletsSection.ServerColumnFamily.COMPACT_COLUMN.equals(key.getColumnFamily(), key.getColumnQualifier()))
         return Long.parseLong(entry.getValue().toString());
     }
     
@@ -1217,14 +1222,14 @@ public class Tablet {
   
   private Tablet(TabletServer tabletServer, Text location, KeyExtent extent, TabletResourceManager trm, Configuration conf, VolumeManager fs,
       SortedMap<Key,Value> tabletsKeyValues) throws IOException {
-    this(tabletServer, location, extent, trm, conf, fs, lookupLogEntries(extent, tabletsKeyValues), lookupDatafiles(tabletServer.getSystemConfiguration(),
-        location, fs, extent, tabletsKeyValues), lookupTime(tabletServer.getSystemConfiguration(), extent, tabletsKeyValues), lookupLastServer(extent,
-        tabletsKeyValues), lookupScanFiles(extent, tabletsKeyValues, fs), lookupFlushID(extent, tabletsKeyValues), lookupCompactID(extent, tabletsKeyValues));
+    this(tabletServer, location, extent, trm, conf, fs, lookupLogEntries(extent, tabletsKeyValues), lookupDatafiles(tabletServer.getSystemConfiguration(), fs,
+        extent, tabletsKeyValues), lookupTime(tabletServer.getSystemConfiguration(), extent, tabletsKeyValues), lookupLastServer(extent, tabletsKeyValues),
+        lookupScanFiles(extent, tabletsKeyValues, fs), lookupFlushID(extent, tabletsKeyValues), lookupCompactID(extent, tabletsKeyValues));
   }
   
   private static TServerInstance lookupLastServer(KeyExtent extent, SortedMap<Key,Value> tabletsKeyValues) {
     for (Entry<Key,Value> entry : tabletsKeyValues.entrySet()) {
-      if (entry.getKey().getColumnFamily().compareTo(MetadataTable.LAST_LOCATION_COLUMN_FAMILY) == 0) {
+      if (entry.getKey().getColumnFamily().compareTo(TabletsSection.LastLocationColumnFamily.NAME) == 0) {
         return new TServerInstance(entry.getValue(), entry.getKey().getColumnQualifier());
       }
     }
@@ -1370,7 +1375,7 @@ public class Tablet {
         tabletMemory.updateMemoryUsageStats();
         
         if (count[0] == 0) {
-          MetadataTable.removeUnusedWALEntries(extent, logEntries, tabletServer.getLock());
+          MetadataTableUtil.removeUnusedWALEntries(extent, logEntries, tabletServer.getLock());
           logEntries.clear();
         }
         
@@ -2211,7 +2216,7 @@ public class Tablet {
         TCredentials creds = SecurityConstants.getSystemCredentials();
         // if multiple threads were allowed to update this outside of a sync block, then it would be
         // a race condition
-        MetadataTable.updateTabletFlushID(extent, tableFlushID, creds, tabletServer.getLock());
+        MetadataTableUtil.updateTabletFlushID(extent, tableFlushID, creds, tabletServer.getLock());
       } else if (initiateMinor)
         initiateMinorCompaction(tableFlushID, MinorCompactionReason.USER);
       
@@ -2724,7 +2729,7 @@ public class Tablet {
     }
     
     try {
-      Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>> fileLog = MetadataTable.getFileAndLogEntries(SecurityConstants.getSystemCredentials(), extent);
+      Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>> fileLog = MetadataTableUtil.getFileAndLogEntries(SecurityConstants.getSystemCredentials(), extent);
       
       if (fileLog.getFirst().size() != 0) {
         String msg = "Closed tablet " + extent + " has walog entries in " + MetadataTable.NAME + " " + fileLog.getFirst();
@@ -2734,7 +2739,7 @@ public class Tablet {
       
       if (extent.isRootTablet()) {
         if (!fileLog.getSecond().keySet().equals(datafileManager.getDatafileSizes().keySet())) {
-          String msg = "Data file in " + MetadataTable.NAME + " differ from in memory data " + extent + "  " + fileLog.getSecond().keySet() + "  "
+          String msg = "Data file in " + RootTable.NAME + " differ from in memory data " + extent + "  " + fileLog.getSecond().keySet() + "  "
               + datafileManager.getDatafileSizes().keySet();
           log.error(msg);
           throw new RuntimeException(msg);
@@ -3319,7 +3324,7 @@ public class Tablet {
     try {
       majCStats = _majorCompact(reason);
       if (reason == MajorCompactionReason.CHOP) {
-        MetadataTable.chopped(getExtent(), this.tabletServer.getLock());
+        MetadataTableUtil.chopped(getExtent(), this.tabletServer.getLock());
         tabletServer.enqueueMasterMessage(new TabletStatusMessage(TabletLoadState.CHOPPED, extent));
       }
     } catch (CompactionCanceledException mcce) {
@@ -3500,7 +3505,7 @@ public class Tablet {
       SortedMap<FileRef,DataFileValue> highDatafileSizes = new TreeMap<FileRef,DataFileValue>();
       List<FileRef> highDatafilesToRemove = new ArrayList<FileRef>();
       
-      MetadataTable.splitDatafiles(extent.getTableId(), midRow, splitRatio, firstAndLastRows, datafileManager.getDatafileSizes(), lowDatafileSizes,
+      MetadataTableUtil.splitDatafiles(extent.getTableId(), midRow, splitRatio, firstAndLastRows, datafileManager.getDatafileSizes(), lowDatafileSizes,
           highDatafileSizes, highDatafilesToRemove);
       
       log.debug("Files for low split " + low + "  " + lowDatafileSizes.keySet());
@@ -3511,12 +3516,12 @@ public class Tablet {
       // it is possible that some of the bulk loading flags will be deleted after being read below because the bulk load
       // finishes.... therefore split could propogate load flags for a finished bulk load... there is a special iterator
       // on the !METADATA table to clean up this type of garbage
-      Map<FileRef,Long> bulkLoadedFiles = MetadataTable.getBulkFilesLoaded(SecurityConstants.getSystemCredentials(), extent);
+      Map<FileRef,Long> bulkLoadedFiles = MetadataTableUtil.getBulkFilesLoaded(SecurityConstants.getSystemCredentials(), extent);
       
-      MetadataTable.splitTablet(high, extent.getPrevEndRow(), splitRatio, SecurityConstants.getSystemCredentials(), tabletServer.getLock());
-      MetadataTable.addNewTablet(low, lowDirectory, tabletServer.getTabletSession(), lowDatafileSizes, bulkLoadedFiles,
+      MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, SecurityConstants.getSystemCredentials(), tabletServer.getLock());
+      MetadataTableUtil.addNewTablet(low, lowDirectory, tabletServer.getTabletSession(), lowDatafileSizes, bulkLoadedFiles,
           SecurityConstants.getSystemCredentials(), time, lastFlushID, lastCompactID, tabletServer.getLock());
-      MetadataTable.finishSplit(high, highDatafileSizes, highDatafilesToRemove, SecurityConstants.getSystemCredentials(), tabletServer.getLock());
+      MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, SecurityConstants.getSystemCredentials(), tabletServer.getLock());
       
       log.log(TLevel.TABLET_HIST, extent + " split " + low + " " + high);
       
@@ -3802,7 +3807,7 @@ public class Tablet {
       try {
         // if multiple threads were allowed to update this outside of a sync block, then it would be
         // a race condition
-        MetadataTable.updateTabletCompactID(extent, compactionId, SecurityConstants.getSystemCredentials(), tabletServer.getLock());
+        MetadataTableUtil.updateTabletCompactID(extent, compactionId, SecurityConstants.getSystemCredentials(), tabletServer.getLock());
       } finally {
         synchronized (this) {
           majorCompactionInProgress = false;
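
[Editorial note] Taken together, the replacements above trade the old flat constants (DATAFILE_COLUMN_FAMILY, TIME_COLUMN, KEYSPACE) for nested schema classes. A minimal sketch of a metadata scan written against the new names, using only calls that appear in this patch; the Connector argument is a placeholder:

    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.MetadataTable;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
    import org.apache.accumulo.core.security.Authorizations;

    public class MetadataScanSketch {
      static void listDataFiles(Connector conn) throws Exception {
        Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
        s.setRange(TabletsSection.getRange());          // was MetadataTable.KEYSPACE
        s.fetchColumnFamily(DataFileColumnFamily.NAME); // was DATAFILE_COLUMN_FAMILY
        for (Entry<Key,Value> e : s)
          System.out.println(e.getKey().getColumnQualifier() + " -> " + e.getValue());
      }
    }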

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletIteratorEnvironment.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletIteratorEnvironment.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletIteratorEnvironment.java
index 809ef4e..06c6395 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletIteratorEnvironment.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletIteratorEnvironment.java
@@ -28,7 +28,7 @@ import org.apache.accumulo.core.iterators.IteratorEnvironment;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
 import org.apache.accumulo.core.iterators.system.MultiIterator;
-import org.apache.accumulo.core.util.MetadataTable.DataFileValue;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
 import org.apache.accumulo.server.fs.FileRef;
 import org.apache.accumulo.server.tabletserver.FileManager.ScanFileManager;
 import org.apache.hadoop.fs.Path;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
index 31bc0b9..fe2c2de 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
@@ -101,6 +101,9 @@ import org.apache.accumulo.core.master.thrift.MasterClientService;
 import org.apache.accumulo.core.master.thrift.TableInfo;
 import org.apache.accumulo.core.master.thrift.TabletLoadState;
 import org.apache.accumulo.core.master.thrift.TabletServerStatus;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.SecurityUtil;
 import org.apache.accumulo.core.security.thrift.TCredentials;
@@ -121,7 +124,6 @@ import org.apache.accumulo.core.util.ColumnFQ;
 import org.apache.accumulo.core.util.Daemon;
 import org.apache.accumulo.core.util.LoggingRunnable;
 import org.apache.accumulo.core.util.Pair;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.ServerServices;
 import org.apache.accumulo.core.util.ServerServices.Service;
 import org.apache.accumulo.core.util.SimpleThreadPool;
@@ -183,8 +185,8 @@ import org.apache.accumulo.server.tabletserver.metrics.TabletServerUpdateMetrics
 import org.apache.accumulo.server.util.FileSystemMonitor;
 import org.apache.accumulo.server.util.Halt;
 import org.apache.accumulo.server.util.MapCounter;
-import org.apache.accumulo.server.util.MetadataTable;
-import org.apache.accumulo.server.util.MetadataTable.LogEntry;
+import org.apache.accumulo.server.util.MetadataTableUtil;
+import org.apache.accumulo.server.util.MetadataTableUtil.LogEntry;
 import org.apache.accumulo.server.util.TServerUtils;
 import org.apache.accumulo.server.util.TServerUtils.ServerPort;
 import org.apache.accumulo.server.util.time.RelativeTime;
@@ -1254,6 +1256,9 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
         tables.add(new String(keyExtent.getTable()));
       }
       
+      if (tables.size() != 1)
+        throw new IllegalArgumentException("Cannot batch scan over multiple tables");
+      
       // check if user has permission to the tables
       Authorizations userauths = null;
       for (String table : tables)
@@ -1265,21 +1270,10 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
         if (!userauths.contains(ByteBufferUtil.toBytes(auth)))
           throw new ThriftSecurityException(credentials.getPrincipal(), SecurityErrorCode.BAD_AUTHORIZATIONS);
       
-      KeyExtent threadPoolExtent = null;
-      
       Map<KeyExtent,List<Range>> batch = Translator.translate(tbatch, Translator.TKET, new Translator.ListTranslator<TRange,Range>(Translator.TRT));
       
-      for (KeyExtent keyExtent : batch.keySet()) {
-        if (threadPoolExtent == null) {
-          threadPoolExtent = keyExtent;
-        } else if (keyExtent.isRootTablet()) {
-          throw new IllegalArgumentException("Cannot batch query root tablet with other tablets " + threadPoolExtent + " " + keyExtent);
-        } else if (keyExtent.isMeta() && !threadPoolExtent.isMeta()) {
-          throw new IllegalArgumentException("Cannot batch query " + MetadataTable.NAME + " and non " + MetadataTable.NAME + " tablets " + threadPoolExtent
-              + " " + keyExtent);
-        }
-        
-      }
+      // This is used to determine which thread pool to use
+      KeyExtent threadPoolExtent = batch.keySet().iterator().next();
       
       if (waitForWrites)
         writeTracker.waitForWrites(TabletType.type(batch.keySet()));
@@ -2597,14 +2591,14 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     List<String> logSet = new ArrayList<String>();
     for (DfsLogger log : logs)
       logSet.add(log.toString());
-    MetadataTable.LogEntry entry = new MetadataTable.LogEntry();
+    MetadataTableUtil.LogEntry entry = new MetadataTableUtil.LogEntry();
     entry.extent = extent;
     entry.tabletId = id;
     entry.timestamp = now;
     entry.server = logs.get(0).getLogger();
     entry.filename = logs.get(0).getFileName();
     entry.logSet = logSet;
-    MetadataTable.addLogEntry(SecurityConstants.getSystemCredentials(), entry, getLock());
+    MetadataTableUtil.addLogEntry(SecurityConstants.getSystemCredentials(), entry, getLock());
   }
   
   private int startServer(AccumuloConfiguration conf, Property portHint, TProcessor processor, String threadName) throws UnknownHostException {
@@ -2873,7 +2867,7 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
       throw new AccumuloException("Root tablet already has a location set");
     }
     
-    return new Pair<Text,KeyExtent>(new Text(RootTable.ZROOT_TABLET), null);
+    return new Pair<Text,KeyExtent>(new Text(RootTable.ROOT_TABLET_LOCATION), null);
   }
   
   public static Pair<Text,KeyExtent> verifyTabletInformation(KeyExtent extent, TServerInstance instance, SortedMap<Key,Value> tabletsKeyValues,
@@ -2887,8 +2881,9 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     if (extent.isMeta())
       tableToVerify = RootTable.ID;
     
-    List<ColumnFQ> columnsToFetch = Arrays.asList(new ColumnFQ[] {MetadataTable.DIRECTORY_COLUMN, MetadataTable.PREV_ROW_COLUMN,
-        MetadataTable.SPLIT_RATIO_COLUMN, MetadataTable.OLD_PREV_ROW_COLUMN, MetadataTable.TIME_COLUMN});
+    List<ColumnFQ> columnsToFetch = Arrays.asList(new ColumnFQ[] {TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN,
+        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN, TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN,
+        TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN, TabletsSection.ServerColumnFamily.TIME_COLUMN});
     
     ScannerImpl scanner = new ScannerImpl(HdfsZooInstance.getInstance(), SecurityConstants.getSystemCredentials(), tableToVerify, Authorizations.EMPTY);
     scanner.setRange(extent.toMetadataRange());
@@ -2913,18 +2908,18 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
     
     Value oldPrevEndRow = null;
     for (Entry<Key,Value> entry : tabletsKeyValues.entrySet()) {
-      if (MetadataTable.OLD_PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
+      if (TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
         oldPrevEndRow = entry.getValue();
       }
     }
     
     if (oldPrevEndRow != null) {
       SortedMap<Text,SortedMap<ColumnFQ,Value>> tabletEntries;
-      tabletEntries = MetadataTable.getTabletEntries(tabletsKeyValues, columnsToFetch);
+      tabletEntries = MetadataTableUtil.getTabletEntries(tabletsKeyValues, columnsToFetch);
       
       KeyExtent fke;
       try {
-        fke = MetadataTable.fixSplit(metadataEntry, tabletEntries.get(metadataEntry), instance, SecurityConstants.getSystemCredentials(), lock);
+        fke = MetadataTableUtil.fixSplit(metadataEntry, tabletEntries.get(metadataEntry), instance, SecurityConstants.getSystemCredentials(), lock);
       } catch (IOException e) {
         log.error("Error fixing split " + metadataEntry);
         throw new AccumuloException(e.toString());
@@ -2956,19 +2951,19 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
         return null;
       }
       Text cf = key.getColumnFamily();
-      if (cf.equals(MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY)) {
+      if (cf.equals(TabletsSection.FutureLocationColumnFamily.NAME)) {
         if (future != null) {
           throw new AccumuloException("Tablet has multiple future locations " + extent);
         }
         future = new TServerInstance(entry.getValue(), key.getColumnQualifier());
-      } else if (cf.equals(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY)) {
+      } else if (cf.equals(TabletsSection.CurrentLocationColumnFamily.NAME)) {
         log.info("Tablet seems to be already assigned to " + new TServerInstance(entry.getValue(), key.getColumnQualifier()));
         return null;
-      } else if (MetadataTable.PREV_ROW_COLUMN.hasColumns(key)) {
+      } else if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
         prevEndRow = entry.getValue();
-      } else if (MetadataTable.DIRECTORY_COLUMN.hasColumns(key)) {
+      } else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
         dir = entry.getValue();
-      } else if (MetadataTable.TIME_COLUMN.hasColumns(key)) {
+      } else if (TabletsSection.ServerColumnFamily.TIME_COLUMN.hasColumns(key)) {
         time = entry.getValue();
       }
     }
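
[Editorial note] The multi-scan setup above replaces a per-extent compatibility loop with a single precondition: a batch never spans tables, so the first extent can pick the thread pool. A standalone sketch of the simplified invariant; the method and variable names are illustrative, not the server's:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class BatchTableCheckSketch {
      static String checkSingleTable(List<String> tableIdsOfExtents) {
        Set<String> tables = new HashSet<String>(tableIdsOfExtents);
        if (tables.size() != 1)
          throw new IllegalArgumentException("Cannot batch scan over multiple tables");
        // With one table guaranteed, any extent determines the thread pool.
        return tableIdsOfExtents.get(0);
      }

      public static void main(String[] args) {
        System.out.println(checkSingleTable(Arrays.asList("2", "2", "2")));
      }
    }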

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServerResourceManager.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServerResourceManager.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServerResourceManager.java
index 2fdc4cc..78313e7 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServerResourceManager.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServerResourceManager.java
@@ -41,11 +41,11 @@ import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.file.blockfile.cache.LruBlockCache;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
 import org.apache.accumulo.core.util.Daemon;
 import org.apache.accumulo.core.util.LoggingRunnable;
-import org.apache.accumulo.core.util.MetadataTable.DataFileValue;
 import org.apache.accumulo.core.util.NamingThreadFactory;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.fs.FileRef;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/util/AddFilesWithMissingEntries.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/AddFilesWithMissingEntries.java b/server/src/main/java/org/apache/accumulo/server/util/AddFilesWithMissingEntries.java
index 7255765..813d54c 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/AddFilesWithMissingEntries.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/AddFilesWithMissingEntries.java
@@ -26,12 +26,12 @@ import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.PartialKey;
-import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.hadoop.conf.Configuration;
@@ -53,18 +53,16 @@ public class AddFilesWithMissingEntries {
   }
   
   /**
-   * A utility to add files to the {@value MetadataTable#NAME} table that are not listed in the root tablet. This is a recovery tool for someone who
-   * knows what they are doing. It might be better to save off files, and recover your instance by re-initializing and importing the existing files.
+   * A utility to add files to the {@value MetadataTable#NAME} table that are not listed in the root tablet. This is a recovery tool for someone who knows what
+   * they are doing. It might be better to save off files, and recover your instance by re-initializing and importing the existing files.
    */
   public static void main(String[] args) throws Exception {
     Opts opts = new Opts();
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     opts.parseArgs(AddFilesWithMissingEntries.class.getName(), args, bwOpts);
     
-    final Key rootTableEnd = new Key(RootTable.EXTENT.getEndRow());
-    final Range range = new Range(rootTableEnd.followingKey(PartialKey.ROW), true, MetadataTable.RESERVED_RANGE_START_KEY, false);
     final Scanner scanner = opts.getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    scanner.setRange(range);
+    scanner.setRange(MetadataSchema.TabletsSection.getRange());
     final Configuration conf = new Configuration();
     final FileSystem fs = FileSystem.get(conf);
     
@@ -89,10 +87,10 @@ public class AddFilesWithMissingEntries {
         knownFiles.clear();
         last = ke;
       }
-      if (MetadataTable.DIRECTORY_COLUMN.hasColumns(key)) {
+      if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
         directory = entry.getValue().toString();
         log.debug("Found directory " + directory + " for row " + key.getRow().toString());
-      } else if (key.compareColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY) == 0) {
+      } else if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
         String filename = key.getColumnQualifier().toString();
         knownFiles.add(filename);
         log.debug("METADATA file found: " + filename);
@@ -124,7 +122,7 @@ public class AddFilesWithMissingEntries {
           String size = Long.toString(file.getLen());
           String entries = "1"; // lie
           String value = size + "," + entries;
-          m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text(filename), new Value(value.getBytes()));
+          m.put(DataFileColumnFamily.NAME, new Text(filename), new Value(value.getBytes()));
           if (update) {
             writer.getBatchWriter(MetadataTable.NAME).addMutation(m);
           }
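
[Editorial note] The tool's write path now names the column family through the schema class. A minimal sketch of the mutation it builds for a data file missing from the metadata; the row and filename parameters are illustrative placeholders:

    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
    import org.apache.hadoop.io.Text;

    public class MissingFileMutationSketch {
      static Mutation missingFileEntry(Text metadataRow, String filename, long size) {
        Mutation m = new Mutation(metadataRow);
        String value = size + "," + "1"; // entry count of 1 is a deliberate lie, as in the tool
        m.put(DataFileColumnFamily.NAME, new Text(filename), new Value(value.getBytes()));
        return m;
      }
    }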

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/util/Admin.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/Admin.java b/server/src/main/java/org/apache/accumulo/server/util/Admin.java
index 617053a..3bb801a 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/Admin.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/Admin.java
@@ -31,9 +31,9 @@ import org.apache.accumulo.core.client.impl.ClientExec;
 import org.apache.accumulo.core.client.impl.MasterClient;
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
 import org.apache.accumulo.core.master.thrift.MasterClientService;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.security.SecurityConstants;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java b/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java
index 9c80927..fde9610 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java
@@ -30,6 +30,9 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.tabletserver.thrift.ConstraintViolationException;
@@ -85,7 +88,7 @@ public class CheckForMetadataProblems {
       if (broke && opts.fix) {
         KeyExtent ke = new KeyExtent(tabke);
         ke.setPrevEndRow(lastEndRow);
-        MetadataTable.updateTabletPrevEndRow(ke, CredentialHelper.create(opts.principal, opts.getToken(), opts.instance));
+        MetadataTableUtil.updateTabletPrevEndRow(ke, CredentialHelper.create(opts.principal, opts.getToken(), opts.instance));
         System.out.println("KE " + tabke + " has been repaired to " + ke);
       }
       
@@ -108,9 +111,9 @@ public class CheckForMetadataProblems {
       scanner = opts.getConnector().createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     }
     
-    scanner.setRange(MetadataTable.KEYSPACE);
-    MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
-    scanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
+    scanner.setRange(MetadataSchema.TabletsSection.getRange());
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
+    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
     
     Text colf = new Text();
     Text colq = new Text();
@@ -140,16 +143,16 @@ public class CheckForMetadataProblems {
         tables.put(tableName, tablets);
       }
       
-      if (MetadataTable.PREV_ROW_COLUMN.equals(colf, colq)) {
+      if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(colf, colq)) {
         KeyExtent tabletKe = new KeyExtent(entry.getKey().getRow(), entry.getValue());
         tablets.add(tabletKe);
         justLoc = false;
-      } else if (colf.equals(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY)) {
+      } else if (colf.equals(TabletsSection.CurrentLocationColumnFamily.NAME)) {
         if (justLoc) {
           System.out.println("Problem at key " + entry.getKey());
           sawProblems = true;
           if (opts.fix) {
-            Writer t = MetadataTable.getMetadataTable(CredentialHelper.create(opts.principal, opts.getToken(), opts.instance));
+            Writer t = MetadataTableUtil.getMetadataTable(CredentialHelper.create(opts.principal, opts.getToken(), opts.instance));
             Key k = entry.getKey();
             Mutation m = new Mutation(k.getRow());
             m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
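
[Editorial note] When the fix option is set, the check above repairs a location entry that was seen without a matching prev-row entry for the same tablet by deleting that exact column. The repair is an ordinary column-level putDelete, sketched here in isolation:

    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Mutation;

    public class OrphanLocationFixSketch {
      // Builds the repair mutation used by the fix path: delete the exact
      // location column flagged as a problem, identified by its key.
      static Mutation deleteLocation(Key k) {
        Mutation m = new Mutation(k.getRow());
        m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
        return m;
      }
    }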

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java b/server/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
index 80b1a7c..de27112 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/FindOfflineTablets.java
@@ -23,8 +23,8 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.conf.DefaultConfiguration;
 import org.apache.accumulo.core.master.state.tables.TableState;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.master.LiveTServerSet;
 import org.apache.accumulo.server.master.LiveTServerSet.Listener;
@@ -48,10 +48,10 @@ public class FindOfflineTablets {
     opts.parseArgs(FindOfflineTablets.class.getName(), args);
     final AtomicBoolean scanning = new AtomicBoolean(false);
     Instance instance = opts.getInstance();
-    MetaDataTableScanner rootScanner = new MetaDataTableScanner(instance, SecurityConstants.getSystemCredentials(), RootTable.METADATA_TABLETS_RANGE);
-    MetaDataTableScanner metaScanner = new MetaDataTableScanner(instance, SecurityConstants.getSystemCredentials(), MetadataTable.NON_ROOT_KEYSPACE);
+    MetaDataTableScanner rootScanner = new MetaDataTableScanner(instance, SecurityConstants.getSystemCredentials(), MetadataSchema.TabletsSection.getRange());
+    MetaDataTableScanner metaScanner = new MetaDataTableScanner(instance, SecurityConstants.getSystemCredentials(), MetadataSchema.TabletsSection.getRange());
     @SuppressWarnings("unchecked")
-    Iterator<TabletLocationState> scanner = (Iterator<TabletLocationState>)new IteratorChain(rootScanner, metaScanner);
+    Iterator<TabletLocationState> scanner = new IteratorChain(rootScanner, metaScanner);
     LiveTServerSet tservers = new LiveTServerSet(instance, DefaultConfiguration.getDefaultConfiguration(), new Listener() {
       @Override
       public void update(LiveTServerSet current, Set<TServerInstance> deleted, Set<TServerInstance> added) {
@@ -66,7 +66,8 @@ public class FindOfflineTablets {
     while (scanner.hasNext()) {
       TabletLocationState locationState = scanner.next();
       TabletState state = locationState.getState(tservers.getCurrentServers());
-      if (state != null && state != TabletState.HOSTED && TableManager.getInstance().getTableState(locationState.extent.getTableId().toString()) != TableState.OFFLINE)
+      if (state != null && state != TabletState.HOSTED
+          && TableManager.getInstance().getTableState(locationState.extent.getTableId().toString()) != TableState.OFFLINE)
         if (!locationState.extent.equals(RootTable.EXTENT))
           System.out.println(locationState + " is " + state + "  #walogs:" + locationState.walogs.size());
     }
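
[Editorial note] IteratorChain here comes from commons-collections, whose raw generics force the unchecked assignment above. A minimal standalone sketch of the same chaining pattern, assuming the commons-collections 3.x API:

    import java.util.Arrays;
    import java.util.Iterator;

    import org.apache.commons.collections.iterators.IteratorChain;

    public class IteratorChainSketch {
      @SuppressWarnings("unchecked")
      public static void main(String[] args) {
        Iterator<String> first = Arrays.asList("root tablet").iterator();
        Iterator<String> second = Arrays.asList("meta 1", "meta 2").iterator();
        // IteratorChain is a raw type, hence the suppressed warning.
        Iterator<String> chained = new IteratorChain(first, second);
        while (chained.hasNext())
          System.out.println(chained.next());
      }
    }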

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/Initialize.java b/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
index 0b62c6c..7d4e6f2 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
@@ -43,10 +43,14 @@ import org.apache.accumulo.core.file.FileSKVWriter;
 import org.apache.accumulo.core.iterators.user.VersioningIterator;
 import org.apache.accumulo.core.master.state.tables.TableState;
 import org.apache.accumulo.core.master.thrift.MasterGoalState;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
 import org.apache.accumulo.core.security.SecurityUtil;
 import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
@@ -81,6 +85,7 @@ import com.beust.jcommander.Parameter;
 public class Initialize {
   private static final Logger log = Logger.getLogger(Initialize.class);
   private static final String DEFAULT_ROOT_USER = "root";
+  public static final String TABLE_TABLETS_TABLET_DIR = "/table_info";
   
   private static ConsoleReader reader = null;
   
@@ -107,10 +112,9 @@ public class Initialize {
     initialMetadataConf.put(Property.TABLE_ITERATOR_PREFIX.getKey() + "majc.bulkLoadFilter", "20," + MetadataBulkLoadFilter.class.getName());
     initialMetadataConf.put(Property.TABLE_FAILURES_IGNORE.getKey(), "false");
     initialMetadataConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "tablet",
-        String.format("%s,%s", MetadataTable.TABLET_COLUMN_FAMILY.toString(), MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY.toString()));
-    initialMetadataConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "server", String.format("%s,%s,%s,%s",
-        MetadataTable.DATAFILE_COLUMN_FAMILY.toString(), MetadataTable.LOG_COLUMN_FAMILY.toString(), MetadataTable.SERVER_COLUMN_FAMILY.toString(),
-        MetadataTable.FUTURE_LOCATION_COLUMN_FAMILY.toString()));
+        String.format("%s,%s", TabletsSection.TabletColumnFamily.NAME, TabletsSection.CurrentLocationColumnFamily.NAME));
+    initialMetadataConf.put(Property.TABLE_LOCALITY_GROUP_PREFIX.getKey() + "server", String.format("%s,%s,%s,%s", DataFileColumnFamily.NAME,
+        LogColumnFamily.NAME, TabletsSection.ServerColumnFamily.NAME, TabletsSection.FutureLocationColumnFamily.NAME));
     initialMetadataConf.put(Property.TABLE_LOCALITY_GROUPS.getKey(), "tablet,server");
     initialMetadataConf.put(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey(), "");
     initialMetadataConf.put(Property.TABLE_INDEXCACHE_ENABLED.getKey(), "true");
@@ -229,7 +233,7 @@ public class Initialize {
     
     // the actual disk locations of the metadata table and tablets
     final Path[] metadataTableDirs = paths(ServerConstants.getMetadataTableDirs());
-    final Path[] tableMetadataTabletDirs = paths(ServerConstants.prefix(ServerConstants.getMetadataTableDirs(), MetadataTable.TABLE_TABLET_LOCATION));
+    final Path[] tableMetadataTabletDirs = paths(ServerConstants.prefix(ServerConstants.getMetadataTableDirs(), TABLE_TABLETS_TABLET_DIR));
     final Path[] defaultMetadataTabletDirs = paths(ServerConstants.prefix(ServerConstants.getMetadataTableDirs(), Constants.DEFAULT_TABLET_LOCATION));
     
     fs.mkdirs(new Path(ServerConstants.getDataVersionLocation(), "" + ServerConstants.DATA_VERSION));
@@ -279,35 +283,40 @@ public class Initialize {
     FileSKVWriter mfw = FileOperations.getInstance().openWriter(initRootTabFile, ns, ns.getConf(), AccumuloConfiguration.getDefaultConfiguration());
     mfw.startDefaultLocalityGroup();
     
-    // ----------] table tablet info
-    Text tableExtent = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), MetadataTable.RESERVED_RANGE_START_KEY.getRow()));
+    Text tableExtent = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), MetadataSchema.TabletsSection.getRange().getEndKey().getRow()));
     
     // table tablet's directory
-    Key tableDirKey = new Key(tableExtent, MetadataTable.DIRECTORY_COLUMN.getColumnFamily(), MetadataTable.DIRECTORY_COLUMN.getColumnQualifier(), 0);
-    mfw.append(tableDirKey, new Value(MetadataTable.TABLE_TABLET_LOCATION.getBytes()));
+    Key tableDirKey = new Key(tableExtent, TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily(),
+        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), 0);
+    mfw.append(tableDirKey, new Value(TABLE_TABLETS_TABLET_DIR.getBytes()));
     
     // table tablet time
-    Key tableTimeKey = new Key(tableExtent, MetadataTable.TIME_COLUMN.getColumnFamily(), MetadataTable.TIME_COLUMN.getColumnQualifier(), 0);
+    Key tableTimeKey = new Key(tableExtent, TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnFamily(),
+        TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnQualifier(), 0);
     mfw.append(tableTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes()));
     
     // table tablet's prevrow
-    Key tablePrevRowKey = new Key(tableExtent, MetadataTable.PREV_ROW_COLUMN.getColumnFamily(), MetadataTable.PREV_ROW_COLUMN.getColumnQualifier(), 0);
+    Key tablePrevRowKey = new Key(tableExtent, TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(),
+        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier(), 0);
     mfw.append(tablePrevRowKey, KeyExtent.encodePrevEndRow(null));
     
     // ----------] default tablet info
     Text defaultExtent = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), null));
     
     // default's directory
-    Key defaultDirKey = new Key(defaultExtent, MetadataTable.DIRECTORY_COLUMN.getColumnFamily(), MetadataTable.DIRECTORY_COLUMN.getColumnQualifier(), 0);
+    Key defaultDirKey = new Key(defaultExtent, TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily(),
+        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), 0);
     mfw.append(defaultDirKey, new Value(Constants.DEFAULT_TABLET_LOCATION.getBytes()));
     
     // default's time
-    Key defaultTimeKey = new Key(defaultExtent, MetadataTable.TIME_COLUMN.getColumnFamily(), MetadataTable.TIME_COLUMN.getColumnQualifier(), 0);
+    Key defaultTimeKey = new Key(defaultExtent, TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnFamily(),
+        TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnQualifier(), 0);
     mfw.append(defaultTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes()));
     
     // default's prevrow
-    Key defaultPrevRowKey = new Key(defaultExtent, MetadataTable.PREV_ROW_COLUMN.getColumnFamily(), MetadataTable.PREV_ROW_COLUMN.getColumnQualifier(), 0);
-    mfw.append(defaultPrevRowKey, KeyExtent.encodePrevEndRow(MetadataTable.RESERVED_RANGE_START_KEY.getRow()));
+    Key defaultPrevRowKey = new Key(defaultExtent, TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(),
+        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier(), 0);
+    mfw.append(defaultPrevRowKey, KeyExtent.encodePrevEndRow(MetadataSchema.TabletsSection.getRange().getEndKey().getRow()));
     
     mfw.close();
     
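The hunk above illustrates the metadata-schema relocation: column constants that used to live on MetadataTable now come from MetadataSchema.TabletsSection. A minimal sketch of the new pattern for composing one such entry, assuming the imports shown in this diff (the variable names are illustrative, not from the patch):

  // Build the directory-column Key for a tablet using the relocated constants.
  Text extent = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), null));
  Key dirKey = new Key(extent,
      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily(),
      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), 0);
  mfw.append(dirKey, new Value(Constants.DEFAULT_TABLET_LOCATION.getBytes()));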

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/util/LocalityCheck.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/LocalityCheck.java b/server/src/main/java/org/apache/accumulo/server/util/LocalityCheck.java
index 5c9bd64..f7a57d8 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/LocalityCheck.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/LocalityCheck.java
@@ -26,8 +26,11 @@ import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
@@ -45,9 +48,9 @@ public class LocalityCheck {
     VolumeManager fs = VolumeManagerImpl.get();
     Connector connector = opts.getConnector();
     Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
-    scanner.fetchColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY);
-    scanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
-    scanner.setRange(MetadataTable.KEYSPACE);
+    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
+    scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+    scanner.setRange(MetadataSchema.TabletsSection.getRange());
     
     Map<String,Long> totalBlocks = new HashMap<String,Long>();
     Map<String,Long> localBlocks = new HashMap<String,Long>();
@@ -55,13 +58,13 @@ public class LocalityCheck {
     
     for (Entry<Key,Value> entry : scanner) {
       Key key = entry.getKey();
-      if (key.compareColumnFamily(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY) == 0) {
+      if (key.compareColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME) == 0) {
         String location = entry.getValue().toString();
         String[] parts = location.split(":");
         String host = parts[0];
         addBlocks(fs, host, files, totalBlocks, localBlocks);
         files.clear();
-      } else if (key.compareColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY) == 0) {
+      } else if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
         
         files.add(fs.getFullPath(key).toString());
       }

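The same pattern applies to any utility that scans tablet metadata under the relocated schema. A minimal sketch, assuming a live Connector named connector and the imports added in this diff (error handling elided):

  // Scan tablet locations and data-file entries from the metadata table.
  Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
  scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
  scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
  scanner.setRange(MetadataSchema.TabletsSection.getRange());
  for (Entry<Key,Value> entry : scanner) {
    // location values are "host:port"; data-file keys name the tablet's files
    System.out.println(entry.getKey() + " -> " + entry.getValue());
  }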

[43/50] [abbrv] git commit: ACCUMULO-1496 Use classloader to locate annotated launchable classes

Posted by ct...@apache.org.
ACCUMULO-1496 Use classloader to locate annotated launchable classes

git-svn-id: https://svn.apache.org/repos/asf/accumulo/branches/ACCUMULO-1496@1491605 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/dd0b97e5
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/dd0b97e5
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/dd0b97e5

Branch: refs/heads/ACCUMULO-1496
Commit: dd0b97e5c8a1c00e0e309694a3a6242afc0c8870
Parents: db39c35
Author: Christopher Tubbs <ct...@apache.org>
Authored: Mon Jun 10 21:23:30 2013 +0000
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Tue Jul 16 15:01:47 2013 -0400

----------------------------------------------------------------------
 api/pom.xml                                     |  27 +++++
 .../api/annotations/AccumuloService.java        |  33 ++++++
 assemble/pom.xml                                |  12 +++
 assemble/src/main/assemblies/component.xml      |   2 +
 bin/accumulo                                    |   5 +-
 .../accumulo/core/file/rfile/PrintInfo.java     |   4 +-
 .../apache/accumulo/core/util/CreateToken.java  |   2 +
 .../apache/accumulo/core/util/shell/Shell.java  |   2 +
 .../minicluster/MiniAccumuloRunner.java         |   2 +
 pom.xml                                         |  16 +++
 .../java/org/apache/accumulo/proxy/Proxy.java   |   7 +-
 .../server/gc/SimpleGarbageCollector.java       |   2 +
 .../apache/accumulo/server/master/Master.java   |   2 +
 .../apache/accumulo/server/monitor/Monitor.java |   3 +
 .../server/tabletserver/TabletServer.java       |   2 +
 .../accumulo/server/trace/TraceServer.java      |   6 +-
 .../org/apache/accumulo/server/util/Admin.java  |   3 +
 .../apache/accumulo/server/util/Initialize.java |   2 +
 .../accumulo/server/util/ZooKeeperMain.java     |   3 +
 start/pom.xml                                   |  12 +++
 .../java/org/apache/accumulo/start/Main.java    | 106 +++++++++++--------
 .../classloader/vfs/AccumuloVFSClassLoader.java |  25 ++++-
 22 files changed, 226 insertions(+), 52 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/api/pom.xml
----------------------------------------------------------------------
diff --git a/api/pom.xml b/api/pom.xml
new file mode 100644
index 0000000..9fe5f60
--- /dev/null
+++ b/api/pom.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.accumulo</groupId>
+    <artifactId>accumulo-project</artifactId>
+    <version>1.6.0-SNAPSHOT</version>
+  </parent>
+  <artifactId>accumulo-api</artifactId>
+  <name>API</name>
+</project>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/api/src/main/java/org/apache/accumulo/api/annotations/AccumuloService.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/accumulo/api/annotations/AccumuloService.java b/api/src/main/java/org/apache/accumulo/api/annotations/AccumuloService.java
new file mode 100644
index 0000000..31fbc67
--- /dev/null
+++ b/api/src/main/java/org/apache/accumulo/api/annotations/AccumuloService.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.api.annotations;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Annotation to describe a class that can be launched by Accumulo
+ * 
+ * @since 1.6.0
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.TYPE)
+public @interface AccumuloService {
+  String value();
+}
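
In practice a launchable class declares its keyword through this annotation and becomes discoverable by Main without a hard-coded mapping. A minimal sketch with a hypothetical class (only the annotation usage is from this patch; the class name and keyword are illustrative):

  import org.apache.accumulo.api.annotations.AccumuloService;

  @AccumuloService("my-service")               // keyword passed to bin/accumulo
  public class MyService {
    public static void main(String[] args) {   // Main invokes this reflectively
      System.out.println("launched as: bin/accumulo my-service");
    }
  }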

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/assemble/pom.xml
----------------------------------------------------------------------
diff --git a/assemble/pom.xml b/assemble/pom.xml
index db40b65..7584510 100644
--- a/assemble/pom.xml
+++ b/assemble/pom.xml
@@ -47,6 +47,14 @@
       <artifactId>jline</artifactId>
     </dependency>
     <dependency>
+      <groupId>net.sf.scannotation</groupId>
+      <artifactId>scannotation</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.accumulo</groupId>
+      <artifactId>accumulo-api</artifactId>
+    </dependency>
+    <dependency>
       <groupId>org.apache.accumulo</groupId>
       <artifactId>accumulo-core</artifactId>
     </dependency>
@@ -90,6 +98,10 @@
       <groupId>org.apache.thrift</groupId>
       <artifactId>libthrift</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.javassist</groupId>
+      <artifactId>javassist</artifactId>
+    </dependency>
   </dependencies>
   <profiles>
     <profile>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/assemble/src/main/assemblies/component.xml
----------------------------------------------------------------------
diff --git a/assemble/src/main/assemblies/component.xml b/assemble/src/main/assemblies/component.xml
index 3655a22..c366c41 100644
--- a/assemble/src/main/assemblies/component.xml
+++ b/assemble/src/main/assemblies/component.xml
@@ -32,7 +32,9 @@
         <include>com.google.code.gson:gson</include>
         <include>com.google.guava:guava</include>
         <include>jline:jline</include>
+        <include>net.sf.scannotation:scannotation</include>
         <include>org.apache.thrift:libthrift</include>
+        <include>org.javassist:javassist</include>
       </includes>
       <excludes>
         <exclude>${groupId}:${artifactId}-docs</exclude>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/bin/accumulo
----------------------------------------------------------------------
diff --git a/bin/accumulo b/bin/accumulo
index dcbdbae..a3b0a21 100755
--- a/bin/accumulo
+++ b/bin/accumulo
@@ -29,6 +29,9 @@ script=$( basename "$SOURCE" )
 . "$bin"/config.sh
 
 START_JAR=$ACCUMULO_HOME/lib/accumulo-start.jar
+API_JAR=$ACCUMULO_HOME/lib/accumulo-api.jar
+SCANNOTATION_JAR=$ACCUMULO_HOME/lib/scannotation.jar
+JAVASSIST_JAR=$ACCUMULO_HOME/lib/javassist.jar
 
 #
 # Resolve a program to its installation directory
@@ -81,7 +84,7 @@ esac
 
 XML_FILES=${ACCUMULO_HOME}/conf
 LOG4J_JAR=$(find $HADOOP_PREFIX/lib $HADOOP_PREFIX/share/hadoop/common/lib -name 'log4j*.jar' -print 2>/dev/null | head -1)
-CLASSPATH=${XML_FILES}:${START_JAR}:${LOG4J_JAR}
+CLASSPATH=${XML_FILES}:${API_JAR}:${JAVASSIST_JAR}:${SCANNOTATION_JAR}:${START_JAR}:${LOG4J_JAR}
 
 if [ -z "$JAVA_HOME" -o ! -d "$JAVA_HOME" ]; then
    echo "JAVA_HOME is not set or is not a directory.  Please make sure it's set globally or in conf/accumulo-env.sh"

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
index e591403..2f06474 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
@@ -19,6 +19,7 @@ package org.apache.accumulo.core.file.rfile;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.data.ByteSequence;
@@ -34,6 +35,7 @@ import org.apache.hadoop.fs.Path;
 
 import com.beust.jcommander.Parameter;
 
+@AccumuloService("rfile-info")
 public class PrintInfo {
   
   static class Opts extends Help {
@@ -49,7 +51,7 @@ public class PrintInfo {
     Configuration conf = new Configuration();
     @SuppressWarnings("deprecation")
     FileSystem hadoopFs = FileUtil.getFileSystem(conf, AccumuloConfiguration.getSiteConfiguration());
-    FileSystem localFs  = FileSystem.getLocal(conf);
+    FileSystem localFs = FileSystem.getLocal(conf);
     Opts opts = new Opts();
     opts.parseArgs(PrintInfo.class.getName(), args);
     if (opts.files.isEmpty()) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java b/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
index d8e6d94..5b25424 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
@@ -23,6 +23,7 @@ import java.io.PrintStream;
 
 import jline.console.ConsoleReader;
 
+import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.ClientOpts.Password;
 import org.apache.accumulo.core.cli.ClientOpts.PasswordConverter;
@@ -35,6 +36,7 @@ import org.apache.accumulo.core.security.CredentialHelper;
 
 import com.beust.jcommander.Parameter;
 
+@AccumuloService("create-token")
 public class CreateToken {
   
   private static ConsoleReader reader = null;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java b/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
index ef3e519..129fff7 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
@@ -37,6 +37,7 @@ import java.util.UUID;
 import jline.console.ConsoleReader;
 import jline.console.history.FileHistory;
 
+import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
@@ -158,6 +159,7 @@ import com.beust.jcommander.ParameterException;
 /**
  * A convenient console interface to perform basic accumulo functions Includes auto-complete, help, and quoted strings with escape sequences
  */
+@AccumuloService("shell")
 public class Shell extends ShellOptions {
   public static final Logger log = Logger.getLogger(Shell.class);
   private static final Logger audit = Logger.getLogger(Shell.class.getName() + ".audit");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java
index 1402b53..d3b2f94 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java
@@ -27,6 +27,7 @@ import java.util.Map;
 import java.util.Properties;
 import java.util.regex.Pattern;
 
+import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.commons.io.FileUtils;
@@ -58,6 +59,7 @@ import com.google.common.io.Files;
  * 
  * @since 1.6.0
  */
+@AccumuloService("minicluster")
 public class MiniAccumuloRunner {
   public static class PropertiesConverter implements IStringConverter<Properties> {
     @Override

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 905e4e7..6d6539c 100644
--- a/pom.xml
+++ b/pom.xml
@@ -72,6 +72,7 @@
     <maven>${maven.min-version}</maven>
   </prerequisites>
   <modules>
+    <module>api</module>
     <module>trace</module>
     <module>core</module>
     <module>fate</module>
@@ -206,6 +207,16 @@
         <version>1.2.16</version>
       </dependency>
       <dependency>
+        <groupId>net.sf.scannotation</groupId>
+        <artifactId>scannotation</artifactId>
+        <version>1.0.2</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.accumulo</groupId>
+        <artifactId>accumulo-api</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
         <groupId>org.apache.accumulo</groupId>
         <artifactId>accumulo-core</artifactId>
         <version>${project.version}</version>
@@ -328,6 +339,11 @@
         <version>3.1</version>
       </dependency>
       <dependency>
+        <groupId>org.javassist</groupId>
+        <artifactId>javassist</artifactId>
+        <version>3.17.1-GA</version>
+      </dependency>
+      <dependency>
         <groupId>org.mortbay.jetty</groupId>
         <artifactId>jetty</artifactId>
         <version>6.1.26</version>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
----------------------------------------------------------------------
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java b/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
index 72231f0..9552456 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
@@ -23,6 +23,7 @@ import java.io.InputStream;
 import java.lang.reflect.Constructor;
 import java.util.Properties;
 
+import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.minicluster.MiniAccumuloCluster;
@@ -40,6 +41,7 @@ import com.beust.jcommander.IStringConverter;
 import com.beust.jcommander.Parameter;
 import com.google.common.io.Files;
 
+@AccumuloService("proxy")
 public class Proxy {
   
   private static final Logger log = Logger.getLogger(Proxy.class);
@@ -100,6 +102,7 @@ public class Proxy {
       opts.prop.setProperty("instance", accumulo.getConfig().getInstanceName());
       opts.prop.setProperty("zookeepers", accumulo.getConfig().getZooKeepers());
       Runtime.getRuntime().addShutdownHook(new Thread() {
+        @Override
         public void start() {
           try {
             accumulo.stop();
@@ -128,7 +131,7 @@ public class Proxy {
     
     Class<?> proxyProcClass = Class.forName(api.getName() + "$Processor");
     Class<?> proxyIfaceClass = Class.forName(api.getName() + "$Iface");
-
+    
     @SuppressWarnings("unchecked")
     Constructor<? extends TProcessor> proxyProcConstructor = (Constructor<? extends TProcessor>) proxyProcClass.getConstructor(proxyIfaceClass);
     
@@ -139,7 +142,7 @@ public class Proxy {
     final long maxFrameSize = AccumuloConfiguration.getMemoryInBytes(properties.getProperty("maxFrameSize", "16M"));
     if (maxFrameSize > Integer.MAX_VALUE)
       throw new RuntimeException(maxFrameSize + " is larger than MAX_INT");
-    args.transportFactory(new TFramedTransport.Factory((int)maxFrameSize));
+    args.transportFactory(new TFramedTransport.Factory((int) maxFrameSize));
     args.protocolFactory(protoClass.newInstance());
     return new THsHaServer(args);
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java b/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
index f18e5bc..864717f 100644
--- a/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
+++ b/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
@@ -36,6 +36,7 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.client.AccumuloException;
@@ -104,6 +105,7 @@ import org.apache.zookeeper.KeeperException;
 
 import com.beust.jcommander.Parameter;
 
+@AccumuloService("gc")
 public class SimpleGarbageCollector implements Iface {
   private static final Text EMPTY_TEXT = new Text();
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/server/src/main/java/org/apache/accumulo/server/master/Master.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/Master.java b/server/src/main/java/org/apache/accumulo/server/master/Master.java
index b5ffd0a..fa8a7e5 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/Master.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/Master.java
@@ -34,6 +34,7 @@ import java.util.TreeMap;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
@@ -163,6 +164,7 @@ import org.apache.zookeeper.data.Stat;
  * 
  * The master will also coordinate log recoveries and reports general status.
  */
+@AccumuloService("master")
 public class Master implements LiveTServerSet.Listener, TableObserver, CurrentState {
   
   final static Logger log = Logger.getLogger(Master.class);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java b/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java
index 56e473a..6069f64 100644
--- a/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java
+++ b/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java
@@ -27,6 +27,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 
+import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.MasterClient;
@@ -73,6 +74,7 @@ import org.apache.accumulo.server.problems.ProblemType;
 import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.util.EmbeddedWebServer;
 import org.apache.accumulo.trace.instrument.Tracer;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.log4j.Logger;
 import org.apache.zookeeper.WatchedEvent;
 import org.apache.zookeeper.Watcher;
@@ -81,6 +83,7 @@ import org.apache.zookeeper.ZooKeeper;
 /**
  * Serve master statistics with an embedded web server.
  */
+@AccumuloService("monitor")
 public class Monitor {
   private static final Logger log = Logger.getLogger(Monitor.class);
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
index 9d50f07..9ddf6bb 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
@@ -62,6 +62,7 @@ import java.util.concurrent.atomic.AtomicReference;
 import javax.management.ObjectName;
 import javax.management.StandardMBean;
 
+import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
@@ -220,6 +221,7 @@ enum ScanRunState {
   QUEUED, RUNNING, FINISHED
 }
 
+@AccumuloService("tserver")
 public class TabletServer extends AbstractMetricsImpl implements org.apache.accumulo.server.tabletserver.metrics.TabletServerMBean {
   private static final Logger log = Logger.getLogger(TabletServer.class);
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/server/src/main/java/org/apache/accumulo/server/trace/TraceServer.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/trace/TraceServer.java b/server/src/main/java/org/apache/accumulo/server/trace/TraceServer.java
index 67a55fa..841166d 100644
--- a/server/src/main/java/org/apache/accumulo/server/trace/TraceServer.java
+++ b/server/src/main/java/org/apache/accumulo/server/trace/TraceServer.java
@@ -23,6 +23,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
@@ -72,6 +73,7 @@ import org.apache.zookeeper.Watcher;
 import org.apache.zookeeper.Watcher.Event.EventType;
 import org.apache.zookeeper.Watcher.Event.KeeperState;
 
+@AccumuloService("tracer")
 public class TraceServer implements Watcher {
   
   final private static Logger log = Logger.getLogger(TraceServer.class);
@@ -172,12 +174,12 @@ public class TraceServer implements Watcher {
           Properties props = new Properties();
           AuthenticationToken token = AccumuloClassLoader.getClassLoader().loadClass(conf.get(Property.TRACE_TOKEN_TYPE)).asSubclass(AuthenticationToken.class)
               .newInstance();
-
+          
           int prefixLength = Property.TRACE_TOKEN_PROPERTY_PREFIX.getKey().length() + 1;
           for (Entry<String,String> entry : loginMap.entrySet()) {
             props.put(entry.getKey().substring(prefixLength), entry.getValue());
           }
-
+          
           token.init(props);
           
           at = token;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/server/src/main/java/org/apache/accumulo/server/util/Admin.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/Admin.java b/server/src/main/java/org/apache/accumulo/server/util/Admin.java
index 3bb801a..a415de7 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/Admin.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/Admin.java
@@ -22,6 +22,8 @@ import java.util.List;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.accumulo.api.annotations.AccumuloService;
+import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
@@ -44,6 +46,7 @@ import com.beust.jcommander.JCommander;
 import com.beust.jcommander.Parameter;
 import com.beust.jcommander.Parameters;
 
+@AccumuloService("admin")
 public class Admin {
   private static final Logger log = Logger.getLogger(Admin.class);
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/Initialize.java b/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
index 7d4e6f2..af053aa 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
@@ -28,6 +28,7 @@ import java.util.UUID;
 
 import jline.console.ConsoleReader;
 
+import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
@@ -82,6 +83,7 @@ import com.beust.jcommander.Parameter;
  * This class is used to setup the directory structure and the root tablet to get an instance started
  * 
  */
+@AccumuloService("init")
 public class Initialize {
   private static final Logger log = Logger.getLogger(Initialize.class);
   private static final String DEFAULT_ROOT_USER = "root";

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/server/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java b/server/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
index 37edb1a..dc01915 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
@@ -16,6 +16,8 @@
  */
 package org.apache.accumulo.server.util;
 
+import org.apache.accumulo.api.annotations.AccumuloService;
+import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.util.CachedConfiguration;
@@ -26,6 +28,7 @@ import org.apache.hadoop.fs.Path;
 
 import com.beust.jcommander.Parameter;
 
+@AccumuloService("zookeeper")
 public class ZooKeeperMain {
   
   static class Opts extends Help {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/start/pom.xml
----------------------------------------------------------------------
diff --git a/start/pom.xml b/start/pom.xml
index 3484aab..133413e 100644
--- a/start/pom.xml
+++ b/start/pom.xml
@@ -26,10 +26,22 @@
   <name>Start</name>
   <dependencies>
     <dependency>
+      <groupId>net.sf.scannotation</groupId>
+      <artifactId>scannotation</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.accumulo</groupId>
+      <artifactId>accumulo-api</artifactId>
+    </dependency>
+    <dependency>
       <groupId>org.apache.commons</groupId>
       <artifactId>commons-vfs2</artifactId>
     </dependency>
     <dependency>
+      <groupId>org.javassist</groupId>
+      <artifactId>javassist</artifactId>
+    </dependency>
+    <dependency>
       <groupId>commons-io</groupId>
       <artifactId>commons-io</artifactId>
       <scope>provided</scope>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/start/src/main/java/org/apache/accumulo/start/Main.java
----------------------------------------------------------------------
diff --git a/start/src/main/java/org/apache/accumulo/start/Main.java b/start/src/main/java/org/apache/accumulo/start/Main.java
index 66a3e71..3821c3f 100644
--- a/start/src/main/java/org/apache/accumulo/start/Main.java
+++ b/start/src/main/java/org/apache/accumulo/start/Main.java
@@ -16,75 +16,65 @@
  */
 package org.apache.accumulo.start;
 
+import java.io.IOException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Modifier;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Set;
 
+import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.start.classloader.AccumuloClassLoader;
+import org.scannotation.AnnotationDB;
 
 public class Main {
   
+  private static AnnotationDB annotationDatabase;
+  
   public static void main(String[] args) throws Exception {
     Runnable r = null;
     
     try {
+      Thread.currentThread().setContextClassLoader(AccumuloClassLoader.getClassLoader());
+      Class<?> vfsClassLoader = AccumuloClassLoader.getClassLoader().loadClass("org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader");
+      ClassLoader cl = (ClassLoader) vfsClassLoader.getMethod("getClassLoader", new Class[] {}).invoke(null, new Object[] {});
+      Thread.currentThread().setContextClassLoader(cl);
+      
+      URL[] urls = (URL[]) vfsClassLoader.getMethod("getURLs", new Class[] {}).invoke(null, new Object[] {});
+      
       if (args.length == 0) {
-        printUsage();
+        printUsage(cl, urls);
         System.exit(1);
       }
       final String argsToPass[] = new String[args.length - 1];
       System.arraycopy(args, 1, argsToPass, 0, args.length - 1);
       
-      Thread.currentThread().setContextClassLoader(AccumuloClassLoader.getClassLoader());
-      
-      Class<?> vfsClassLoader = AccumuloClassLoader.getClassLoader().loadClass("org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader");
-      
-      ClassLoader cl = (ClassLoader) vfsClassLoader.getMethod("getClassLoader", new Class[] {}).invoke(null, new Object[] {});
-      
       Class<?> runTMP = null;
       
-      Thread.currentThread().setContextClassLoader(cl);
-      
-      if (args[0].equals("master")) {
-        runTMP = cl.loadClass("org.apache.accumulo.server.master.Master");
-      } else if (args[0].equals("tserver")) {
-        runTMP = cl.loadClass("org.apache.accumulo.server.tabletserver.TabletServer");
-      } else if (args[0].equals("shell")) {
-        runTMP = cl.loadClass("org.apache.accumulo.core.util.shell.Shell");
-      } else if (args[0].equals("init")) {
-        runTMP = cl.loadClass("org.apache.accumulo.server.util.Initialize");
-      } else if (args[0].equals("admin")) {
-        runTMP = cl.loadClass("org.apache.accumulo.server.util.Admin");
-      } else if (args[0].equals("gc")) {
-        runTMP = cl.loadClass("org.apache.accumulo.server.gc.SimpleGarbageCollector");
-      } else if (args[0].equals("monitor")) {
-        runTMP = cl.loadClass("org.apache.accumulo.server.monitor.Monitor");
-      } else if (args[0].equals("tracer")) {
-        runTMP = cl.loadClass("org.apache.accumulo.server.trace.TraceServer");
-      } else if (args[0].equals("proxy")) {
-        runTMP = cl.loadClass("org.apache.accumulo.proxy.Proxy");
-      } else if (args[0].equals("minicluster")) {
-        runTMP = cl.loadClass("org.apache.accumulo.minicluster.MiniAccumuloRunner");
-      } else if (args[0].equals("classpath")) {
+      if (args[0].equals("classpath")) {
         vfsClassLoader.getMethod("printClassPath", new Class[] {}).invoke(vfsClassLoader, new Object[] {});
         return;
       } else if (args[0].equals("version")) {
         runTMP = cl.loadClass("org.apache.accumulo.core.Constants");
         System.out.println(runTMP.getField("VERSION").get(null));
         return;
-      } else if (args[0].equals("rfile-info")) {
-        runTMP = cl.loadClass("org.apache.accumulo.core.file.rfile.PrintInfo");
-      } else if (args[0].equals("login-info")) {
-        runTMP = cl.loadClass("org.apache.accumulo.core.util.LoginProperties");
-      } else if (args[0].equals("zookeeper")) {
-        runTMP = cl.loadClass("org.apache.accumulo.server.util.ZooKeeperMain");
-      } else if (args[0].equals("create-token")) {
-        runTMP = cl.loadClass("org.apache.accumulo.core.util.CreateToken");
       } else {
-        try {
-          runTMP = cl.loadClass(args[0]);
-        } catch (ClassNotFoundException cnfe) {
-          System.out.println("Classname " + args[0] + " not found.  Please make sure you use the wholly qualified package name.");
-          System.exit(1);
+        for (String className : loadAnnotationDB(urls, AccumuloService.class)) {
+          Class<?> runTMPCandidate = cl.loadClass(className);
+          if (args[0].equals(runTMPCandidate.getAnnotation(AccumuloService.class).value())) {
+            runTMP = runTMPCandidate;
+            break;
+          }
+        }
+        
+        if (runTMP == null) {
+          try {
+            runTMP = cl.loadClass(args[0]);
+          } catch (ClassNotFoundException cnfe) {
+            System.out.println("Classname " + args[0] + " not found.  Please make sure you use the wholly qualified package name.");
+            System.exit(1);
+          }
         }
       }
       Method main = null;
@@ -100,6 +90,7 @@ public class Main {
       final Object thisIsJustOneArgument = argsToPass;
       final Method finalMain = main;
       r = new Runnable() {
+        @Override
         public void run() {
           try {
             finalMain.invoke(null, thisIsJustOneArgument);
@@ -120,7 +111,32 @@ public class Main {
     }
   }
   
-  private static void printUsage() {
-    System.out.println("accumulo init | master | tserver | monitor | shell | admin | gc | classpath | rfile-info | login-info | tracer | minicluster | proxy | zookeeper | create-token | <accumulo class> args");
+  private static void printUsage(ClassLoader cl, URL[] urls) throws IOException, ClassNotFoundException {
+    ArrayList<String> keywords = new ArrayList<String>(20);
+    for (String className : loadAnnotationDB(urls, AccumuloService.class)) {
+      Class<?> runTMPCandidate = cl.loadClass(className);
+      keywords.add(runTMPCandidate.getAnnotation(AccumuloService.class).value());
+    }
+    keywords.add("classpath");
+    keywords.add("version");
+    
+    String prefix = "";
+    String kwString = "";
+    for (String kw : keywords) {
+      kwString += prefix + kw;
+      prefix = " | ";
+    }
+    System.out.println("accumulo " + kwString + " | <accumulo class> args");
+  }
+  
+  protected synchronized static Set<String> loadAnnotationDB(URL[] urls, Class<?> annotationClass) throws IOException {
+    if (annotationDatabase == null) {
+      AnnotationDB database = new AnnotationDB();
+      database.setScanClassAnnotations(true);
+      database.scanArchives(urls);
+      annotationDatabase = database;
+    }
+    Set<String> retVal = annotationDatabase.getAnnotationIndex().get(annotationClass.getName());
+    return retVal == null ? (retVal = Collections.emptySet()) : retVal;
   }
 }
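
Condensed, the discovery flow is: scannotation indexes class-level annotations across the classloader's URLs, then the requested keyword is matched against each candidate's annotation value. A sketch of that flow under the same assumptions as the diff (urls and cl obtained from AccumuloVFSClassLoader as above; exceptions propagated; the method name is illustrative):

  // Locate the class whose @AccumuloService value matches the keyword.
  static Class<?> findService(String keyword, URL[] urls, ClassLoader cl) throws Exception {
    AnnotationDB db = new AnnotationDB();
    db.setScanClassAnnotations(true);
    db.scanArchives(urls);  // index class-level annotations in every jar/dir on the classpath
    Set<String> names = db.getAnnotationIndex().get(AccumuloService.class.getName());
    if (names != null) {
      for (String className : names) {
        Class<?> candidate = cl.loadClass(className);
        if (keyword.equals(candidate.getAnnotation(AccumuloService.class).value()))
          return candidate;
      }
    }
    return null;  // caller falls back to loading the keyword as a class name
  }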

http://git-wip-us.apache.org/repos/asf/accumulo/blob/dd0b97e5/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
----------------------------------------------------------------------
diff --git a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
index eb653bc..0ff8843 100644
--- a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
+++ b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
@@ -61,6 +61,7 @@ public class AccumuloVFSClassLoader {
   
   public static class AccumuloVFSClassLoaderShutdownThread implements Runnable {
     
+    @Override
     public void run() {
       try {
         AccumuloVFSClassLoader.close();
@@ -99,7 +100,7 @@ public class AccumuloVFSClassLoader {
   
   public synchronized static <U> Class<? extends U> loadClass(String classname, Class<U> extension) throws ClassNotFoundException {
     try {
-      return (Class<? extends U>) getClassLoader().loadClass(classname).asSubclass(extension);
+      return getClassLoader().loadClass(classname).asSubclass(extension);
     } catch (IOException e) {
       throw new ClassNotFoundException("IO Error loading class " + classname, e);
     }
@@ -282,6 +283,28 @@ public class AccumuloVFSClassLoader {
     });
   }
   
+  public static URL[] getURLs() {
+    ArrayList<URL> urls = new ArrayList<URL>(20);
+    try {
+      ClassLoader cl = getClassLoader();
+      while (cl != null && cl != ClassLoader.getSystemClassLoader()) {
+        if (cl instanceof URLClassLoader) {
+          URLClassLoader ucl = (URLClassLoader) cl;
+          for (URL u : ucl.getURLs())
+            urls.add(u);
+        } else if (cl instanceof VFSClassLoader) {
+          VFSClassLoader vcl = (VFSClassLoader) cl;
+          for (FileObject f : vcl.getFileObjects())
+            urls.add(f.getURL());
+        }
+        cl = cl.getParent();
+      }
+    } catch (Exception t) {
+      throw new RuntimeException(t);
+    }
+    return urls.toArray(new URL[urls.size()]);
+  }
+  
   public static void printClassPath(Printer out) {
     try {
       ClassLoader cl = getClassLoader();


[27/50] [abbrv] git commit: ACCUMULO-1537 improved timing/reliability of tests

Posted by ct...@apache.org.
ACCUMULO-1537 improved timing/reliability of tests

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1501860 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/22cff666
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/22cff666
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/22cff666

Branch: refs/heads/ACCUMULO-1496
Commit: 22cff6665d27f2cb7ebc211f032913091c6435f8
Parents: 868ef44
Author: Eric C. Newton <ec...@apache.org>
Authored: Wed Jul 10 17:17:43 2013 +0000
Committer: Eric C. Newton <ec...@apache.org>
Committed: Wed Jul 10 17:17:43 2013 +0000

----------------------------------------------------------------------
 .../org/apache/accumulo/test/ShellServerIT.java |  2 +-
 .../accumulo/test/functional/BloomFilterIT.java | 61 ++++++++++++--------
 .../accumulo/test/functional/ShutdownIT.java    |  2 +-
 3 files changed, 40 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/22cff666/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java b/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java
index 597722b..37c7e43 100644
--- a/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java
@@ -188,7 +188,7 @@ public class ShellServerIT {
   public void tearDown() throws Exception {
     Connector c = cluster.getConnector("root", secret);
     for (String table : c.tableOperations().list()) {
-      if (!table.equals(MetadataTable.NAME) && !table.equals(RootTable.NAME))
+      if (!table.equals(MetadataTable.NAME) && !table.equals(RootTable.NAME) && !table.equals("trace"))
         c.tableOperations().delete(table);
     }
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/22cff666/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
index 9e8e5d3..1c9ea53 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/BloomFilterIT.java
@@ -17,8 +17,10 @@
 package org.apache.accumulo.test.functional;
 
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Random;
 
@@ -46,7 +48,15 @@ public class BloomFilterIT extends MacTest {
   
   @Override
   public void configure(MiniAccumuloConfig cfg) {
-    cfg.setDefaultMemory(500, MemoryUnit.MEGABYTE);
+    cfg.setDefaultMemory(1, MemoryUnit.GIGABYTE);
+    cfg.setNumTservers(1);
+    Map<String,String> siteConfig = new HashMap<String, String>();
+    siteConfig.put(Property.TSERV_READ_AHEAD_MAXCONCURRENT.getKey(), "1");
+    siteConfig.put(Property.TABLE_BLOOM_SIZE.getKey(), "2000000");
+    siteConfig.put(Property.TABLE_BLOOM_ERRORRATE.getKey(), "1%");
+    siteConfig.put(Property.TABLE_BLOOM_LOAD_THRESHOLD.getKey(), "0");
+    siteConfig.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1G");
+    cfg.setSiteConfig(siteConfig );
   }
   
   @Test(timeout=200*1000)
@@ -54,10 +64,14 @@ public class BloomFilterIT extends MacTest {
     Connector c = getConnector();
     for (String table : "bt1 bt2 bt3 bt4".split(" ")) {
       c.tableOperations().create(table);
+      c.tableOperations().setProperty(table, Property.TABLE_INDEXCACHE_ENABLED.getKey(), "false");
+      c.tableOperations().setProperty(table, Property.TABLE_BLOCKCACHE_ENABLED.getKey(), "false");
     }
-    write(c, "bt1", 1, 0, 1000000000, 250);
-    write(c, "bt2", 2, 0, 1000000000, 250);
-    write(c, "bt3", 3, 0, 1000000000, 250);
+    log.info("Writing");
+    write(c, "bt1", 1, 0, 2000000000, 1000);
+    write(c, "bt2", 2, 0, 2000000000, 1000);
+    write(c, "bt3", 3, 0, 2000000000, 1000);
+    log.info("Writing complete");
     
     // test inserting an empty key
     BatchWriter bw = c.createBatchWriter("bt4", new BatchWriterConfig());
@@ -68,8 +82,6 @@ public class BloomFilterIT extends MacTest {
     c.tableOperations().flush("bt4", null, null, true);
     
     for (String table : new String[] {"bt1", "bt2", "bt3"}) {
-      c.tableOperations().setProperty(table, Property.TABLE_INDEXCACHE_ENABLED.getKey(), "false");
-      c.tableOperations().setProperty(table, Property.TABLE_BLOCKCACHE_ENABLED.getKey(), "false");
       c.tableOperations().compact(table, null, null, true, true);
     }
     
@@ -80,10 +92,13 @@ public class BloomFilterIT extends MacTest {
     FunctionalTestUtils.checkRFiles(c, "bt4", 1, 1, 1, 1);
     
     // these queries should only run quickly if bloom filters are working, so lets get a base
-    long t1 = query(c, "bt1", 1, 0, 1000000000, 100000, 250);
-    long t2 = query(c, "bt2", 2, 0, 1000000000, 100000, 250);
-    long t3 = query(c, "bt3", 3, 0, 1000000000, 100000, 250);
+    log.info("Base query");
+    long t1 = query(c, "bt1", 1, 0, 2000000000, 100000, 1000);
+    long t2 = query(c, "bt2", 2, 0, 2000000000, 100000, 1000);
+    long t3 = query(c, "bt3", 3, 0, 2000000000, 100000, 1000);
+    log.info("Base query complete");
     
+    log.info("Rewriting with bloom filters");
     c.tableOperations().setProperty("bt1", Property.TABLE_BLOOM_ENABLED.getKey(), "true");
     c.tableOperations().setProperty("bt1", Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), RowFunctor.class.getName());
     c.tableOperations().compact("bt1", null, null, false, true);
@@ -99,16 +114,16 @@ public class BloomFilterIT extends MacTest {
     c.tableOperations().setProperty("bt4", Property.TABLE_BLOOM_ENABLED.getKey(), "true");
     c.tableOperations().setProperty("bt4", Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(), RowFunctor.class.getName());
     c.tableOperations().compact("bt4", null, null, false, true);
+    log.info("Rewriting with bloom filters complete");
     
     // these queries should only run quickly if bloom
     // filters are working
-    long tb1 = query(c, "bt1", 1, 0, 1000000000, 100000, 250);
-    long tb2 = query(c, "bt2", 2, 0, 1000000000, 100000, 250);
-    long tb3 = query(c, "bt3", 3, 0, 1000000000, 100000, 250);
-    
-    timeCheck(t1, tb1);
-    timeCheck(t2, tb2);
-    timeCheck(t3, tb3);
+    log.info("Bloom query");
+    long tb1 = query(c, "bt1", 1, 0, 2000000000, 100000, 1000);
+    long tb2 = query(c, "bt2", 2, 0, 2000000000, 100000, 1000);
+    long tb3 = query(c, "bt3", 3, 0, 2000000000, 100000, 1000);
+    log.info("Bloom query complete");
+    timeCheck(t1 + t2 + t3, tb1 + tb2 + tb3);
     
     // test querying for empty key
     Scanner scanner = c.createScanner("bt4", Authorizations.EMPTY);
@@ -121,9 +136,11 @@ public class BloomFilterIT extends MacTest {
   }
   
   private void timeCheck(long t1, long t2) throws Exception {
-    if (((t1 - t2) * 1.0 / t1) < .1) {
-      throw new Exception("Queries had less than 10% improvement (old: " + t1 + " new: " + t2 + " improvement: " + ((t1 - t2) * 100. / t1) + "%)");
+    double improvement = (t1 - t2) * 1.0 / t1;
+    if (improvement < .1) {
+      throw new Exception("Queries had less than 10% improvement (old: " + t1 + " new: " + t2 + " improvement: " + (improvement*100) + "%)");
     }
+    log.info("Improvement: " + (improvement * 100) + "%");
   }
   
   private long query(Connector c, String table, int depth, long start, long end, int num, int step) throws Exception {
@@ -161,24 +178,22 @@ public class BloomFilterIT extends MacTest {
       ranges.add(range);
     }
     
-    BatchScanner bs = c.createBatchScanner(table, Authorizations.EMPTY, 3);
+    BatchScanner bs = c.createBatchScanner(table, Authorizations.EMPTY, 1);
     bs.setRanges(ranges);
     
-    long t1 = System.currentTimeMillis();
-    
+    long t1 = System.currentTimeMillis();   
     for (Entry<Key,Value> entry : bs) {
       long v = Long.parseLong(entry.getValue().toString());
       if (!expected.remove(v)) {
         throw new Exception("Got unexpected return " + entry.getKey() + " " + entry.getValue());
       }
     }
-    
     long t2 = System.currentTimeMillis();
     
     if (expected.size() > 0) {
       throw new Exception("Did not get all expected values " + expected.size());
     }
-    
+
     bs.close();
     
     return t2 - t1;
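
The rewrite step above doubles as the general recipe for enabling bloom filters on an existing table: set the two table properties, then compact so the files are rewritten with filters. A minimal sketch, assuming a Connector c and a hypothetical table name:

  // Enable bloom filters keyed on row, then rewrite the table's files.
  c.tableOperations().setProperty("mytable", Property.TABLE_BLOOM_ENABLED.getKey(), "true");
  c.tableOperations().setProperty("mytable", Property.TABLE_BLOOM_KEY_FUNCTOR.getKey(),
      RowFunctor.class.getName());
  c.tableOperations().compact("mytable", null, null, false, true);  // flush=false, wait=true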

http://git-wip-us.apache.org/repos/asf/accumulo/blob/22cff666/test/src/test/java/org/apache/accumulo/test/functional/ShutdownIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ShutdownIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ShutdownIT.java
index 4d1b21b..cfa2d15 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ShutdownIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ShutdownIT.java
@@ -49,7 +49,7 @@ public class ShutdownIT extends MacTest {
     verify.destroy();
   }
   
-  @Test(timeout=30*1000)
+  @Test(timeout=60*1000)
   public void shutdownDuringDelete() throws Exception {
     assertEquals(0, cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-p", MacTest.PASSWORD, "--createTable").waitFor());
     Process deleter = cluster.exec(TestRandomDeletes.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-p", MacTest.PASSWORD);


[39/50] [abbrv] git commit: ACCUMULO-1563 committing Jonathan Hsieh's patch to prevent the test from writing to /

Posted by ct...@apache.org.
ACCUMULO-1563 committing Jonathan Hsieh's patch to prevent the test from writing to /

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1502584 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/a904c3a4
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/a904c3a4
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/a904c3a4

Branch: refs/heads/ACCUMULO-1496
Commit: a904c3a40b9d33b8f9e4320d622ae6c93abddcf5
Parents: e56edc1
Author: Eric C. Newton <ec...@apache.org>
Authored: Fri Jul 12 15:12:11 2013 +0000
Committer: Eric C. Newton <ec...@apache.org>
Committed: Fri Jul 12 15:12:11 2013 +0000

----------------------------------------------------------------------
 test/src/main/java/org/apache/accumulo/test/CreateRFiles.java    | 2 +-
 .../apache/accumulo/test/functional/BulkSplitOptimizationIT.java | 4 ++--
 .../java/org/apache/accumulo/test/functional/CompactionIT.java   | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/a904c3a4/test/src/main/java/org/apache/accumulo/test/CreateRFiles.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/CreateRFiles.java b/test/src/main/java/org/apache/accumulo/test/CreateRFiles.java
index fc50ed9..82f9b6b 100644
--- a/test/src/main/java/org/apache/accumulo/test/CreateRFiles.java
+++ b/test/src/main/java/org/apache/accumulo/test/CreateRFiles.java
@@ -58,7 +58,7 @@ public class CreateRFiles {
     int count = 0;
     while (currEnd <= opts.end && currStart < currEnd) {
       
-      final String tia = String.format("--rfile /%s/mf%05d --timestamp 1 --size 50 --random 56 --rows %d --start %d --user root", opts.outputDirectory, count, currEnd - currStart, currStart);
+      final String tia = String.format("--rfile %s/mf%05d --timestamp 1 --size 50 --random 56 --rows %d --start %d --user root", opts.outputDirectory, count, currEnd - currStart, currStart);
       
       Runnable r = new Runnable() {
         

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a904c3a4/test/src/test/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
index 32e871e..05b9992 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/BulkSplitOptimizationIT.java
@@ -56,9 +56,9 @@ public class BulkSplitOptimizationIT extends MacTest {
     c.tableOperations().setProperty(TABLE_NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "1G");
     
     FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
-    FunctionalTestUtils.createRFiles(c, fs, "/tmp/testmf", ROWS, SPLITS, 8);
+    FunctionalTestUtils.createRFiles(c, fs, "tmp/testmf", ROWS, SPLITS, 8);
     
-    FunctionalTestUtils.bulkImport(c, fs, TABLE_NAME, "/tmp/testmf");
+    FunctionalTestUtils.bulkImport(c, fs, TABLE_NAME, "tmp/testmf");
     
     FunctionalTestUtils.checkSplits(c, TABLE_NAME, 0, 0);
     FunctionalTestUtils.checkRFiles(c, TABLE_NAME, 1, 1, 100, 100);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a904c3a4/test/src/test/java/org/apache/accumulo/test/functional/CompactionIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/CompactionIT.java b/test/src/test/java/org/apache/accumulo/test/functional/CompactionIT.java
index 5b1a83f..5084f37 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/CompactionIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/CompactionIT.java
@@ -60,8 +60,8 @@ public class CompactionIT extends MacTest {
     c.tableOperations().create("test_ingest");
     c.tableOperations().setProperty("test_ingest", Property.TABLE_MAJC_RATIO.getKey(), "1.0");
     FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
-    FunctionalTestUtils.createRFiles(c, fs, "/tmp/testrf", 500000, 59, 4);
-    FunctionalTestUtils.bulkImport(c, fs, "test_ingest", "/tmp/testrf");
+    FunctionalTestUtils.createRFiles(c, fs, "tmp/testrf", 500000, 59, 4);
+    FunctionalTestUtils.bulkImport(c, fs, "test_ingest", "tmp/testrf");
     int beforeCount = countFiles(c);
     
     final AtomicBoolean fail = new AtomicBoolean(false);

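The three hunks in this commit all make the same change: dropping the leading slash so the scratch directory ("tmp/testmf", "tmp/testrf", and the --rfile argument) is resolved relative to the Hadoop FileSystem working directory instead of the filesystem root. A minimal sketch of that resolution behavior using only the stock Hadoop API (the class and path names here are illustrative, not part of the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RelativePathDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());

        // Without a leading slash, the path is resolved against the FileSystem's
        // working directory (e.g. /user/<user> on HDFS), not the filesystem root.
        System.out.println(fs.makeQualified(new Path("tmp/testmf")));

        // With a leading slash, the working directory is ignored.
        System.out.println(fs.makeQualified(new Path("/tmp/testmf")));
      }
    }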

[33/50] [abbrv] ACCUMULO-998 applying Michael Allen's updated patch for at-rest encryption

Posted by ct...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleParameters.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleParameters.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleParameters.java
new file mode 100644
index 0000000..7027496
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/CryptoModuleParameters.java
@@ -0,0 +1,629 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.accumulo.core.security.crypto;
+
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.security.SecureRandom;
+import java.util.Map;
+
+import javax.crypto.Cipher;
+import javax.crypto.CipherOutputStream;
+
+/**
+ * This class defines several parameters needed by a module providing cryptographic stream support in Accumulo. The following Javadoc details which
+ * parameters are used for which operations (encryption vs. decryption), which ones return values (i.e. are "out" parameters from the {@link CryptoModule}), and
+ * which ones are required versus optional in certain situations.
+ * 
+ * Most of the time, instances of this class can be constructed using
+ * {@link CryptoModuleFactory#createParamsObjectFromAccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration)}.
+ */
+public class CryptoModuleParameters {
+  
+  /**
+   * Gets the name of the symmetric algorithm to use for encryption.
+   * 
+   * @see CryptoModuleParameters#setAlgorithmName(String)
+   */
+  
+  public String getAlgorithmName() {
+    return algorithmName;
+  }
+  
+  /**
+   * Sets the name of the symmetric algorithm to use for an encryption stream.
+   * <p>
+   * Valid names are names recognized by your cryptographic engine provider. For the default Java provider, valid names would include things like "AES", "RC4",
+   * "DESede", etc.
+   * <p>
+   * For <b>encryption</b>, this value is <b>required</b> and is always used. Its value should be prepended or otherwise included with the ciphertext for future
+   * decryption. <br>
+   * For <b>decryption</b>, this value is often disregarded in favor of the value encoded with the ciphertext.
+   * 
+   * @param algorithmName
+   *          the name of the cryptographic algorithm to use.
+   * @see <a href="http://docs.oracle.com/javase/1.5.0/docs/guide/security/jce/JCERefGuide.html#AppA">Standard Algorithm Names in JCE</a>
+   * 
+   */
+  
+  public void setAlgorithmName(String algorithmName) {
+    this.algorithmName = algorithmName;
+  }
+  
+  /**
+   * Gets the name of the encryption mode to use for encryption.
+   * 
+   * @see CryptoModuleParameters#setEncryptionMode(String)
+   */
+  
+  public String getEncryptionMode() {
+    return encryptionMode;
+  }
+  
+  /**
+   * Sets the name of the encryption mode to use for an encryption stream.
+   * <p>
+   * Valid names are names recognized by your cryptographic engine provider. For the default Java provider, valid names would include things like "ECB", "CBC",
+   * "CFB", etc.
+   * <p>
+   * For <b>encryption</b>, this value is <b>required</b> and is always used. Its value should be prepended or otherwise included with the ciphertext for future
+   * decryption. <br>
+   * For <b>decryption</b>, this value is often disregarded in favor of the value encoded with the ciphertext.
+   * 
+   * @param encryptionMode
+   *          the name of the encryption mode to use.
+   * @see <a href="http://docs.oracle.com/javase/1.5.0/docs/guide/security/jce/JCERefGuide.html#AppA">Standard Mode Names in JCE</a>
+   * 
+   */
+  
+  public void setEncryptionMode(String encryptionMode) {
+    this.encryptionMode = encryptionMode;
+  }
+  
+  /**
+   * Gets the name of the padding type to use for encryption.
+   * 
+   * @see CryptoModuleParameters#setPadding(String)
+   */
+  
+  public String getPadding() {
+    return padding;
+  }
+  
+  /**
+   * Sets the name of the padding type to use for an encryption stream.
+   * <p>
+   * Valid names are names recognized by your cryptographic engine provider. For the default Java provider, valid names would include things like
+   * "PKCS5Padding", "None", etc.
+   * <p>
+   * For <b>encryption</b>, this value is <b>required</b> and is always used. Its value should be prepended or otherwise included with the ciphertext for future
+   * decryption. <br>
+   * For <b>decryption</b>, this value is often disregarded in favor of the value encoded with the ciphertext.
+   * 
+   * @param padding
+   *          the name of the padding type to use.
+   * @see <a href="http://docs.oracle.com/javase/1.5.0/docs/guide/security/jce/JCERefGuide.html#AppA">Standard Padding Names in JCE</a>
+   * 
+   */
+  public void setPadding(String padding) {
+    this.padding = padding;
+  }
+  
+  /**
+   * Gets the plaintext secret key.
+   * <p>
+   * For <b>decryption</b>, this value is often the out parameter of using a secret key encryption strategy to decrypt an encrypted version of this secret key.
+   * (See {@link CryptoModuleParameters#setKeyEncryptionStrategyClass(String)}.)
+   * 
+   * 
+   * @see CryptoModuleParameters#setPlaintextKey(byte[])
+   */
+  public byte[] getPlaintextKey() {
+    return plaintextKey;
+  }
+  
+  /**
+   * Sets the plaintext secret key that will be used to encrypt and decrypt bytes.
+   * <p>
+   * Valid values and lengths for this secret key depend entirely on the algorithm type. Refer to the documentation about the algorithm for further information.
+   * <p>
+   * For <b>encryption</b>, this value is <b>optional</b>. If it is not provided, it will be automatically generated by the underlying cryptographic module. <br>
+   * For <b>decryption</b>, this value is often obtained from the underlying cipher stream, or derived from the encrypted version of the key (see
+   * {@link CryptoModuleParameters#setEncryptedKey(byte[])}).
+   * 
+   * @param plaintextKey
+   *          the value of the plaintext secret key
+   */
+  
+  public void setPlaintextKey(byte[] plaintextKey) {
+    this.plaintextKey = plaintextKey;
+  }
+  
+  /**
+   * Gets the length of the secret key.
+   * 
+   * @see CryptoModuleParameters#setKeyLength(int)
+   */
+  public int getKeyLength() {
+    return keyLength;
+  }
+  
+  /**
+   * Sets the length of the secret key that will be used to encrypt and decrypt bytes.
+   * <p>
+   * Valid lengths depend entirely on the algorithm type. Refer to the documentation about the algorithm for further information. (For example, AES may use
+   * either 128 or 256 bit keys in the default Java cryptography provider.)
+   * <p>
+   * For <b>encryption</b>, this value is <b>required if the secret key is not set</b>. <br>
+   * For <b>decryption</b>, this value is often obtained from the underlying cipher stream, or derived from the encrypted version of the key (see
+   * {@link CryptoModuleParameters#setEncryptedKey(byte[])}).
+   * 
+   * @param keyLength
+   *          the length of the secret key to be generated
+   */
+  
+  public void setKeyLength(int keyLength) {
+    this.keyLength = keyLength;
+  }
+  
+  /**
+   * Gets the random number generator name.
+   * 
+   * @see CryptoModuleParameters#setRandomNumberGenerator(String)
+   */
+  
+  public String getRandomNumberGenerator() {
+    return randomNumberGenerator;
+  }
+  
+  /**
+   * Sets the name of the random number generator to use. The default for the baseline JCE implementation is "SHA1PRNG".
+   * <p>
+   * For <b>encryption</b>, this value is <b>required</b>. <br>
+   * For <b>decryption</b>, this value is often obtained from the underlying cipher stream.
+   * 
+   * @param randomNumberGenerator
+   *          the name of the random number generator to use
+   */
+  
+  public void setRandomNumberGenerator(String randomNumberGenerator) {
+    this.randomNumberGenerator = randomNumberGenerator;
+  }
+  
+  /**
+   * Gets the random number generator provider name.
+   * 
+   * @see CryptoModuleParameters#setRandomNumberGeneratorProvider(String)
+   */
+  public String getRandomNumberGeneratorProvider() {
+    return randomNumberGeneratorProvider;
+  }
+  
+  /**
+   * Sets the name of the random number generator provider to use. The default for the baseline JCE implementation is "SUN".
+   * <p>
+   * The provider, as the name implies, provides the RNG implementation specified by {@link CryptoModuleParameters#getRandomNumberGenerator()}.
+   * <p>
+   * For <b>encryption</b>, this value is <b>required</b>. <br>
+   * For <b>decryption</b>, this value is often obtained from the underlying cipher stream.
+   * 
+   * @param randomNumberGeneratorProvider
+   *          the name of the provider to use
+   */
+  
+  public void setRandomNumberGeneratorProvider(String randomNumberGeneratorProvider) {
+    this.randomNumberGeneratorProvider = randomNumberGeneratorProvider;
+  }
+  
+  /**
+   * Gets the key encryption strategy class.
+   * 
+   * @see CryptoModuleParameters#setKeyEncryptionStrategyClass(String)
+   */
+  
+  public String getKeyEncryptionStrategyClass() {
+    return keyEncryptionStrategyClass;
+  }
+  
+  /**
+   * Sets the class name of the key encryption strategy class. The class obeys the {@link SecretKeyEncryptionStrategy} interface. It instructs the
+   * {@link DefaultCryptoModule} on how to encrypt the keys it uses to secure the streams.
+   * <p>
+   * The default implementation of this interface, {@link DefaultSecretKeyEncryptionStrategy}, creates a random key encryption key (KEK) as another symmetric
+   * key and places the KEK into HDFS. <i>This is not really very secure.</i> Users of the crypto modules are encouraged to either safeguard that KEK carefully
+   * or to obtain and use another {@link SecretKeyEncryptionStrategy} class.
+   * <p>
+   * For <b>encryption</b>, this value is <b>optional</b>. If it is not specified, then it is assumed that the secret keys used for encrypting files will not be
+   * encrypted. This is not a secure approach, thus setting this is highly recommended.<br>
+   * For <b>decryption</b>, this value is often obtained from the underlying cipher stream. However, the underlying stream's value can be overridden (at least
+   * when using {@link DefaultCryptoModule}) by setting the {@link CryptoModuleParameters#setOverrideStreamsSecretKeyEncryptionStrategy(boolean)} to true.
+   * 
+   * @param keyEncryptionStrategyClass
+   *          the name of the key encryption strategy class to use
+   */
+  public void setKeyEncryptionStrategyClass(String keyEncryptionStrategyClass) {
+    this.keyEncryptionStrategyClass = keyEncryptionStrategyClass;
+  }
+  
+  /**
+   * Gets the encrypted version of the plaintext key. This parameter is generally either obtained from an underlying stream or computed in the process of
+   * employing the {@link CryptoModuleParameters#getKeyEncryptionStrategyClass()}.
+   * 
+   * @see CryptoModuleParameters#setEncryptedKey(byte[])
+   */
+  public byte[] getEncryptedKey() {
+    return encryptedKey;
+  }
+  
+  /**
+   * Sets the encrypted version of the plaintext key ({@link CryptoModuleParameters#getPlaintextKey()}). Generally this operation will be done either by:
+   * <p>
+   * <ul>
+   * <li>the code reading an encrypted stream and coming across the encrypted version of one of these keys, OR
+   * <li>the {@link CryptoModuleParameters#getKeyEncryptionStrategyClass()} that encrypted the plaintext key (see
+   * {@link CryptoModuleParameters#getPlaintextKey()}).
+   * </ul>
+   * <p>
+   * For <b>encryption</b>, this value is generally not required, but is usually set by the underlying module during encryption. <br>
+   * For <b>decryption</b>, this value is <b>usually required</b>.
+   * 
+   * 
+   * @param encryptedKey
+   *          the encrypted value of the plaintext key
+   */
+  
+  public void setEncryptedKey(byte[] encryptedKey) {
+    this.encryptedKey = encryptedKey;
+  }
+  
+  /**
+   * Gets the opaque ID associated with the encrypted version of the plaintext key.
+   * 
+   * @see CryptoModuleParameters#setOpaqueKeyEncryptionKeyID(String)
+   * @return the opaque key encryption key ID
+   */
+  public String getOpaqueKeyEncryptionKeyID() {
+    return opaqueKeyEncryptionKeyID;
+  }
+  
+  /**
+   * Sets an opaque ID associated with the encrypted version of the plaintext key.
+   * <p>
+   * Often, implementors of the {@link SecretKeyEncryptionStrategy} will need to record some information about how they encrypted a particular plaintext key.
+   * For example, if the strategy employs several keys for its encryption, it will want to record which key it used. The caller should not have to worry about
+   * the format or contents of this internal ID; thus, the strategy class will encode whatever information it needs into this string. It is then up to the
+   * calling code to record this opaque string properly to the underlying cryptographically-encoded stream, and then set the opaque ID back into this parameter
+   * object upon reading.
+   * <p>
+   * For <b>encryption</b>, this value is generally not required, but will be typically generated and set by the {@link SecretKeyEncryptionStrategy} class (see
+   * {@link CryptoModuleParameters#getKeyEncryptionStrategyClass()}). <br>
+   * For <b>decryption</b>, this value is <b>required</b>, though it will typically be read from the underlying stream.
+   * 
+   * @param opaqueKeyEncryptionKeyID
+   *          the opaque ID associated with the encrypted version of the plaintext key (see {@link CryptoModuleParameters#getEncryptedKey()}).
+   */
+  
+  public void setOpaqueKeyEncryptionKeyID(String opaqueKeyEncryptionKeyID) {
+    this.opaqueKeyEncryptionKeyID = opaqueKeyEncryptionKeyID;
+  }
+  
+  /**
+   * Gets the flag that indicates whether or not the module should record its cryptographic parameters to the stream automatically, or rely on the calling code
+   * to do so.
+   * 
+   * @see CryptoModuleParameters#setRecordParametersToStream(boolean)
+   */
+  public boolean getRecordParametersToStream() {
+    return recordParametersToStream;
+  }
+  
+  /**
+   * Sets the flag that indicates whether or not the module should record its cryptographic parameters to the stream automatically, or rely on the calling code
+   * to do so.
+   * 
+   * <p>
+   * 
+   * If this is set to <i>true</i>, then the stream passed to {@link CryptoModule#getEncryptingOutputStream(CryptoModuleParameters)} will be <i>written to by the module</i> before it
+   * is returned to the caller. There are situations where it is easier to let the crypto module do this writing on behalf of the caller, and other times where
+   * it is not appropriate (if the format of the underlying stream must be carefully maintained, for instance).
+   * 
+   * @param recordParametersToStream
+   *          whether or not to require the module to record its parameters to the stream by itself
+   */
+  public void setRecordParametersToStream(boolean recordParametersToStream) {
+    this.recordParametersToStream = recordParametersToStream;
+  }
+  
+  /**
+   * Gets the flag that indicates whether or not to close the underlying stream when the cipher stream is closed.
+   * 
+   * @see CryptoModuleParameters#setCloseUnderylingStreamAfterCryptoStreamClose(boolean)
+   */
+  public boolean getCloseUnderylingStreamAfterCryptoStreamClose() {
+    return closeUnderylingStreamAfterCryptoStreamClose;
+  }
+  
+  /**
+   * Sets the flag that indicates whether or not to close the underlying stream when the cipher stream is closed.
+   * 
+   * <p>
+   * 
+   * {@link CipherOutputStream} will only output its padding bytes when its {@link CipherOutputStream#close()} method is called. However, there are times when a
+   * caller doesn't want its underlying stream closed at the time that the {@link CipherOutputStream} is closed. This flag indicates that the
+   * {@link CryptoModule} should wrap the underlying stream in a basic {@link FilterOutputStream} which will swallow any close() calls and prevent them from
+   * propagating to the underlying stream.
+   * 
+   * @param closeUnderylingStreamAfterCryptoStreamClose
+   *          the flag that indicates whether or not to close the underlying stream when the cipher stream is closed
+   */
+  public void setCloseUnderylingStreamAfterCryptoStreamClose(boolean closeUnderylingStreamAfterCryptoStreamClose) {
+    this.closeUnderylingStreamAfterCryptoStreamClose = closeUnderylingStreamAfterCryptoStreamClose;
+  }
+  
+  /**
+   * Gets the flag that indicates if the underlying stream's key encryption strategy should be overridden by the currently configured key encryption strategy.
+   * 
+   * @see CryptoModuleParameters#setOverrideStreamsSecretKeyEncryptionStrategy(boolean)
+   */
+  public boolean getOverrideStreamsSecretKeyEncryptionStrategy() {
+    return overrideStreamsSecretKeyEncryptionStrategy;
+  }
+  
+  /**
+   * Sets the flag that indicates if the underlying stream's key encryption strategy should be overridden by the currently configured key encryption strategy.
+   * 
+   * <p>
+   * 
+   * So, why is this important? Say you started out with the default secret key encryption strategy. So, now you have a secret key in HDFS that encrypts all the
+   * other secret keys. <i>Then</i> you deploy a key management solution. You want to move that secret key up to the key management server. Great! No problem.
+   * Except, all your encrypted files now contain a setting that says
+   * "hey, I was encrypted by the default strategy, so decrypt my key using that, not the key management server". This setting signals the
+   * {@link CryptoModule} that it should ignore the setting in the file and prefer the one from the configuration.
+   * 
+   * @param overrideStreamsSecretKeyEncryptionStrategy
+   *          the flag that indicates if the underlying stream's key encryption strategy should be overridden by the currently configured key encryption
+   *          strategy
+   */
+  
+  public void setOverrideStreamsSecretKeyEncryptionStrategy(boolean overrideStreamsSecretKeyEncryptionStrategy) {
+    this.overrideStreamsSecretKeyEncryptionStrategy = overrideStreamsSecretKeyEncryptionStrategy;
+  }
+  
+  /**
+   * Gets the plaintext output stream to wrap for encryption.
+   * 
+   * @see CryptoModuleParameters#setPlaintextOutputStream(OutputStream)
+   */
+  public OutputStream getPlaintextOutputStream() {
+    return plaintextOutputStream;
+  }
+  
+  /**
+   * Sets the plaintext output stream to wrap for encryption.
+   * 
+   * <p>
+   * 
+   * For <b>encryption</b>, this parameter is <b>required</b>. <br>
+   * For <b>decryption</b>, this parameter is ignored.
+   * 
+   * @param plaintextOutputStream
+   *          the plaintext output stream to wrap for encryption
+   */
+  public void setPlaintextOutputStream(OutputStream plaintextOutputStream) {
+    this.plaintextOutputStream = plaintextOutputStream;
+  }
+  
+  /**
+   * Gets the encrypted output stream, which is nearly always a wrapped version of the output stream from
+   * {@link CryptoModuleParameters#getPlaintextOutputStream()}.
+   * 
+   * <p>
+   * 
+   * Generally this method is used by {@link CryptoModule} classes as an <i>out</i> parameter from calling
+   * {@link CryptoModule#getEncryptingOutputStream(CryptoModuleParameters)}.
+   * 
+   * @see CryptoModuleParameters#setEncryptedOutputStream(OutputStream)
+   */
+  
+  public OutputStream getEncryptedOutputStream() {
+    return encryptedOutputStream;
+  }
+  
+  /**
+   * Sets the encrypted output stream. This method should really only be called by {@link CryptoModule} implementations unless something very unusual is going
+   * on.
+   * 
+   * @param encryptedOutputStream
+   *          the encrypted version of the output stream from {@link CryptoModuleParameters#getPlaintextOutputStream()}.
+   */
+  public void setEncryptedOutputStream(OutputStream encryptedOutputStream) {
+    this.encryptedOutputStream = encryptedOutputStream;
+  }
+  
+
+  /**
+   * Gets the plaintext input stream, which is nearly always a wrapped version of the output from {@link CryptoModuleParameters#getEncryptedInputStream()}.
+   * 
+   * <p>
+   * 
+   * Generally this method is used by {@link CryptoModule} classes as an <i>out</i> parameter from calling {@link CryptoModule#getDecryptingInputStream(CryptoModuleParameters)}.
+   * 
+   * 
+   * @see CryptoModuleParameters#setPlaintextInputStream(InputStream)
+   */
+  public InputStream getPlaintextInputStream() {
+    return plaintextInputStream;
+  }
+  
+  /**
+   * Sets the plaintext input stream, which is nearly always a wrapped version of the output from {@link CryptoModuleParameters#getEncryptedInputStream()}.
+   * 
+   * <p>
+   * 
+   * This method should really only be called by {@link CryptoModule} implementations.
+   * 
+   * @param plaintextInputStream
+   *          the plaintext input stream
+   */
+  
+  public void setPlaintextInputStream(InputStream plaintextInputStream) {
+    this.plaintextInputStream = plaintextInputStream;
+  }
+  
+  /**
+   * Gets the encrypted input stream to wrap for decryption.
+   * 
+   * @see CryptoModuleParameters#setEncryptedInputStream(InputStream)
+   */
+  public InputStream getEncryptedInputStream() {
+    return encryptedInputStream;
+  }
+  
+  /**
+   * Sets the encrypted input stream to wrap for decryption.
+   * 
+   * @param encryptedInputStream
+   *          the encrypted input stream to wrap for decryption
+   */
+  
+  public void setEncryptedInputStream(InputStream encryptedInputStream) {
+    this.encryptedInputStream = encryptedInputStream;
+  }
+  
+  /**
+   * Gets the initialized cipher object.
+   * 
+   * 
+   * @see CryptoModuleParameters#setCipher(Cipher)
+   */
+  public Cipher getCipher() {
+    return cipher;
+  }
+  
+  /**
+   * Sets the initialized cipher object. Generally speaking, callers do not have to create and set this object. There may be circumstances where the cipher
+   * object is created outside of the module (to determine IV lengths, for one). If it is created and you want the module to use the cipher you already
+   * initialized, set it here.
+   * 
+   * @param cipher
+   *          the cipher object
+   */
+  public void setCipher(Cipher cipher) {
+    this.cipher = cipher;
+  }
+  
+  /**
+   * Gets the initialized secure random object.
+   * 
+   * @see CryptoModuleParameters#setSecureRandom(SecureRandom)
+   */
+  public SecureRandom getSecureRandom() {
+    return secureRandom;
+  }
+  
+  /**
+   * Sets the initialized secure random object. Generally speaking, callers do not have to create and set this object. There may be circumstances where the
+   * random object is created outside of the module (for instance, to create a random secret key). If it is created outside the module and you want the module
+   * to use the random object you already created, set it here.
+   * 
+   * @param secureRandom
+   *          the {@link SecureRandom} object
+   */
+  
+  public void setSecureRandom(SecureRandom secureRandom) {
+    this.secureRandom = secureRandom;
+  }
+  
+  /**
+   * Gets the initialization vector to use for this crypto module.
+   * 
+   * @see CryptoModuleParameters#setInitializationVector(byte[])
+   */
+  public byte[] getInitializationVector() {
+    return initializationVector;
+  }
+  
+  /**
+   * Sets the initialization vector to use for this crypto module.
+   * 
+   * <p>
+   * 
+   * For <b>encryption</b>, this parameter is <i>optional</i>. If the initialization vector is created by the caller, for whatever reasons, it can be set here
+   * and the crypto module will use it. <br>
+   * 
+   * For <b>decryption</b>, this parameter is <b>required</b>. It should be read from the underlying stream that contains the encrypted data.
+   * 
+   * @param initializationVector
+   *          the initialization vector to use for this crypto operation.
+   */
+  public void setInitializationVector(byte[] initializationVector) {
+    this.initializationVector = initializationVector;
+  }
+  
+  /**
+   * Gets the overall set of options for the {@link CryptoModule}.
+   * 
+   * @see CryptoModuleParameters#setAllOptions(Map)
+   */
+  public Map<String,String> getAllOptions() {
+    return allOptions;
+  }
+  
+  /**
+   * Sets the overall set of options for the {@link CryptoModule}.
+   * 
+   * <p>
+   * 
+   * Often, options for the cryptographic modules will be encoded as key/value pairs in a configuration file. This map represents those values. It may include
+   * some of the parameters already called out as members of this class. It may contain any number of additional parameters which may be required by different
+   * module or key encryption strategy implementations.
+   * 
+   * @param allOptions
+   *          the set of key/value pairs that configure a module, based on a configuration file
+   */
+  public void setAllOptions(Map<String,String> allOptions) {
+    this.allOptions = allOptions;
+  }
+  
+  private String algorithmName = null;
+  private String encryptionMode = null;
+  private String padding = null;
+  private byte[] plaintextKey;
+  private int keyLength = 0;
+  private String randomNumberGenerator = null;
+  private String randomNumberGeneratorProvider = null;
+  
+  private String keyEncryptionStrategyClass;
+  private byte[] encryptedKey;
+  private String opaqueKeyEncryptionKeyID;
+  
+  private boolean recordParametersToStream = true;
+  private boolean closeUnderylingStreamAfterCryptoStreamClose = true;
+  private boolean overrideStreamsSecretKeyEncryptionStrategy = false;
+  
+  private OutputStream plaintextOutputStream;
+  private OutputStream encryptedOutputStream;
+  private InputStream plaintextInputStream;
+  private InputStream encryptedInputStream;
+  
+  private Cipher cipher;
+  private SecureRandom secureRandom;
+  private byte[] initializationVector;
+  
+  private Map<String,String> allOptions;
+  
+}

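Taken together, the setters above cover everything an encrypt operation needs. Below is a minimal sketch of populating the object by hand, assuming a caller-supplied output stream named underlyingStream; as the class Javadoc notes, most callers would instead use CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(...):

    CryptoModuleParameters params = new CryptoModuleParameters();
    params.setAlgorithmName("AES");                    // symmetric algorithm
    params.setEncryptionMode("CBC");                   // block cipher mode
    params.setPadding("PKCS5Padding");                 // padding scheme
    params.setKeyLength(128);                          // bits; a plaintext key is generated if unset
    params.setRandomNumberGenerator("SHA1PRNG");       // baseline JCE default per the Javadoc above
    params.setRandomNumberGeneratorProvider("SUN");    // baseline JCE default per the Javadoc above
    params.setPlaintextOutputStream(underlyingStream); // hypothetical caller-supplied stream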
http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModule.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModule.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModule.java
index 58e1e75..905eb6e 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModule.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModule.java
@@ -22,8 +22,6 @@ import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
 import java.io.PushbackInputStream;
 import java.security.InvalidAlgorithmParameterException;
 import java.security.InvalidKeyException;
@@ -41,250 +39,379 @@ import org.apache.accumulo.core.conf.Property;
 import org.apache.log4j.Logger;
 
 /**
- * This class contains the gritty details around setting up encrypted streams for reading and writing the log file. It obeys the interface CryptoModule, which
- * other developers can implement to change out this logic as necessary.
+ * This class implements the {@link CryptoModule} interface, defining how calling applications can receive encrypted 
+ * input and output streams.  While the default implementation given here allows for a lot of flexibility in terms of 
+ * choices of algorithm, key encryption strategies, and so on, some Accumulo users may choose to swap out this implementation
+ * for others, and can base their implementation details off of this class's work.
+ * 
+ * In general, the module is quite straightforward: provide it with crypto-related settings and an input/output stream, and
+ * it will hand back those streams wrapped in encrypting (or decrypting) streams.
  * 
- * @deprecated This feature is experimental and may go away in future versions.
  */
-@Deprecated
 public class DefaultCryptoModule implements CryptoModule {
   
-  // This is how *I* like to format my variable declarations. Your mileage may vary.
-  
   private static final String ENCRYPTION_HEADER_MARKER = "---Log File Encrypted (v1)---";
   private static Logger log = Logger.getLogger(DefaultCryptoModule.class);
   
   public DefaultCryptoModule() {}
   
+  
   @Override
-  public OutputStream getEncryptingOutputStream(OutputStream out, Map<String,String> cryptoOpts) throws IOException {
-    
-    log.debug("Initializing crypto output stream");
+  public CryptoModuleParameters initializeCipher(CryptoModuleParameters params) {
+    String cipherTransformation = getCipherTransformation(params); 
     
-    String cipherSuite = cryptoOpts.get(Property.CRYPTO_CIPHER_SUITE.getKey());
+    log.trace(String.format("Using cipher suite \"%s\" with key length %d with RNG \"%s\" and RNG provider \"%s\" and key encryption strategy \"%s\"",
+        cipherTransformation, params.getKeyLength(), params.getRandomNumberGenerator(), params.getRandomNumberGeneratorProvider(),
+        params.getKeyEncryptionStrategyClass()));
     
-    if (cipherSuite.equals("NullCipher")) {
-      return out;
+    if (params.getSecureRandom() == null) {
+      SecureRandom secureRandom = DefaultCryptoModuleUtils.getSecureRandom(params.getRandomNumberGenerator(), params.getRandomNumberGeneratorProvider());
+      params.setSecureRandom(secureRandom);
     }
     
-    String algorithmName = cryptoOpts.get(Property.CRYPTO_CIPHER_ALGORITHM_NAME.getKey());
-    String secureRNG = cryptoOpts.get(Property.CRYPTO_SECURE_RNG.getKey());
-    String secureRNGProvider = cryptoOpts.get(Property.CRYPTO_SECURE_RNG_PROVIDER.getKey());
-    SecureRandom secureRandom = DefaultCryptoModuleUtils.getSecureRandom(secureRNG, secureRNGProvider);
-    int keyLength = Integer.parseInt(cryptoOpts.get(Property.CRYPTO_CIPHER_KEY_LENGTH.getKey()));
-    
-    byte[] randomKey = new byte[keyLength / 8];
-    
-    Map<CryptoInitProperty,Object> cryptoInitParams = new HashMap<CryptoInitProperty,Object>();
-    
-    secureRandom.nextBytes(randomKey);
-    cryptoInitParams.put(CryptoInitProperty.PLAINTEXT_SESSION_KEY, randomKey);
-    
-    SecretKeyEncryptionStrategy keyEncryptionStrategy = CryptoModuleFactory.getSecretKeyEncryptionStrategy(cryptoOpts
-        .get(Property.CRYPTO_SECRET_KEY_ENCRYPTION_STRATEGY_CLASS.getKey()));
-    SecretKeyEncryptionStrategyContext keyEncryptionStrategyContext = keyEncryptionStrategy.getNewContext();
-    
-    keyEncryptionStrategyContext.setPlaintextSecretKey(randomKey);
-    keyEncryptionStrategyContext.setContext(cryptoOpts);
-    
-    keyEncryptionStrategyContext = keyEncryptionStrategy.encryptSecretKey(keyEncryptionStrategyContext);
-    
-    byte[] encryptedRandomKey = keyEncryptionStrategyContext.getEncryptedSecretKey();
-    String opaqueId = keyEncryptionStrategyContext.getOpaqueKeyEncryptionKeyID();
-    
-    OutputStream cipherOutputStream = getEncryptingOutputStream(out, cryptoOpts, cryptoInitParams);
-    
-    // Get the IV from the init params, since we didn't create it but the other getEncryptingOutputStream did
-    byte[] initVector = (byte[]) cryptoInitParams.get(CryptoInitProperty.INITIALIZATION_VECTOR);
+    Cipher cipher = DefaultCryptoModuleUtils.getCipher(cipherTransformation);
     
-    DataOutputStream dataOut = new DataOutputStream(out);
-    
-    // Write a marker to indicate this is an encrypted log file (in case we read it a plain one and need to
-    // not try to decrypt it. Can happen during a failure when the log's encryption settings are changing.
-    dataOut.writeUTF(ENCRYPTION_HEADER_MARKER);
-    
-    // Write out the cipher suite and algorithm used to encrypt this file. In case the admin changes, we want to still
-    // decode the old format.
-    dataOut.writeUTF(cipherSuite);
-    dataOut.writeUTF(algorithmName);
+    if (params.getInitializationVector() == null) {
+      try {
+        cipher.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(params.getPlaintextKey(), params.getAlgorithmName()), params.getSecureRandom());
+      } catch (InvalidKeyException e) {
+        log.error("Accumulo encountered an unknown error in generating the secret key object (SecretKeySpec) for an encrypted stream");
+        throw new RuntimeException(e);
+      }
+      
+      params.setInitializationVector(cipher.getIV());
+      
+      
+    } else {
+      try {
+        cipher.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(params.getPlaintextKey(), params.getAlgorithmName()), new IvParameterSpec(params.getInitializationVector()));
+      } catch (InvalidKeyException e) {
+        log.error("Accumulo encountered an unknown error in generating the secret key object (SecretKeySpec) for an encrypted stream");
+        throw new RuntimeException(e);
+      } catch (InvalidAlgorithmParameterException e) {
+        log.error("Accumulo encountered an unknown error in setting up the initialization vector for an encrypted stream");
+        throw new RuntimeException(e);
+      }
+    }
+
+    params.setCipher(cipher);
     
-    // Write the init vector to the log file
-    dataOut.writeInt(initVector.length);
-    dataOut.write(initVector);
+    return params;
     
-    // Write out the encrypted session key and the opaque ID
-    dataOut.writeUTF(opaqueId);
-    dataOut.writeInt(encryptedRandomKey.length);
-    dataOut.write(encryptedRandomKey);
+  }
+
+  private String getCipherTransformation(CryptoModuleParameters params) {
+    String cipherSuite = params.getAlgorithmName() + "/" + params.getEncryptionMode() + "/" + params.getPadding();
+    return cipherSuite;
+  }
+  
+  private String[] parseCipherSuite(String cipherSuite) {
+    return cipherSuite.split("/");
+  }
+  
+  private boolean validateNotEmpty(String givenValue, boolean allIsWell, StringBuffer buf, String errorMessage) {
+    if (givenValue == null || givenValue.equals("")) {
+      buf.append(errorMessage);
+      buf.append("\n");
+      return false;
+    }
     
-    // Write the secret key (encrypted) into the log file
-    // dataOut.writeInt(randomKey.length);
-    // dataOut.write(randomKey);
+    return allIsWell;
+  }
+
+  private boolean validateNotNull(Object givenValue, boolean allIsWell, StringBuffer buf, String errorMessage) {
+    if (givenValue == null) {
+      buf.append(errorMessage);
+      buf.append("\n");
+      return false;
+    }
     
-    return cipherOutputStream;
+    return allIsWell;
   }
+
   
-  @Override
-  public InputStream getDecryptingInputStream(InputStream in, Map<String,String> cryptoOpts) throws IOException {
-    DataInputStream dataIn = new DataInputStream(in);
+  private boolean validateNotZero(int givenValue, boolean allIsWell, StringBuffer buf, String errorMessage) {
+    if (givenValue == 0) {
+      buf.append(errorMessage);
+      buf.append("\n");
+      return false;
+    }
     
-    String marker = dataIn.readUTF();
+    return allIsWell;
+  }
+
+  private boolean validateParamsObject(CryptoModuleParameters params, int cipherMode) {
     
-    log.debug("Read encryption header");
-    if (marker.equals(ENCRYPTION_HEADER_MARKER)) {
-      
-      String cipherSuiteFromFile = dataIn.readUTF();
-      String algorithmNameFromFile = dataIn.readUTF();
+    if (cipherMode == Cipher.ENCRYPT_MODE) {
       
-      // Read the secret key and initialization vector from the file
-      int initVectorLength = dataIn.readInt();
-      byte[] initVector = new byte[initVectorLength];
-      dataIn.read(initVector, 0, initVectorLength);
+      StringBuffer errorBuf = new StringBuffer("The following problems were found with the CryptoModuleParameters object you provided for an encrypt operation:\n");
+      boolean allIsWell = true;
       
-      // Read the opaque ID and encrypted session key
-      String opaqueId = dataIn.readUTF();
-      int encryptedSecretKeyLength = dataIn.readInt();
-      byte[] encryptedSecretKey = new byte[encryptedSecretKeyLength];
-      dataIn.read(encryptedSecretKey);
+      allIsWell = validateNotEmpty(params.getAlgorithmName(), allIsWell, errorBuf, "No algorithm name was specified.");
       
-      SecretKeyEncryptionStrategy keyEncryptionStrategy = CryptoModuleFactory.getSecretKeyEncryptionStrategy(cryptoOpts
-          .get(Property.CRYPTO_SECRET_KEY_ENCRYPTION_STRATEGY_CLASS.getKey()));
-      SecretKeyEncryptionStrategyContext keyEncryptionStrategyContext = keyEncryptionStrategy.getNewContext();
-      
-      keyEncryptionStrategyContext.setOpaqueKeyEncryptionKeyID(opaqueId);
-      keyEncryptionStrategyContext.setContext(cryptoOpts);
-      keyEncryptionStrategyContext.setEncryptedSecretKey(encryptedSecretKey);
-      
-      keyEncryptionStrategyContext = keyEncryptionStrategy.decryptSecretKey(keyEncryptionStrategyContext);
+      if (allIsWell && params.getAlgorithmName().equals("NullCipher")) {
+        return true;
+      }
       
-      byte[] secretKey = keyEncryptionStrategyContext.getPlaintextSecretKey();
+      allIsWell = validateNotEmpty(params.getPadding(),                       allIsWell, errorBuf, "No padding was specified.");
+      allIsWell = validateNotZero (params.getKeyLength(),                     allIsWell, errorBuf, "No key length was specified.");
+      allIsWell = validateNotEmpty(params.getEncryptionMode(),                allIsWell, errorBuf, "No encryption mode was specified.");
+      allIsWell = validateNotEmpty(params.getRandomNumberGenerator(),         allIsWell, errorBuf, "No random number generator was specified.");
+      allIsWell = validateNotEmpty(params.getRandomNumberGeneratorProvider(), allIsWell, errorBuf, "No random number generator provider was specified.");
+      allIsWell = validateNotNull (params.getPlaintextOutputStream(),         allIsWell, errorBuf, "No plaintext output stream was specified.");
+
+      if (!allIsWell) {
+        log.error("CryptoModulesParameters object is not valid.");
+        log.error(errorBuf.toString());
+        throw new RuntimeException("CryptoModulesParameters object is not valid.");
+      }
       
-      // int secretKeyLength = dataIn.readInt();
-      // byte[] secretKey = new byte[secretKeyLength];
-      // dataIn.read(secretKey, 0, secretKeyLength);
+      return allIsWell;
       
-      Map<CryptoModule.CryptoInitProperty,Object> cryptoInitParams = new HashMap<CryptoModule.CryptoInitProperty,Object>();
-      cryptoInitParams.put(CryptoInitProperty.CIPHER_SUITE, cipherSuiteFromFile);
-      cryptoInitParams.put(CryptoInitProperty.ALGORITHM_NAME, algorithmNameFromFile);
-      cryptoInitParams.put(CryptoInitProperty.PLAINTEXT_SESSION_KEY, secretKey);
-      cryptoInitParams.put(CryptoInitProperty.INITIALIZATION_VECTOR, initVector);
+    } else if (cipherMode == Cipher.DECRYPT_MODE) {
+      StringBuffer errorBuf = new StringBuffer("The following problems were found with the CryptoModuleParameters object you provided for a decrypt operation:\n");
+      boolean allIsWell = true;
+
+      allIsWell = validateNotEmpty(params.getPadding(),                       allIsWell, errorBuf, "No padding was specified.");
+      allIsWell = validateNotZero (params.getKeyLength(),                     allIsWell, errorBuf, "No key length was specified.");
+      allIsWell = validateNotEmpty(params.getEncryptionMode(),                allIsWell, errorBuf, "No encryption mode was specified.");
+      allIsWell = validateNotEmpty(params.getRandomNumberGenerator(),         allIsWell, errorBuf, "No random number generator was specified.");
+      allIsWell = validateNotEmpty(params.getRandomNumberGeneratorProvider(), allIsWell, errorBuf, "No random number generator provider was specified.");
+      allIsWell = validateNotNull (params.getEncryptedInputStream(),          allIsWell, errorBuf, "No encrypted input stream was specified.");
+      allIsWell = validateNotNull (params.getInitializationVector(),          allIsWell, errorBuf, "No initialization vector was specified.");
+      allIsWell = validateNotNull (params.getEncryptedKey(),                  allIsWell, errorBuf, "No encrypted key was specified.");
       
-      InputStream cipherInputStream = getDecryptingInputStream(dataIn, cryptoOpts, cryptoInitParams);
-      return cipherInputStream;
+      if (params.getKeyEncryptionStrategyClass() != null && !params.getKeyEncryptionStrategyClass().equals("NullSecretKeyEncryptionStrategy")) {
+        allIsWell = validateNotEmpty(params.getOpaqueKeyEncryptionKeyID(), allIsWell, errorBuf, "No opaque key encryption ID was specified.");
+      }
       
-    } else {
-      // Push these bytes back on to the stream. This method is a bit roundabout but isolates our code
-      // from having to understand the format that DataOuputStream uses for its bytes.
-      ByteArrayOutputStream tempByteOut = new ByteArrayOutputStream();
-      DataOutputStream tempOut = new DataOutputStream(tempByteOut);
-      tempOut.writeUTF(marker);
       
-      byte[] bytesToPutBack = tempByteOut.toByteArray();
+      if (!allIsWell) {
+        log.error("CryptoModulesParameters object is not valid.");
+        log.error(errorBuf.toString());
+        throw new RuntimeException("CryptoModulesParameters object is not valid.");
+      }
       
-      PushbackInputStream pushbackStream = new PushbackInputStream(in, bytesToPutBack.length);
-      pushbackStream.unread(bytesToPutBack);
+      return allIsWell;
       
-      return pushbackStream;
-    }
+    } 
     
+    return false;
   }
   
+  
   @Override
-  public OutputStream getEncryptingOutputStream(OutputStream out, Map<String,String> conf, Map<CryptoModule.CryptoInitProperty,Object> cryptoInitParams) {
+  public CryptoModuleParameters getEncryptingOutputStream(CryptoModuleParameters params) throws IOException {
+    
+    log.trace("Initializing crypto output stream (new style)");
+    
+    boolean allParamsOK = validateParamsObject(params, Cipher.ENCRYPT_MODE);
+    if (!allParamsOK) {
+      // This would be weird because the above call should throw an exception, but if it doesn't we'll check and throw here.
+      
+      log.error("CryptoModuleParameters was not valid.");
+      throw new RuntimeException("Invalid CryptoModuleParameters");
+    }
+    
     
-    log.debug("Initializing crypto output stream");
+    // If they want a null output stream, just return their plaintext stream as the encrypted stream
+    if (params.getAlgorithmName().equals("NullCipher")) {
+      params.setEncryptedOutputStream(params.getPlaintextOutputStream());
+      return params;
+    }
+    
+    // Get the secret key
     
-    String cipherSuite = conf.get(Property.CRYPTO_CIPHER_SUITE.getKey());
+    SecureRandom secureRandom = DefaultCryptoModuleUtils.getSecureRandom(params.getRandomNumberGenerator(), params.getRandomNumberGeneratorProvider());
     
-    if (cipherSuite.equals("NullCipher")) {
-      return out;
+    if (params.getPlaintextKey() == null) {
+      byte[] randomKey = new byte[params.getKeyLength() / 8];
+      secureRandom.nextBytes(randomKey);
+      params.setPlaintextKey(randomKey);
     }
     
-    String algorithmName = conf.get(Property.CRYPTO_CIPHER_ALGORITHM_NAME.getKey());
-    String secureRNG = conf.get(Property.CRYPTO_SECURE_RNG.getKey());
-    String secureRNGProvider = conf.get(Property.CRYPTO_SECURE_RNG_PROVIDER.getKey());
-    int keyLength = Integer.parseInt(conf.get(Property.CRYPTO_CIPHER_KEY_LENGTH.getKey()));
-    String keyStrategyName = conf.get(Property.CRYPTO_SECRET_KEY_ENCRYPTION_STRATEGY_CLASS.getKey());
+    // Encrypt the secret key
+    
+    SecretKeyEncryptionStrategy keyEncryptionStrategy = CryptoModuleFactory.getSecretKeyEncryptionStrategy(params.getKeyEncryptionStrategyClass());
+    params = keyEncryptionStrategy.encryptSecretKey(params);
+    
+    // Now the encrypted version of the key and any opaque ID are within the params object.  Initialize the cipher.
     
-    log.debug(String.format(
-        "Using cipher suite \"%s\" (algorithm \"%s\") with key length %d with RNG \"%s\" and RNG provider \"%s\" and key encryption strategy %s", cipherSuite,
-        algorithmName, keyLength, secureRNG, secureRNGProvider, keyStrategyName));
+    // Check if the caller wants us to close the downstream stream when close() is called on the
+    // cipher object.  Calling close() on a CipherOutputStream is necessary for it to write out
+    // padding bytes.
+    if (!params.getCloseUnderylingStreamAfterCryptoStreamClose()) {
+      params.setPlaintextOutputStream(new DiscardCloseOutputStream(params.getPlaintextOutputStream()));
+    }
+
+    if (params.getCipher() == null) {
+      initializeCipher(params);
+    }
     
-    SecureRandom secureRandom = DefaultCryptoModuleUtils.getSecureRandom(secureRNG, secureRNGProvider);
-    Cipher cipher = DefaultCryptoModuleUtils.getCipher(cipherSuite);
-    byte[] randomKey = (byte[]) cryptoInitParams.get(CryptoInitProperty.PLAINTEXT_SESSION_KEY);
-    byte[] initVector = (byte[]) cryptoInitParams.get(CryptoInitProperty.INITIALIZATION_VECTOR);
+    CipherOutputStream cipherOutputStream = new CipherOutputStream(params.getPlaintextOutputStream(), params.getCipher());
+    BufferedOutputStream bufferedOutputStream = new BufferedOutputStream(cipherOutputStream);    
     
-    // If they pass us an IV, use it...
-    if (initVector != null) {
+    params.setEncryptedOutputStream(bufferedOutputStream);
+    
+    if (params.getRecordParametersToStream()) {
+      DataOutputStream dataOut = new DataOutputStream(params.getPlaintextOutputStream());
       
-      try {
-        cipher.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(randomKey, algorithmName), new IvParameterSpec(initVector));
-      } catch (InvalidKeyException e) {
-        log.error("Accumulo encountered an unknown error in generating the secret key object (SecretKeySpec) for an encrypted stream");
-        throw new RuntimeException(e);
-      } catch (InvalidAlgorithmParameterException e) {
-        log.error("Accumulo encountered an unknown error in generating the secret key object (SecretKeySpec) for an encrypted stream");
-        throw new RuntimeException(e);
-      }
+      // Write a marker to indicate this is an encrypted log file (in case we read it as a plain one and need
+      // to not try to decrypt it; this can happen during a failure when the log's encryption settings are changing).
+      dataOut.writeUTF(ENCRYPTION_HEADER_MARKER);
       
-    } else {
-      // We didn't get an IV, so we'll let the cipher make one for us and then put its value back into the map so
-      // that the caller has access to it, to persist it.
-      try {
-        cipher.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(randomKey, algorithmName), secureRandom);
-      } catch (InvalidKeyException e) {
-        log.error("Accumulo encountered an unknown error in generating the secret key object (SecretKeySpec) for the write-ahead log");
-        throw new RuntimeException(e);
+      
+      // Write out all the parameters
+      dataOut.writeInt(params.getAllOptions().size());
+      for (String key : params.getAllOptions().keySet()) {
+        dataOut.writeUTF(key);
+        dataOut.writeUTF(params.getAllOptions().get(key));
       }
+
+      // Write out the cipher suite and algorithm used to encrypt this file. In case the admin changes them, we still
+      // want to be able to decode the old format.
+      dataOut.writeUTF(getCipherTransformation(params));
+      dataOut.writeUTF(params.getAlgorithmName());
+      
+      // Write the init vector to the log file
+      dataOut.writeInt(params.getInitializationVector().length);
+      dataOut.write(params.getInitializationVector());
       
-      // Since the IV length is determined by the algorithm, we let the cipher generate our IV for us,
-      // rather than calling secure random directly.
-      initVector = cipher.getIV();
-      cryptoInitParams.put(CryptoInitProperty.INITIALIZATION_VECTOR, initVector);
+      // Write out the encrypted session key and the opaque ID
+      dataOut.writeUTF(params.getOpaqueKeyEncryptionKeyID());
+      dataOut.writeInt(params.getEncryptedKey().length);
+      dataOut.write(params.getEncryptedKey());
     }
     
-    CipherOutputStream cipherOutputStream = new CipherOutputStream(out, cipher);
-    BufferedOutputStream bufferedCipherOutputStream = new BufferedOutputStream(cipherOutputStream);
-    
-    return bufferedCipherOutputStream;
+    return params;
   }
   
   @Override
-  public InputStream getDecryptingInputStream(InputStream in, Map<String,String> cryptoOpts, Map<CryptoModule.CryptoInitProperty,Object> cryptoInitParams)
-      throws IOException {
-    String cipherSuite = cryptoOpts.get(Property.CRYPTO_CIPHER_SUITE.getKey());
-    String algorithmName = cryptoOpts.get(Property.CRYPTO_CIPHER_ALGORITHM_NAME.getKey());
-    String cipherSuiteFromInitParams = (String) cryptoInitParams.get(CryptoInitProperty.CIPHER_SUITE);
-    String algorithmNameFromInitParams = (String) cryptoInitParams.get(CryptoInitProperty.ALGORITHM_NAME);
-    byte[] initVector = (byte[]) cryptoInitParams.get(CryptoInitProperty.INITIALIZATION_VECTOR);
-    byte[] secretKey = (byte[]) cryptoInitParams.get(CryptoInitProperty.PLAINTEXT_SESSION_KEY);
-    
-    if (initVector == null || secretKey == null || cipherSuiteFromInitParams == null || algorithmNameFromInitParams == null) {
-      log.error("Called getDecryptingInputStream() without proper crypto init params.  Need initVector, plaintext key, cipher suite and algorithm name");
-      throw new RuntimeException("Called getDecryptingInputStream() without initialization vector and/or plaintext session key");
+  public CryptoModuleParameters getDecryptingInputStream(CryptoModuleParameters params) throws IOException {
+    log.trace("About to initialize decryption stream (new style)");
+        
+    if (params.getRecordParametersToStream()) {
+      DataInputStream dataIn = new DataInputStream(params.getEncryptedInputStream());
+      log.trace("About to read encryption parameters from underlying stream");
+      
+      String marker = dataIn.readUTF();
+      if (marker.equals(ENCRYPTION_HEADER_MARKER)) {
+        
+        Map<String, String> paramsFromFile = new HashMap<String, String>();
+        
+        // Read in the bulk of parameters
+        int paramsCount = dataIn.readInt();
+        for (int i = 0; i < paramsCount; i++) {
+          String key = dataIn.readUTF();
+          String value = dataIn.readUTF();
+          
+          paramsFromFile.put(key, value);
+        }
+                
+        // Set the cipher parameters
+        String cipherSuiteFromFile = dataIn.readUTF();
+        String algorithmNameFromFile = dataIn.readUTF();
+        String[] cipherSuiteParts = parseCipherSuite(cipherSuiteFromFile);
+        params.setAlgorithmName(algorithmNameFromFile);
+        params.setEncryptionMode(cipherSuiteParts[1]);
+        params.setPadding(cipherSuiteParts[2]);
+        
+        
+        // Read the secret key and initialization vector from the file
+        int initVectorLength = dataIn.readInt();
+        byte[] initVector = new byte[initVectorLength];
+        dataIn.read(initVector, 0, initVectorLength);
+        
+        params.setInitializationVector(initVector);
+        
+        // Read the opaque ID and encrypted session key
+        String opaqueId = dataIn.readUTF();
+        params.setOpaqueKeyEncryptionKeyID(opaqueId);
+        
+        int encryptedSecretKeyLength = dataIn.readInt();
+        byte[] encryptedSecretKey = new byte[encryptedSecretKeyLength]; 
+        dataIn.read(encryptedSecretKey);
+        params.setEncryptedKey(encryptedSecretKey);
+        
+        
+        if (params.getOverrideStreamsSecretKeyEncryptionStrategy()) {
+          // Merge in options from file selectively
+          for (String name : paramsFromFile.keySet()) {
+            if (!name.equals(Property.CRYPTO_SECRET_KEY_ENCRYPTION_STRATEGY_CLASS.getKey())) {
+              params.getAllOptions().put(name, paramsFromFile.get(name));
+            }
+          }
+          params.setKeyEncryptionStrategyClass(params.getAllOptions().get(Property.CRYPTO_SECRET_KEY_ENCRYPTION_STRATEGY_CLASS.getKey()));
+        } else {
+          params = CryptoModuleFactory.fillParamsObjectFromStringMap(params, paramsFromFile);
+        }
+             
+        SecretKeyEncryptionStrategy keyEncryptionStrategy = CryptoModuleFactory.getSecretKeyEncryptionStrategy(params.getKeyEncryptionStrategyClass());
+        
+        params = keyEncryptionStrategy.decryptSecretKey(params);
+        
+      } else {
+        
+        log.trace("Read something off of the encrypted input stream that was not the encryption header marker, so pushing back bytes and returning the given stream");
+        // Push these bytes back on to the stream. This method is a bit roundabout but isolates our code
+        // from having to understand the format that DataOutputStream uses for its bytes.
+        ByteArrayOutputStream tempByteOut = new ByteArrayOutputStream();
+        DataOutputStream tempOut = new DataOutputStream(tempByteOut);
+        tempOut.writeUTF(marker);
+        
+        byte[] bytesToPutBack = tempByteOut.toByteArray();
+        
+        PushbackInputStream pushbackStream = new PushbackInputStream(params.getEncryptedInputStream(), bytesToPutBack.length);
+        pushbackStream.unread(bytesToPutBack);
+        
+        params.setPlaintextInputStream(pushbackStream);
+        
+        return params;
+      }      
     }
     
-    // Always use the init param's cipher suite, but check it against configured one and warn about discrepencies.
-    if (!cipherSuiteFromInitParams.equals(cipherSuite) || !algorithmNameFromInitParams.equals(algorithmName))
-      log.warn(String.format("Configured cipher suite and algorithm (\"%s\" and \"%s\") is different "
-          + "from cipher suite found in log file (\"%s\" and \"%s\")", cipherSuite, algorithmName, cipherSuiteFromInitParams, algorithmNameFromInitParams));
+    // We validate here after reading parameters from the stream, not at the top of the function.
+    boolean allParamsOK = validateParamsObject(params, Cipher.DECRYPT_MODE);
     
-    Cipher cipher = DefaultCryptoModuleUtils.getCipher(cipherSuiteFromInitParams);
+    if (!allParamsOK) {
+      log.error("CryptoModuleParameters object failed validation for decrypt");
+      throw new RuntimeException("CryptoModuleParameters object failed validation for decrypt");
+    }
+    
+    Cipher cipher = DefaultCryptoModuleUtils.getCipher(getCipherTransformation(params));
     
     try {
-      cipher.init(Cipher.DECRYPT_MODE, new SecretKeySpec(secretKey, algorithmNameFromInitParams), new IvParameterSpec(initVector));
+      cipher.init(Cipher.DECRYPT_MODE, new SecretKeySpec(params.getPlaintextKey(), params.getAlgorithmName()), new IvParameterSpec(params.getInitializationVector()));
     } catch (InvalidKeyException e) {
       log.error("Error when trying to initialize cipher with secret key");
       throw new RuntimeException(e);
     } catch (InvalidAlgorithmParameterException e) {
       log.error("Error when trying to initialize cipher with initialization vector");
       throw new RuntimeException(e);
-    }
+    }   
+    
     
-    BufferedInputStream bufferedDecryptingInputStream = new BufferedInputStream(new CipherInputStream(in, cipher));
+    BufferedInputStream bufferedDecryptingInputStream = new BufferedInputStream(new CipherInputStream(params.getEncryptedInputStream(), cipher));
+
+    log.trace("Initialized cipher input stream with transformation ["+getCipherTransformation(params)+"]");
     
-    return bufferedDecryptingInputStream;
+    params.setPlaintextInputStream(bufferedDecryptingInputStream);
+
+    return params;
+  }
+
+  @Override
+  public CryptoModuleParameters generateNewRandomSessionKey(CryptoModuleParameters params) {
+
+    if (params.getSecureRandom() == null) {
+      params.setSecureRandom(DefaultCryptoModuleUtils.getSecureRandom(params.getRandomNumberGenerator(), params.getRandomNumberGeneratorProvider()));
+    }
+    byte[] newSessionKey = new byte[params.getKeyLength() / 8];
+
+    params.getSecureRandom().nextBytes(newSessionKey);
+    params.setPlaintextKey(newSessionKey);
     
+    return params;
   }
+  
 }
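
The pushback logic above is what lets the module read legacy, unencrypted streams: it reads what may be a header, and when the marker is absent it reconstitutes the consumed bytes by re-encoding them through a DataOutputStream. A minimal, self-contained sketch of that pattern follows; the marker string and class name here are illustrative, not Accumulo's.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.PushbackInputStream;

    public class HeaderPushbackExample {

      static final String MARKER = "---ENCRYPTED---"; // illustrative, not Accumulo's real marker

      // Consume the marker if present; otherwise push the consumed bytes back so
      // the caller sees the stream exactly as it arrived.
      static InputStream consumeMarkerIfPresent(InputStream in) throws IOException {
        DataInputStream dataIn = new DataInputStream(in);
        String marker = dataIn.readUTF(); // production code must also handle EOF/malformed UTF here
        if (MARKER.equals(marker))
          return dataIn;
        // Re-encode through DataOutputStream so the pushed-back bytes match what
        // readUTF consumed, without hard-coding knowledge of the UTF framing.
        ByteArrayOutputStream tempByteOut = new ByteArrayOutputStream();
        new DataOutputStream(tempByteOut).writeUTF(marker);
        byte[] bytesToPutBack = tempByteOut.toByteArray();
        PushbackInputStream pushbackStream = new PushbackInputStream(dataIn, bytesToPutBack.length);
        pushbackStream.unread(bytesToPutBack);
        return pushbackStream;
      }

      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        new DataOutputStream(buf).writeUTF("not a header");
        DataInputStream restored = new DataInputStream(
            consumeMarkerIfPresent(new ByteArrayInputStream(buf.toByteArray())));
        System.out.println(restored.readUTF()); // prints "not a header"
      }
    }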

http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModuleUtils.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModuleUtils.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModuleUtils.java
index 712d517..34ec1f3 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModuleUtils.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultCryptoModuleUtils.java
@@ -68,5 +68,4 @@ public class DefaultCryptoModuleUtils {
     return cipher;
   }
   
-  
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultSecretKeyEncryptionStrategy.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultSecretKeyEncryptionStrategy.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultSecretKeyEncryptionStrategy.java
index 10690b5..f0ece50 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultSecretKeyEncryptionStrategy.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/DefaultSecretKeyEncryptionStrategy.java
@@ -17,21 +17,16 @@
 
 package org.apache.accumulo.core.security.crypto;
 
-import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.security.InvalidAlgorithmParameterException;
 import java.security.InvalidKeyException;
+import java.security.Key;
+import java.security.NoSuchAlgorithmException;
 import java.security.SecureRandom;
-import java.util.HashMap;
-import java.util.Map;
 
 import javax.crypto.Cipher;
-import javax.crypto.CipherOutputStream;
-import javax.crypto.spec.IvParameterSpec;
+import javax.crypto.IllegalBlockSizeException;
 import javax.crypto.spec.SecretKeySpec;
 
 import org.apache.accumulo.core.conf.Property;
@@ -42,231 +37,78 @@ import org.apache.log4j.Logger;
 
 public class DefaultSecretKeyEncryptionStrategy implements SecretKeyEncryptionStrategy {
   
-  private static final Logger log = Logger.getLogger(DefaultSecretKeyEncryptionStrategy.class); 
-  
-  public static class DefaultSecretKeyEncryptionStrategyContext implements SecretKeyEncryptionStrategyContext {
-
-    private byte[] plaintextSecretKey;
-    private byte[] encryptedSecretKey;
-    private Map<String, String> context;
-    private String opaqueKeyId;
-    
-    @Override
-    public String getOpaqueKeyEncryptionKeyID() {
-      return opaqueKeyId;
-    }
-
-    @Override
-    public void setOpaqueKeyEncryptionKeyID(String id) {
-      this.opaqueKeyId = id;
-    }
-
-    @Override
-    public byte[] getPlaintextSecretKey() {
-      return plaintextSecretKey;
-    }
-
-    @Override
-    public void setPlaintextSecretKey(byte[] key) {
-      this.plaintextSecretKey = key;
-    }
-
-    @Override
-    public byte[] getEncryptedSecretKey() {
-      return encryptedSecretKey;
-    }
-
-    @Override
-    public void setEncryptedSecretKey(byte[] key) {
-      this.encryptedSecretKey = key;
-    }
-
-    @Override
-    public Map<String,String> getContext() {
-      return context;
-    }
-
-    @Override
-    public void setContext(Map<String,String> context) {
-      this.context = context;
-    }
-  }
-  
-  
-  @Override
-  public SecretKeyEncryptionStrategyContext encryptSecretKey(SecretKeyEncryptionStrategyContext context)  {
-    String hdfsURI = context.getContext().get(Property.CRYPTO_DEFAULT_KEY_STRATEGY_HDFS_URI.getKey());
-    String pathToKeyName = context.getContext().get(Property.CRYPTO_DEFAULT_KEY_STRATEGY_KEY_LOCATION.getKey());
-    Path pathToKey = new Path(pathToKeyName);
-    
-    FileSystem fs = getHadoopFileSystem(hdfsURI);
-    try {
-      
-      doKeyEncryptionOperation(Cipher.ENCRYPT_MODE, context, pathToKeyName, pathToKey, fs);
-      
-    } catch (IOException e) {
-      log.error(e);
-      throw new RuntimeException(e);
-    }
-  
-    return context;
-    
-  }
+  private static final Logger log = Logger.getLogger(DefaultSecretKeyEncryptionStrategy.class);
 
-  private void initializeKeyEncryptingKey(FileSystem fs, Path pathToKey, SecretKeyEncryptionStrategyContext context) throws IOException {
-    Map<String, String> cryptoContext = context.getContext(); 
-    DataOutputStream out = fs.create(pathToKey);
-    // Very important, lets hedge our bets
-    fs.setReplication(pathToKey, (short) 5);
-    
-    // Write number of context entries
-    out.writeInt(cryptoContext.size());
-    
-    for (String key : cryptoContext.keySet()) {
-      out.writeUTF(key);
-      out.writeUTF(cryptoContext.get(key));
-    }
-    
-    SecureRandom random = DefaultCryptoModuleUtils.getSecureRandom(cryptoContext.get(Property.CRYPTO_SECURE_RNG.getKey()), cryptoContext.get(Property.CRYPTO_SECURE_RNG_PROVIDER.getKey()));
-    int keyLength = Integer.parseInt(cryptoContext.get(Property.CRYPTO_CIPHER_KEY_LENGTH.getKey()));
-    byte[] newRandomKeyEncryptionKey = new byte[keyLength / 8];
-    
-    random.nextBytes(newRandomKeyEncryptionKey);
-    
-    Cipher cipher = DefaultCryptoModuleUtils.getCipher(cryptoContext.get(Property.CRYPTO_CIPHER_SUITE.getKey()));
-    try {
-      cipher.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(newRandomKeyEncryptionKey, cryptoContext.get(Property.CRYPTO_CIPHER_ALGORITHM_NAME.getKey())), random);
-    } catch (InvalidKeyException e) {
-      log.error(e);
-      throw new RuntimeException(e);
-    }
-    
-    byte[] initVector = cipher.getIV();
-    
-    out.writeInt(initVector.length);
-    out.write(initVector);
-    
-    out.writeInt(newRandomKeyEncryptionKey.length);
-    out.write(newRandomKeyEncryptionKey);
-    
-    out.flush();
-    out.close();
-    
-  }
-
-  private FileSystem getHadoopFileSystem(String hdfsURI) {
-    FileSystem fs = null;
-    
-    if (hdfsURI != null && !hdfsURI.equals("")) {
-      try {
-        fs = FileSystem.get(CachedConfiguration.getInstance());
-      } catch (IOException e) {
-        log.error(e);
-        throw new RuntimeException(e);
-      }
-    }
-    else {
-      try {
-        fs = FileSystem.get(new URI(hdfsURI), CachedConfiguration.getInstance());
-      } catch (URISyntaxException e) {
-        log.error(e);
-        throw new RuntimeException(e);
-      } catch (IOException e) {
-        log.error(e);
-        throw new RuntimeException(e);
-      }
-      
-      
-    }
-    return fs;
-  }
-  
-  @Override
-  public SecretKeyEncryptionStrategyContext decryptSecretKey(SecretKeyEncryptionStrategyContext context) {
-    String hdfsURI = context.getContext().get(Property.CRYPTO_DEFAULT_KEY_STRATEGY_HDFS_URI.getKey());
-    String pathToKeyName = context.getContext().get(Property.CRYPTO_DEFAULT_KEY_STRATEGY_KEY_LOCATION.getKey());
-    Path pathToKey = new Path(pathToKeyName);
-    
-    FileSystem fs = getHadoopFileSystem(hdfsURI);
-    try {
-      doKeyEncryptionOperation(Cipher.DECRYPT_MODE, context, pathToKeyName, pathToKey, fs);
-      
-      
-    } catch (IOException e) {
-      log.error(e);
-      throw new RuntimeException(e);
-    }
-    
-    return context;
-  }
-
-  private void doKeyEncryptionOperation(int encryptionMode, SecretKeyEncryptionStrategyContext context, String pathToKeyName, Path pathToKey, FileSystem fs)
+  private void doKeyEncryptionOperation(int encryptionMode, CryptoModuleParameters params, String pathToKeyName, Path pathToKey, FileSystem fs)
       throws IOException {
     DataInputStream in = null;
     try {
       if (!fs.exists(pathToKey)) {
         
-        if (encryptionMode == Cipher.DECRYPT_MODE) {
+        if (encryptionMode == Cipher.UNWRAP_MODE) {
           log.error("There was a call to decrypt the session key but no key encryption key exists.  Either restore it, reconfigure the conf file to point to it in HDFS, or throw the affected data away and begin again.");
           throw new RuntimeException("Could not find key encryption key file in configured location in HDFS ("+pathToKeyName+")");
         } else {
-          initializeKeyEncryptingKey(fs, pathToKey, context);
+          DataOutputStream out = null;
+          try {
+            out = fs.create(pathToKey);
+            // Very important, let's hedge our bets
+            fs.setReplication(pathToKey, (short) 5);
+            SecureRandom random = DefaultCryptoModuleUtils.getSecureRandom(params.getRandomNumberGenerator(), params.getRandomNumberGeneratorProvider());
+            int keyLength = params.getKeyLength();
+            byte[] newRandomKeyEncryptionKey = new byte[keyLength / 8];
+            random.nextBytes(newRandomKeyEncryptionKey);
+            out.writeInt(newRandomKeyEncryptionKey.length);
+            out.write(newRandomKeyEncryptionKey);
+            out.flush();
+          } finally {
+            if (out != null) {
+              out.close();        
+            }
+          }
+
         }
       }
       in = fs.open(pathToKey);
-      
-      int numOfOpts = in.readInt();
-      Map<String, String> optsFromFile = new HashMap<String, String>();
-      
-      for (int i = 0; i < numOfOpts; i++) {
-        String key = in.readUTF();
-        String value = in.readUTF();
-        
-        optsFromFile.put(key, value);
-      }
-      
-      int ivLength = in.readInt();
-      byte[] iv = new byte[ivLength];
-      in.read(iv);
-      
-      
+            
       int keyEncryptionKeyLength = in.readInt();
       byte[] keyEncryptionKey = new byte[keyEncryptionKeyLength];
       in.read(keyEncryptionKey);
       
-      Cipher cipher = DefaultCryptoModuleUtils.getCipher(optsFromFile.get(Property.CRYPTO_CIPHER_SUITE.getKey()));
+      Cipher cipher = DefaultCryptoModuleUtils.getCipher(params.getAllOptions().get(Property.CRYPTO_DEFAULT_KEY_STRATEGY_CIPHER_SUITE.getKey()));
 
       try {
-        cipher.init(encryptionMode, new SecretKeySpec(keyEncryptionKey, optsFromFile.get(Property.CRYPTO_CIPHER_ALGORITHM_NAME.getKey())), new IvParameterSpec(iv));
+        cipher.init(encryptionMode, new SecretKeySpec(keyEncryptionKey, params.getAlgorithmName()));
       } catch (InvalidKeyException e) {
         log.error(e);
         throw new RuntimeException(e);
-      } catch (InvalidAlgorithmParameterException e) {
-        log.error(e);
-        throw new RuntimeException(e);
-      }
-
-      ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
-      CipherOutputStream cipherStream = new CipherOutputStream(byteArrayOutputStream, cipher);
-      
+      }      
       
-      if (Cipher.DECRYPT_MODE == encryptionMode) {
-        cipherStream.write(context.getEncryptedSecretKey());
-        cipherStream.flush();        
-        byte[] plaintextSecretKey = byteArrayOutputStream.toByteArray();
-
-        cipherStream.close();
-        
-        context.setPlaintextSecretKey(plaintextSecretKey);
+      if (Cipher.UNWRAP_MODE == encryptionMode) {
+        try {
+          Key plaintextKey = cipher.unwrap(params.getEncryptedKey(), params.getAlgorithmName(), Cipher.SECRET_KEY);
+          params.setPlaintextKey(plaintextKey.getEncoded());
+        } catch (InvalidKeyException e) {
+          log.error(e);
+          throw new RuntimeException(e);
+        } catch (NoSuchAlgorithmException e) {
+          log.error(e);
+          throw new RuntimeException(e);
+        }
       } else {
-        cipherStream.write(context.getPlaintextSecretKey());
-        cipherStream.flush();        
-        byte[] encryptedSecretKey = byteArrayOutputStream.toByteArray();
-
-        cipherStream.close();
+        Key plaintextKey = new SecretKeySpec(params.getPlaintextKey(), params.getAlgorithmName());
+        try {
+          byte[] encryptedSecretKey = cipher.wrap(plaintextKey);
+          params.setEncryptedKey(encryptedSecretKey);
+          params.setOpaqueKeyEncryptionKeyID(pathToKeyName);
+        } catch (InvalidKeyException e) {
+          log.error(e);
+          throw new RuntimeException(e);
+        } catch (IllegalBlockSizeException e) {
+          log.error(e);
+          throw new RuntimeException(e);
+        }
         
-        context.setEncryptedSecretKey(encryptedSecretKey);
-        context.setOpaqueKeyEncryptionKeyID(pathToKeyName);
       }
       
     } finally {
@@ -276,9 +118,71 @@ public class DefaultSecretKeyEncryptionStrategy implements SecretKeyEncryptionSt
     }
   }
 
+
+  private String getFullPathToKey(CryptoModuleParameters params) {
+    String pathToKeyName = params.getAllOptions().get(Property.CRYPTO_DEFAULT_KEY_STRATEGY_KEY_LOCATION.getKey());
+    String instanceDirectory = params.getAllOptions().get(Property.INSTANCE_DFS_DIR.getKey());
+    
+    
+    if (pathToKeyName == null) {
+      pathToKeyName = Property.CRYPTO_DEFAULT_KEY_STRATEGY_KEY_LOCATION.getDefaultValue();
+    }
+    
+    if (instanceDirectory == null) {
+      instanceDirectory = Property.INSTANCE_DFS_DIR.getDefaultValue();
+    }
+    
+    if (!pathToKeyName.startsWith("/")) {
+      pathToKeyName = "/" + pathToKeyName;
+    }
+    
+    String fullPath = instanceDirectory + pathToKeyName;
+    return fullPath;
+  }
+  
   @Override
-  public SecretKeyEncryptionStrategyContext getNewContext() {
-    return new DefaultSecretKeyEncryptionStrategyContext();
+  public CryptoModuleParameters encryptSecretKey(CryptoModuleParameters params) {
+    String hdfsURI = params.getAllOptions().get(Property.INSTANCE_DFS_URI.getKey());
+    if (hdfsURI == null) {
+      hdfsURI = Property.INSTANCE_DFS_URI.getDefaultValue();
+    }
+    
+    String fullPath = getFullPathToKey(params);
+    Path pathToKey = new Path(fullPath);
+    
+    try {
+      FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());   
+      doKeyEncryptionOperation(Cipher.WRAP_MODE, params, fullPath, pathToKey, fs);
+      
+    } catch (IOException e) {
+      log.error(e);
+      throw new RuntimeException(e);
+    }
+    
+    return params;
+  }
+  
+  @Override
+  public CryptoModuleParameters decryptSecretKey(CryptoModuleParameters params) {
+    String hdfsURI = params.getAllOptions().get(Property.INSTANCE_DFS_URI.getKey());
+    if (hdfsURI == null) {
+      hdfsURI = Property.INSTANCE_DFS_URI.getDefaultValue(); 
+    }
+    
+    String pathToKeyName = getFullPathToKey(params);
+    Path pathToKey = new Path(pathToKeyName);
+    
+    try {
+      FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());   
+      doKeyEncryptionOperation(Cipher.UNWRAP_MODE, params, pathToKeyName, pathToKey, fs);
+      
+      
+    } catch (IOException e) {
+      log.error(e);
+      throw new RuntimeException(e);
+    }
+        
+    return params;
   }
   
 }
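
The switch from ENCRYPT_MODE/DECRYPT_MODE over a CipherOutputStream to Cipher.WRAP_MODE/UNWRAP_MODE is the heart of this rewrite: wrap/unwrap is the JCE operation intended for encrypting keys rather than bulk data. A standalone round-trip, independent of the Accumulo classes above:

    import java.security.Key;

    import javax.crypto.Cipher;
    import javax.crypto.KeyGenerator;
    import javax.crypto.SecretKey;

    public class KeyWrapExample {
      public static void main(String[] args) throws Exception {
        // Key-encryption key (KEK); in the patch above this is random bytes stored in HDFS.
        SecretKey kek = KeyGenerator.getInstance("AES").generateKey();
        // Per-file session key that needs protecting.
        SecretKey sessionKey = KeyGenerator.getInstance("AES").generateKey();

        Cipher wrapper = Cipher.getInstance("AES");
        wrapper.init(Cipher.WRAP_MODE, kek);
        byte[] wrappedKey = wrapper.wrap(sessionKey);

        Cipher unwrapper = Cipher.getInstance("AES");
        unwrapper.init(Cipher.UNWRAP_MODE, kek);
        Key recovered = unwrapper.unwrap(wrappedKey, "AES", Cipher.SECRET_KEY);

        System.out.println(java.util.Arrays.equals(sessionKey.getEncoded(), recovered.getEncoded())); // true
      }
    }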

http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/core/src/main/java/org/apache/accumulo/core/security/crypto/DiscardCloseOutputStream.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/DiscardCloseOutputStream.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/DiscardCloseOutputStream.java
new file mode 100644
index 0000000..846cf35
--- /dev/null
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/DiscardCloseOutputStream.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.accumulo.core.security.crypto;
+
+import java.io.FilterOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+
+import org.apache.log4j.Logger;
+
+public class DiscardCloseOutputStream extends FilterOutputStream {
+
+  private static final Logger log = Logger.getLogger(DiscardCloseOutputStream.class);
+  
+  public DiscardCloseOutputStream(OutputStream out) {
+    super(out);
+  }
+  
+  public void close() throws IOException {
+    // Discard
+    log.trace("Discarded close");
+  }
+  
+}
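
A wrapper whose close() does not propagate is useful when a CipherOutputStream must be closed (its close() emits the cipher's final padded block and then closes its target) without closing the underlying file stream. The pairing with CipherOutputStream is an assumption about intent, not shown in this diff; a hedged usage sketch:

    import java.io.ByteArrayOutputStream;

    import javax.crypto.Cipher;
    import javax.crypto.CipherOutputStream;
    import javax.crypto.KeyGenerator;

    import org.apache.accumulo.core.security.crypto.DiscardCloseOutputStream;

    public class DiscardCloseExample {
      public static void main(String[] args) throws Exception {
        Cipher cipher = Cipher.getInstance("AES");
        cipher.init(Cipher.ENCRYPT_MODE, KeyGenerator.getInstance("AES").generateKey());

        ByteArrayOutputStream underlying = new ByteArrayOutputStream(); // stands in for the real file stream
        CipherOutputStream cos = new CipherOutputStream(new DiscardCloseOutputStream(underlying), cipher);
        cos.write("some data".getBytes());
        cos.close(); // flushes the cipher's final block; the wrapper swallows the close

        underlying.write(0); // the underlying stream is still open and usable
      }
    }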

http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/core/src/main/java/org/apache/accumulo/core/security/crypto/SecretKeyEncryptionStrategy.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/security/crypto/SecretKeyEncryptionStrategy.java b/core/src/main/java/org/apache/accumulo/core/security/crypto/SecretKeyEncryptionStrategy.java
index aa3f76d..637ed4b 100644
--- a/core/src/main/java/org/apache/accumulo/core/security/crypto/SecretKeyEncryptionStrategy.java
+++ b/core/src/main/java/org/apache/accumulo/core/security/crypto/SecretKeyEncryptionStrategy.java
@@ -19,11 +19,10 @@ package org.apache.accumulo.core.security.crypto;
 /**
  * 
  */
-public interface SecretKeyEncryptionStrategy {
+public interface SecretKeyEncryptionStrategy {  
   
-  public SecretKeyEncryptionStrategyContext encryptSecretKey(SecretKeyEncryptionStrategyContext context);
-  public SecretKeyEncryptionStrategyContext decryptSecretKey(SecretKeyEncryptionStrategyContext context);
-  public SecretKeyEncryptionStrategyContext getNewContext();
+  public CryptoModuleParameters encryptSecretKey(CryptoModuleParameters params);
+  public CryptoModuleParameters decryptSecretKey(CryptoModuleParameters params);
   
   
 }
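
After this change a strategy is a pure transform over CryptoModuleParameters. For illustration only, a trivial (deliberately insecure) implementation that satisfies the contract, using the same getters and setters seen elsewhere in this patch:

    package org.apache.accumulo.core.security.crypto;

    public class IdentityKeyEncryptionStrategy implements SecretKeyEncryptionStrategy {

      @Override
      public CryptoModuleParameters encryptSecretKey(CryptoModuleParameters params) {
        // No real wrapping: pass the key through and record a placeholder key ID.
        params.setEncryptedKey(params.getPlaintextKey());
        params.setOpaqueKeyEncryptionKeyID("identity"); // no external key material involved
        return params;
      }

      @Override
      public CryptoModuleParameters decryptSecretKey(CryptoModuleParameters params) {
        params.setPlaintextKey(params.getEncryptedKey());
        return params;
      }
    }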


[31/50] [abbrv] git commit: ACCUMULO-1537 increase timeouts for build server

Posted by ct...@apache.org.
ACCUMULO-1537 increase timeouts for build server

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1502296 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/3b624e1a
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/3b624e1a
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/3b624e1a

Branch: refs/heads/ACCUMULO-1496
Commit: 3b624e1ab885011d5c99140fc364fd77624b4df2
Parents: fb839df
Author: Eric C. Newton <ec...@apache.org>
Authored: Thu Jul 11 17:47:55 2013 +0000
Committer: Eric C. Newton <ec...@apache.org>
Committed: Thu Jul 11 17:47:55 2013 +0000

----------------------------------------------------------------------
 .../org/apache/accumulo/test/functional/BigRootTabletIT.java     | 2 +-
 .../org/apache/accumulo/test/functional/FateStarvationIT.java    | 2 +-
 .../org/apache/accumulo/test/functional/GarbageCollectorIT.java  | 4 ++--
 .../java/org/apache/accumulo/test/functional/LargeRowIT.java     | 2 +-
 .../org/apache/accumulo/test/functional/MetadataSplitIT.java     | 2 +-
 .../java/org/apache/accumulo/test/functional/ScanRangeIT.java    | 2 +-
 .../test/java/org/apache/accumulo/test/functional/TableIT.java   | 2 +-
 7 files changed, 8 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/3b624e1a/test/src/test/java/org/apache/accumulo/test/functional/BigRootTabletIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/BigRootTabletIT.java b/test/src/test/java/org/apache/accumulo/test/functional/BigRootTabletIT.java
index 6084aab..bd36bef 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/BigRootTabletIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/BigRootTabletIT.java
@@ -43,7 +43,7 @@ public class BigRootTabletIT extends MacTest {
     cfg.setSiteConfig(siteConfig);
   }
   
-  @Test(timeout = 60 * 1000)
+  @Test(timeout = 2 * 60 * 1000)
   public void test() throws Exception {
     Connector c = getConnector();
     c.tableOperations().addSplits(MetadataTable.NAME, FunctionalTestUtils.splits("0 1 2 3 4 5 6 7 8 9 a".split(" ")));

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3b624e1a/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java b/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java
index 1698193..454062f 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/FateStarvationIT.java
@@ -31,7 +31,7 @@ import org.junit.Test;
  */
 public class FateStarvationIT extends MacTest {
   
-  @Test(timeout=60*1000)
+  @Test(timeout=2 * 60 * 1000)
   public void run() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3b624e1a/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java b/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
index 3d1e787..ebaca28 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
@@ -56,7 +56,7 @@ public class GarbageCollectorIT extends MacTest {
     cfg.setSiteConfig(settings);
   }
   
-  @Test(timeout = 60 * 1000)
+  @Test(timeout = 2 * 60 * 1000)
   public void gcTest() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");
@@ -83,7 +83,7 @@ public class GarbageCollectorIT extends MacTest {
     gc.destroy();
   }
   
-  @Test(timeout = 60 * 1000)
+  @Test(timeout = 2 * 60 * 1000)
   public void gcLotsOfCandidatesIT() throws Exception {
     log.info("Filling !METADATA table with bogus delete flags");
     Connector c = getConnector();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3b624e1a/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java b/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
index 6e06934..4777659 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/LargeRowIT.java
@@ -53,7 +53,7 @@ public class LargeRowIT extends MacTest {
   private static final int NUM_PRE_SPLITS = 9;
   private static final int SPLIT_THRESH = ROW_SIZE * NUM_ROWS / NUM_PRE_SPLITS;
   
-  @Test(timeout=60*1000)
+  @Test(timeout=2 * 60 * 1000)
   public void run() throws Exception {
     Random r = new Random();
     byte rowData[] = new byte[ROW_SIZE];

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3b624e1a/test/src/test/java/org/apache/accumulo/test/functional/MetadataSplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MetadataSplitIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MetadataSplitIT.java
index 35f6251..b18324a 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/MetadataSplitIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MetadataSplitIT.java
@@ -35,7 +35,7 @@ public class MetadataSplitIT extends MacTest {
     cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_MAJC_DELAY.getKey(), "100ms"));
   }
  
-  @Test(timeout = 30 * 1000)
+  @Test(timeout = 60 * 1000)
   public void test() throws Exception {
     Connector c = getConnector();
     assertEquals(1, c.tableOperations().listSplits(MetadataTable.NAME).size());

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3b624e1a/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
index 94369a3..ce5b817 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
@@ -38,7 +38,7 @@ public class ScanRangeIT extends MacTest {
   private static final int CF_LIMIT = 5;
   private static final int ROW_LIMIT = 100;
   
-  @Test(timeout=30*1000)
+  @Test(timeout=60*1000)
   public void run() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("table1");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3b624e1a/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
index cce2af5..e1243aa 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
@@ -43,7 +43,7 @@ import org.junit.Test;
 
 public class TableIT extends MacTest {
   
-  @Test(timeout = 60 * 1000)
+  @Test(timeout = 2 * 60 * 1000)
   public void test() throws Exception {
     Connector c = getConnector();
     TableOperations to = c.tableOperations();
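
All of the edits in this commit use the same JUnit 4 idiom: the timeout attribute is given in milliseconds, and JUnit fails the test if the method is still running when it expires. For example:

    import org.junit.Test;

    public class TimeoutExample {
      // JUnit runs the method in a watched thread and fails the test if it
      // is still going after two minutes.
      @Test(timeout = 2 * 60 * 1000)
      public void slowClusterOperation() throws Exception {
        // ... exercise the mini cluster ...
      }
    }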


[26/50] [abbrv] git commit: ACCUMULO-1537 moved integration tests to test/src/test; added more timeouts to tests

Posted by ct...@apache.org.
ACCUMULO-1537 moved integration tests to test/src/test; added more timeouts to tests

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1501750 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/868ef44a
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/868ef44a
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/868ef44a

Branch: refs/heads/ACCUMULO-1496
Commit: 868ef44a281f781313ac959d0eb9ef5ccabb900f
Parents: 7a1075a
Author: Eric C. Newton <ec...@apache.org>
Authored: Wed Jul 10 12:30:26 2013 +0000
Committer: Eric C. Newton <ec...@apache.org>
Committed: Wed Jul 10 12:30:26 2013 +0000

----------------------------------------------------------------------
 .../test/functional/MasterFailoverIT.java       | 59 --------------------
 .../test/functional/SplitRecoveryIT.java        | 30 ----------
 .../test/functional/LateLastContactIT.java      |  2 +-
 .../test/functional/MasterFailoverIT.java       | 59 ++++++++++++++++++++
 .../accumulo/test/functional/NativeMapIT.java   |  2 +-
 .../accumulo/test/functional/RestartIT.java     | 14 ++---
 .../test/functional/SplitRecoveryIT.java        | 30 ++++++++++
 7 files changed, 97 insertions(+), 99 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/868ef44a/test/src/main/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MasterFailoverIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
deleted file mode 100644
index 9934874..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Collections;
-
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.minicluster.MiniAccumuloConfig;
-import org.apache.accumulo.minicluster.ProcessReference;
-import org.apache.accumulo.minicluster.ServerType;
-import org.apache.accumulo.server.master.Master;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-import org.junit.Test;
-
-public class MasterFailoverIT extends MacTest {
-  
-  @Override
-  public void configure(MiniAccumuloConfig cfg) {
-    cfg.setSiteConfig(Collections.singletonMap(Property.INSTANCE_ZK_TIMEOUT.getKey(), "5s"));
-  }
-
-  @Test(timeout=30*1000)
-  public void test() throws Exception {
-    Connector c = getConnector();
-    c.tableOperations().create("test_ingest");
-    TestIngest.Opts opts = new TestIngest.Opts();
-    TestIngest.ingest(c, opts, BWOPTS);
-    for (ProcessReference master : cluster.getProcesses().get(ServerType.MASTER)) {
-      cluster.killProcess(ServerType.MASTER, master);
-    }
-    // start up a new one
-    Process p = cluster.exec(Master.class);
-    // talk to it
-    c.tableOperations().rename("test_ingest", "test_ingest2");
-    try {
-      VerifyIngest.Opts vopts = new VerifyIngest.Opts();
-      vopts.tableName = "test_ingest2";
-      VerifyIngest.verifyIngest(c, vopts, SOPTS);
-    } finally {
-      p.destroy();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/868ef44a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java b/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
deleted file mode 100644
index 6ada2c2..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import static org.junit.Assert.assertEquals;
-
-import org.junit.Test;
-
-public class SplitRecoveryIT extends MacTest {
-  
-  @Test(timeout=10*1000)
-  public void test() throws Exception {
-    assertEquals(0, cluster.exec(SplitRecoveryTest.class).waitFor());
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/868ef44a/test/src/test/java/org/apache/accumulo/test/functional/LateLastContactIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/LateLastContactIT.java b/test/src/test/java/org/apache/accumulo/test/functional/LateLastContactIT.java
index 7edb8df..24db37c 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/LateLastContactIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/LateLastContactIT.java
@@ -35,7 +35,7 @@ public class LateLastContactIT extends MacTest {
     cfg.setSiteConfig(Collections.singletonMap(Property.GENERAL_RPC_TIMEOUT.getKey(), "2s"));
   }
 
-  @Test
+  @Test(timeout=60*1000)
   public void test() throws Exception {
     Process zombie = cluster.exec(ZombieTServer.class);
     assertEquals(0, zombie.waitFor());

http://git-wip-us.apache.org/repos/asf/accumulo/blob/868ef44a/test/src/test/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MasterFailoverIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
new file mode 100644
index 0000000..9934874
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Collections;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.accumulo.minicluster.ProcessReference;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.server.master.Master;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.junit.Test;
+
+public class MasterFailoverIT extends MacTest {
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    cfg.setSiteConfig(Collections.singletonMap(Property.INSTANCE_ZK_TIMEOUT.getKey(), "5s"));
+  }
+
+  @Test(timeout=30*1000)
+  public void test() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    TestIngest.Opts opts = new TestIngest.Opts();
+    TestIngest.ingest(c, opts, BWOPTS);
+    for (ProcessReference master : cluster.getProcesses().get(ServerType.MASTER)) {
+      cluster.killProcess(ServerType.MASTER, master);
+    }
+    // start up a new one
+    Process p = cluster.exec(Master.class);
+    // talk to it
+    c.tableOperations().rename("test_ingest", "test_ingest2");
+    try {
+      VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+      vopts.tableName = "test_ingest2";
+      VerifyIngest.verifyIngest(c, vopts, SOPTS);
+    } finally {
+      p.destroy();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/868ef44a/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java b/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
index b5b6953..ec00fe5 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/NativeMapIT.java
@@ -22,7 +22,7 @@ import org.junit.Test;
 
 public class NativeMapIT extends MacTest {
   
-  @Test
+  @Test(timeout=15*1000)
   public void test() throws Exception {
     assertEquals(0, cluster.exec(NativeMapTest.class).waitFor());
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/868ef44a/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java b/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
index 9c6549e..523773e 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
@@ -48,7 +48,7 @@ public class RestartIT extends MacTest {
   private static final VerifyIngest.Opts VOPTS = new VerifyIngest.Opts();
   private static final BatchWriterOpts BWOPTS = new BatchWriterOpts();
   
-  @Test
+  @Test(timeout=30*1000)
   public void restartMaster() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");
@@ -64,7 +64,7 @@ public class RestartIT extends MacTest {
     ingest.destroy();
   }
   
-  @Test
+  @Test(timeout=30*1000)
   public void restartMasterRecovery() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");
@@ -84,7 +84,7 @@ public class RestartIT extends MacTest {
     VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
   }
   
-  @Test
+  @Test(timeout=30*1000)
   public void restartMasterSplit() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");
@@ -101,7 +101,7 @@ public class RestartIT extends MacTest {
     ingest.destroy();
   }
   
-  @Test
+  @Test(timeout=30*1000)
   public void killedTabletServer() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");
@@ -116,7 +116,7 @@ public class RestartIT extends MacTest {
     }
   }
 
-  @Test
+  @Test(timeout=60*1000)
   public void killedTabletServerDuringShutdown() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");
@@ -127,7 +127,7 @@ public class RestartIT extends MacTest {
     assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
   }
   
-  @Test
+  @Test(timeout=30*1000)
   public void shutdownDuringCompactingSplitting() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");
@@ -139,6 +139,4 @@ public class RestartIT extends MacTest {
     VerifyIngest.verifyIngest(c, VOPTS, SOPTS);
     assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
   }
-  
-  
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/868ef44a/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
new file mode 100644
index 0000000..6ada2c2
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+
+import org.junit.Test;
+
+public class SplitRecoveryIT extends MacTest {
+  
+  @Test(timeout=10*1000)
+  public void test() throws Exception {
+    assertEquals(0, cluster.exec(SplitRecoveryTest.class).waitFor());
+  }
+  
+}


[21/50] [abbrv] git commit: ACCUMULO-1557 applying Jonathan Hsieh's patch

Posted by ct...@apache.org.
ACCUMULO-1557 applying Jonathan Hsieh's patch

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1500880 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/2d479640
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/2d479640
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/2d479640

Branch: refs/heads/ACCUMULO-1496
Commit: 2d4796403804aee174a67836190f11bf74dc2c5b
Parents: 78607f9 d7a7fbc
Author: Eric C. Newton <ec...@apache.org>
Authored: Mon Jul 8 18:49:34 2013 +0000
Committer: Eric C. Newton <ec...@apache.org>
Committed: Mon Jul 8 18:49:34 2013 +0000

----------------------------------------------------------------------
 test/system/auto/simple/zooCacheTest.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------



[35/50] [abbrv] git commit: ACCUMULO-1537 increase timeouts, fix TableIT

Posted by ct...@apache.org.
ACCUMULO-1537 increase timeouts, fix TableIT

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1502336 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/f43930d0
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/f43930d0
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/f43930d0

Branch: refs/heads/ACCUMULO-1496
Commit: f43930d00298c3dde6a4abd75702b2d7007c2ce3
Parents: 65b5a3a
Author: Eric C. Newton <ec...@apache.org>
Authored: Thu Jul 11 19:33:08 2013 +0000
Committer: Eric C. Newton <ec...@apache.org>
Committed: Thu Jul 11 19:33:08 2013 +0000

----------------------------------------------------------------------
 .../org/apache/accumulo/test/functional/MergeMetaIT.java  |  2 +-
 .../org/apache/accumulo/test/functional/ReadWriteIT.java  |  2 +-
 .../org/apache/accumulo/test/functional/RestartIT.java    | 10 +++++-----
 .../java/org/apache/accumulo/test/functional/TableIT.java |  1 +
 4 files changed, 8 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/f43930d0/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java b/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java
index 2041232..f9c8b3c 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MergeMetaIT.java
@@ -37,7 +37,7 @@ import org.junit.Test;
 
 public class MergeMetaIT extends MacTest {
   
-  @Test(timeout = 30 * 1000)
+  @Test(timeout = 60 * 1000)
   public void mergeMeta() throws Exception {
     Connector c = getConnector();
     SortedSet<Text> splits = new TreeSet<Text>();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/f43930d0/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
index 7c74326..e6d08a6 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
@@ -232,7 +232,7 @@ public class ReadWriteIT extends MacTest {
     assertTrue(foundFile);
   }
   
-  @Test(timeout = 90 * 1000)
+  @Test(timeout = 2* 60 * 1000)
   public void localityGroupChange() throws Exception {
     // Make changes to locality groups and ensure nothing is lost
     final Connector connector = getConnector();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/f43930d0/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java b/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
index 523773e..2053916 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/RestartIT.java
@@ -48,7 +48,7 @@ public class RestartIT extends MacTest {
   private static final VerifyIngest.Opts VOPTS = new VerifyIngest.Opts();
   private static final BatchWriterOpts BWOPTS = new BatchWriterOpts();
   
-  @Test(timeout=30*1000)
+  @Test(timeout=60*1000)
   public void restartMaster() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");
@@ -64,7 +64,7 @@ public class RestartIT extends MacTest {
     ingest.destroy();
   }
   
-  @Test(timeout=30*1000)
+  @Test(timeout=60*1000)
   public void restartMasterRecovery() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");
@@ -101,7 +101,7 @@ public class RestartIT extends MacTest {
     ingest.destroy();
   }
   
-  @Test(timeout=30*1000)
+  @Test(timeout= 60 * 1000)
   public void killedTabletServer() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");
@@ -116,7 +116,7 @@ public class RestartIT extends MacTest {
     }
   }
 
-  @Test(timeout=60*1000)
+  @Test(timeout=2 * 60 * 1000)
   public void killedTabletServerDuringShutdown() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");
@@ -127,7 +127,7 @@ public class RestartIT extends MacTest {
     assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
   }
   
-  @Test(timeout=30*1000)
+  @Test(timeout= 60 * 1000)
   public void shutdownDuringCompactingSplitting() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/f43930d0/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
index e1243aa..8f2244b 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
@@ -50,6 +50,7 @@ public class TableIT extends MacTest {
     to.create("test_ingest");
     TestIngest.Opts opts = new TestIngest.Opts();
     TestIngest.ingest(c, opts, new BatchWriterOpts());
+    to.flush("test_ingest", null, null, true);
     VerifyIngest.Opts vopts = new VerifyIngest.Opts();
     VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
     String id = to.tableIdMap().get("test_ingest");
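
The one-line TableIT change is the actual fix here: ingested data can still sit in the in-memory map, so a test that immediately inspects the table's files can race with minor compaction. Flushing with wait=true makes the on-disk state deterministic before verification. The relevant call, shown in isolation:

    import org.apache.accumulo.core.client.Connector;

    public class FlushExample {
      // Flush the whole table (null start/end rows) and block until the
      // in-memory data has been written out to files.
      static void flushAndWait(Connector c, String table) throws Exception {
        c.tableOperations().flush(table, null, null, true);
      }
    }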


[19/50] [abbrv] git commit: ACCUMULO-1552 applying Jonathan Hsieh's patch to fix the typo

Posted by ct...@apache.org.
ACCUMULO-1552 applying Jonathan Hsieh's patch to fix the typo

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1500723 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/78607f95
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/78607f95
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/78607f95

Branch: refs/heads/ACCUMULO-1496
Commit: 78607f95ed006093206b3ca2c2be64fa370af88a
Parents: 98791b9 86f1a22
Author: Eric C. Newton <ec...@apache.org>
Authored: Mon Jul 8 13:41:13 2013 +0000
Committer: Eric C. Newton <ec...@apache.org>
Committed: Mon Jul 8 13:41:13 2013 +0000

----------------------------------------------------------------------
 test/system/continuous/continuous-env.sh.example | 2 +-
 test/system/continuous/run-verify.sh             | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------



[32/50] [abbrv] ACCUMULO-998 applying Micheal Allen's updated patch for at-rest encryption

Posted by ct...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java b/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
index a23b6cc..ebdb2d1 100644
--- a/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
+++ b/core/src/test/java/org/apache/accumulo/core/file/rfile/RFileTest.java
@@ -20,10 +20,14 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
+import java.io.BufferedOutputStream;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
@@ -34,9 +38,13 @@ import java.util.Iterator;
 import java.util.Random;
 import java.util.Set;
 
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.SiteConfiguration;
 import org.apache.accumulo.core.data.ArrayByteSequence;
 import org.apache.accumulo.core.data.ByteSequence;
 import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
@@ -46,6 +54,10 @@ import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile;
 import org.apache.accumulo.core.file.rfile.RFile.Reader;
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 import org.apache.accumulo.core.iterators.system.ColumnFamilySkippingIterator;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.security.crypto.CryptoTest;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -58,10 +70,12 @@ import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.junit.Test;
 
+
 public class RFileTest {
   
   private static final Collection<ByteSequence> EMPTY_COL_FAMS = new ArrayList<ByteSequence>();
   
+  
   static {
     Logger.getLogger(org.apache.hadoop.io.compress.CodecPool.class).setLevel(Level.OFF);
     Logger.getLogger(org.apache.hadoop.util.NativeCodeLoader.class).setLevel(Level.OFF);
@@ -150,6 +164,8 @@ public class RFileTest {
   
   public static class TestRFile {
     
+    public File preGeneratedInputFile = null;
+    public File outputFile = null;
     private Configuration conf = CachedConfiguration.getInstance();
     public RFile.Writer writer;
     private ByteArrayOutputStream baos;
@@ -160,34 +176,56 @@ public class RFileTest {
     public SortedKeyValueIterator<Key,Value> iter;
 
     public void openWriter(boolean startDLG) throws IOException {
-      baos = new ByteArrayOutputStream();
-      dos = new FSDataOutputStream(baos, new FileSystem.Statistics("a"));
+      
+      if (outputFile == null) {
+        baos = new ByteArrayOutputStream();
+        
+        dos = new FSDataOutputStream(baos, new FileSystem.Statistics("a"));
+      } else {
+        BufferedOutputStream bufos = new BufferedOutputStream(new FileOutputStream(outputFile));
+        dos = new FSDataOutputStream(bufos, new FileSystem.Statistics("a"));       
+      }
       CachableBlockFile.Writer _cbw = new CachableBlockFile.Writer(dos, "gz", conf);
       writer = new RFile.Writer(_cbw, 1000, 1000);
       
       if (startDLG)
         writer.startDefaultLocalityGroup();
     }
-    
+        
     public void openWriter() throws IOException {
       openWriter(true);
     }
     
     public void closeWriter() throws IOException {
+      dos.flush();
       writer.close();
       dos.close();
-      baos.close();
+      if (baos != null) {
+        baos.close();
+      }
     }
     
     public void openReader() throws IOException {
-      byte[] data = baos.toByteArray();
+      
+      int fileLength = 0;
+      byte[] data = null;
+      if (preGeneratedInputFile != null) {
+        data = new byte[(int) preGeneratedInputFile.length()];
+        DataInputStream in = new DataInputStream(new FileInputStream(preGeneratedInputFile));
+        in.readFully(data);
+        in.close();
+      } else {
+        data = baos.toByteArray();
+      }
+      
       bais = new SeekableByteArrayInputStream(data);
       in = new FSDataInputStream(bais);
+      fileLength = data.length;
       
       LruBlockCache indexCache = new LruBlockCache(100000000, 100000);
       LruBlockCache dataCache = new LruBlockCache(100000000, 100000);
       
-      CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(in, data.length, conf, dataCache, indexCache);
+      CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(in, fileLength, conf, dataCache, indexCache);
       reader = new RFile.Reader(_cbr);
       iter = new ColumnFamilySkippingIterator(reader);
       
@@ -219,13 +257,13 @@ public class RFileTest {
   @Test
   public void test1() throws IOException {
     
-    // test an emprt file
+    // test an empty file
     
     TestRFile trf = new TestRFile();
     
     trf.openWriter();
     trf.closeWriter();
-    
+        
     trf.openReader();
     trf.iter.seek(new Range((Key) null, null), EMPTY_COL_FAMS, false);
     assertFalse(trf.iter.hasTop());
@@ -537,7 +575,7 @@ public class RFileTest {
   
   @Test
   public void test7() throws IOException {
-    // these test excercise setting the end key of a range
+    // these tests exercise setting the end key of a range
     
     TestRFile trf = new TestRFile();
     
@@ -1228,8 +1266,8 @@ public class RFileTest {
       count++;
       indexIter.next();
     }
-    
-    assert (count > 4);
+
+    assert(count > 4);
     
     trf.iter.seek(new Range(nk("r0000", "cf1", "cq1", "", 1), true, nk("r0001", "cf1", "cq1", "", 1), false), EMPTY_COL_FAMS, false);
     
@@ -1565,4 +1603,602 @@ public class RFileTest {
     
     reader.close();
   }
+  
+  
+  private AccumuloConfiguration setAndGetAccumuloConfig(String cryptoConfSetting) {  
+    @SuppressWarnings("deprecation")
+    AccumuloConfiguration conf = AccumuloConfiguration.getSiteConfiguration();
+    System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, cryptoConfSetting);
+    ((SiteConfiguration)conf).clearAndNull();
+    return conf;
+  }
+  
+  private void restoreOldConfiguration(String oldSiteConfigProperty, AccumuloConfiguration conf) {
+    if (oldSiteConfigProperty != null) {
+      System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, oldSiteConfigProperty);
+    } else {
+      System.clearProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    }
+    ((SiteConfiguration)conf).clearAndNull();
+  }
+
+
+  @Test
+  public void testEncRFile1() throws Exception {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
+
+    test1();
+    
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+
+  @Test
+  public void testEncRFile2() throws Exception {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
+
+    test2();
+    
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+  
+  @Test
+  public void testEncRFile3() throws Exception {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
+
+    test3();
+    
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+
+  @Test
+  public void testEncRFile4() throws Exception {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
+
+    test4();
+    
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+
+  @Test
+  public void testEncRFile5() throws Exception {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
+
+    test5();
+    
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+
+  @Test
+  public void testEncRFile6() throws Exception {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
+
+    test6();
+    
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+
+  @Test
+  public void testEncRFile7() throws Exception {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
+
+    test7();
+    
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+
+  @Test
+  public void testEncRFile8() throws Exception {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
+
+    test8();
+    
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+  
+  @Test
+  public void testEncRFile9() throws Exception {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
+
+    test9();
+    
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+
+  @Test
+  public void testEncRFile10() throws Exception {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
+
+    test10();
+    
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+
+  @Test
+  public void testEncRFile11() throws Exception {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
+
+    test11();
+    
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+  
+  
+  @Test
+  public void testEncRFile12() throws Exception {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
+
+    test12();
+    
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+  
+  @Test
+  public void testEncRFile13() throws Exception {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
+
+    test13();
+    
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+
+  @Test
+  public void testEncRFile14() throws Exception {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
+
+    test14();
+    
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+  
+  @Test
+  public void testEncRFile16() throws Exception {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
+
+    test16();
+    
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+  
+  @Test
+  public void testEncRFile17() throws Exception {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
+
+    test17();
+    
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+  
+  @Test
+  public void testEncRFile18() throws Exception {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
+
+    test18();
+    
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+  
+  @Test
+  public void testEncRFile19() throws Exception {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
+
+    test19();
+    
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+
+  //@Test // disabled: superseded by the individual testEncRFile* cases above
+  public void testEncryptedRFiles() throws Exception {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    @SuppressWarnings("deprecation")
+    AccumuloConfiguration conf = AccumuloConfiguration.getSiteConfiguration();
+    System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, CryptoTest.CRYPTO_ON_CONF);
+    ((SiteConfiguration)conf).clearAndNull();
+    
+    test1();
+    test2();
+    test3();
+    test4();
+    test5();
+    test6();
+    test7();
+    test8();
+    
+    if (oldSiteConfigProperty != null) {
+      System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, oldSiteConfigProperty);
+    } else {
+      System.clearProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    }
+    ((SiteConfiguration)conf).clearAndNull();
+  }
+  
+  //@Test // disabled: requires a pre-generated RFile at /tmp/should_work.rf
+  public void testRootTabletFromServer() throws Exception {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    @SuppressWarnings("deprecation")
+    AccumuloConfiguration conf = AccumuloConfiguration.getSiteConfiguration();
+    System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, CryptoTest.CRYPTO_ON_CONF);
+    ((SiteConfiguration)conf).clearAndNull();
+
+    TestRFile trf = new TestRFile();
+    trf.preGeneratedInputFile = new File("/tmp/should_work.rf");
+    
+    trf.openReader();
+    trf.iter.seek(new Range((Key) null, null), EMPTY_COL_FAMS, false);
+    assertTrue(trf.iter.hasTop());
+    
+    assertTrue(trf.reader.getLastKey() != null);
+    
+    trf.closeReader();
+   
+    
+    if (oldSiteConfigProperty != null) {
+      System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, oldSiteConfigProperty);
+    } else {
+      System.clearProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    }
+    ((SiteConfiguration)conf).clearAndNull();
+
+  }  
+  
+  private static final int TOTAL_NUM_ROWS = 10;
+  private static final int ROW_MOD_VALUE = 10;
+  
+  //@Test
+  // These tests will purge the disk cache when they run, so it is not recommended to run them on development systems.
+  public void testEncryptedRFileWriteSpeed() throws Exception {
+
+    
+    System.out.println("Unencrypted Write, Unencrypted Read (Cache), Unencrypted Read (FS only), Encrypted Write, Encrypted Read (Cache), Encrypted Read (FS Only)");
+    int numIterations = 1;
+    
+    for (int i = 0; i < numIterations; i++) {
+      @SuppressWarnings("deprecation")
+      AccumuloConfiguration conf = AccumuloConfiguration.getSiteConfiguration();
+      System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, CryptoTest.CRYPTO_OFF_CONF);
+      ((SiteConfiguration)conf).clearAndNull();
+   
+      TestRFile trf = new TestRFile();
+      trf.outputFile = new File("/tmp/testUnencryptedRfile.rf");
+      trf.openWriter();
+      
+      double timeTickSize = 1000.0;
+      int numRowsRead = 0;
+
+
+      try {
+        
+        performUnencryptedTests(trf, TOTAL_NUM_ROWS, ROW_MOD_VALUE, timeTickSize, true);
+        
+        performEncryptedTests(TOTAL_NUM_ROWS, ROW_MOD_VALUE, timeTickSize, numRowsRead, false);
+        
+      } catch (Exception e) {
+        e.printStackTrace();
+        throw e;
+      }
+    }
+    
+  }
+
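+  // Times the row writes, then two full read passes: one immediately after the write (page cache warm) and one
+  // after purging the OS disk cache (the "purge" exec assumes an OS X-style command on the PATH). Timings are
+  // printed as comma-separated columns.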
+  private void performUnencryptedTests(TestRFile trf, int totalNumRows, int rowModValue, double timeTickSize, boolean first) throws IOException, InterruptedException {
+    long start = System.currentTimeMillis();
+    
+    
+    writeRowsToRfile(trf, totalNumRows, rowModValue);
+    
+    long end = System.currentTimeMillis();
+    
+    System.out.print(""+((end - start) / timeTickSize) + ", ");
+    
+    trf.preGeneratedInputFile = trf.outputFile;
+          
+    start = System.currentTimeMillis();
+ 
+    trf.openReader();
+    trf.iter.seek(new Range((Key) null, null), EMPTY_COL_FAMS, false);
+    int numRowsRead = 0;
+    
+    int numRowsToRead = totalNumRows;
+    while (numRowsRead < totalNumRows) {
+      int numRowsReadThisTime = readRandomRowsFromRfile(trf, totalNumRows, numRowsToRead);
+      
+      numRowsToRead -= numRowsReadThisTime;
+      numRowsRead += numRowsReadThisTime;
+    }
+    
+    trf.closeReader();
+    
+    end = System.currentTimeMillis();
+    
+    System.out.print(""+((end - start) / timeTickSize) + ", ");
+
+    Runtime.getRuntime().exec("purge").waitFor();
+
+    start = System.currentTimeMillis();
+    
+    trf.openReader();
+    trf.iter.seek(new Range((Key) null, null), EMPTY_COL_FAMS, false);
+    numRowsRead = 0;
+    
+    numRowsToRead = totalNumRows;
+    while (numRowsRead < totalNumRows) {
+      int numRowsReadThisTime = readRandomRowsFromRfile(trf, totalNumRows, numRowsToRead);
+      
+      numRowsToRead -= numRowsReadThisTime;
+      numRowsRead += numRowsReadThisTime;
+    }
+    
+    trf.closeReader();
+    
+    end = System.currentTimeMillis();
+    
+    
+    if (first) {
+      System.out.print(""+((end - start) / timeTickSize)+", ");
+    } else {
+      System.out.println(""+((end - start) / timeTickSize));
+      
+    }
+    
+    //trf.outputFile.delete();
+  }
+
+  @SuppressWarnings("deprecation")
+  private void performEncryptedTests(int totalNumRows, int rowModValue, double timeTickSize, int numRowsRead, boolean first) throws IOException, InterruptedException {
+    AccumuloConfiguration conf;
+    TestRFile trf;
+    long start;
+    long end;
+    int numRowsToRead;
+    
+    conf = AccumuloConfiguration.getSiteConfiguration();
+    System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, CryptoTest.CRYPTO_ON_CONF);
+    ((SiteConfiguration)conf).clearAndNull();
+    
+    trf = new TestRFile();
+    trf.outputFile = new File("/tmp/testEncryptedRfile.rf");
+    trf.openWriter();
+    
+    start = System.currentTimeMillis();
+    
+    writeRowsToRfile(trf, totalNumRows, rowModValue);
+    
+    end = System.currentTimeMillis();
+ 
+    System.out.print(""+((end - start) / timeTickSize) + ", ");
+ 
+    trf.preGeneratedInputFile = trf.outputFile;
+    
+    start = System.currentTimeMillis();
+ 
+    trf.openReader();
+    trf.iter.seek(new Range((Key) null, null), EMPTY_COL_FAMS, false);
+    
+    numRowsToRead = totalNumRows;
+    while (numRowsRead < totalNumRows) {
+      int numRowsReadThisTime = readRandomRowsFromRfile(trf, totalNumRows, numRowsToRead);
+      
+      numRowsToRead -= numRowsReadThisTime;
+      numRowsRead += numRowsReadThisTime;
+    }
+    
+    trf.closeReader();
+    
+    end = System.currentTimeMillis();
+    
+    System.out.print(""+((end - start) / timeTickSize)+", ");
+
+    Runtime.getRuntime().exec("purge").waitFor();
+
+    start = System.currentTimeMillis();
+    
+    trf.openReader();
+    trf.iter.seek(new Range((Key) null, null), EMPTY_COL_FAMS, false);
+    numRowsRead = 0;
+    
+    numRowsToRead = totalNumRows;
+    while (numRowsRead < totalNumRows) {
+      int numRowsReadThisTime = readRandomRowsFromRfile(trf, totalNumRows, numRowsToRead);
+      
+      numRowsToRead -= numRowsReadThisTime;
+      numRowsRead += numRowsReadThisTime;
+    }
+    
+    trf.closeReader();
+    
+    end = System.currentTimeMillis();
+    
+    if (first) {
+      System.out.print(""+((end - start) / timeTickSize)+", ");
+    } else {
+      System.out.println(""+((end - start) / timeTickSize));
+    }
+    
+    trf.outputFile.delete();
+  }
+
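+  // Seeks the iterator to a small random row range (at most maxRowsToRead rows) and reads it; returns the
+  // number of rows consumed so callers can track overall progress.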
+  private int readRandomRowsFromRfile(TestRFile trf, int totalRowCount, int maxRowsToRead) throws IOException {
+    if (maxRowsToRead <= 0) {
+      return 0;
+    }
+    
+    int numRowsRead = 0;
+    Random rand = new Random(System.nanoTime());
+    
+    int firstKeyNum = rand.nextInt(totalRowCount); // avoids the Math.abs(Integer.MIN_VALUE) edge case
+    //int lastKeyNum = rand.nextInt(totalRowCount);
+    int lastKeyNum = firstKeyNum + 1;
+    
+    if (lastKeyNum >= totalRowCount) {
+      lastKeyNum = firstKeyNum;
+    }
+    
+    if (lastKeyNum < firstKeyNum) {
+      int temp = lastKeyNum;
+      lastKeyNum = firstKeyNum;
+      firstKeyNum = temp;
+    }
+    
+    if (lastKeyNum - firstKeyNum > maxRowsToRead) {
+      lastKeyNum = firstKeyNum + maxRowsToRead;
+    }
+    
+    Key firstKey = nk(nf("r_", firstKeyNum), "cf_0", "cq_0", "vis", 0L);
+    Key lastKey = nk(nf("r_", lastKeyNum), "cf_19", "cq_19", "vis", 0L);
+    
+    trf.iter.seek(new Range(firstKey, lastKey), EMPTY_COL_FAMS, false);
+    for (int i = firstKeyNum; i < lastKeyNum; i++) {
+      @SuppressWarnings("unused")
+      Key k = trf.iter.getTopKey();
+      @SuppressWarnings("unused")
+      Value v = trf.iter.getTopValue();
+      
+      trf.iter.next();
+      
+      numRowsRead++;
+    }
+    
+    return numRowsRead;
+    
+  }
+
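+  // Appends numRowsToWriteAndRead entries (row r_<i % rowModValue>, with 20 rotating cf/cq values) and then
+  // closes the writer via closeWriter().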
+  private void writeRowsToRfile(TestRFile trf, int numRowsToWriteAndRead, int rowModValue) throws IOException {
+    for (int i = 0; i < numRowsToWriteAndRead; i++) {
+      String rowID = nf("r_", (i % rowModValue));
+      String colFam = nf("cf_", (i % 20));
+      String colQual = nf("cq_", (i % 20));
+      String colVis = "vis";
+      
+      Key k = nk(rowID, colFam, colQual, colVis, i);
+      Value v = nv(""+i);
+      
+      trf.writer.append(k, v);
+    }
+    
+    trf.closeWriter();
+  }
+    
+  
+  @Test
+  public void testRootTabletEncryption() throws Exception {
+    
+    // This tests that the normal set of operations used to populate a root tablet still works when encryption is enabled.
+    
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    @SuppressWarnings("deprecation")
+    AccumuloConfiguration conf = AccumuloConfiguration.getSiteConfiguration();
+    System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, CryptoTest.CRYPTO_ON_CONF);
+    ((SiteConfiguration)conf).clearAndNull();
+
+    // populate the root tablet with info about the default tablet
+    // the root tablet contains the key extent and locations of all the
+    // metadata tablets
+    //String initRootTabFile = ServerConstants.getMetadataTableDir() + "/root_tablet/00000_00000."
+      //  + FileOperations.getNewFileExtension(AccumuloConfiguration.getDefaultConfiguration());
+    //FileSKVWriter mfw = FileOperations.getInstance().openWriter(initRootTabFile, fs, conf, AccumuloConfiguration.getDefaultConfiguration());
+    
+    TestRFile testRfile = new TestRFile();
+    testRfile.openWriter();
+    
+    RFile.Writer mfw = testRfile.writer;
+    
+    // mfw.startDefaultLocalityGroup(); // already started by testRfile.openWriter()
+    
+    Text tableExtent = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), MetadataSchema.TabletsSection.getRange().getEndKey().getRow()));
+    
+    // table tablet's directory
+    Key tableDirKey = new Key(tableExtent, TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily(),
+        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), 0);
+    mfw.append(tableDirKey, new Value(/*TABLE_TABLETS_TABLET_DIR*/"/table_info".getBytes()));
+    
+    // table tablet time
+    Key tableTimeKey = new Key(tableExtent, TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnFamily(),
+        TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnQualifier(), 0);
+    mfw.append(tableTimeKey, new Value((/*TabletTime.LOGICAL_TIME_ID*/ 'L' + "0").getBytes()));
+    
+    // table tablet's prevrow
+    Key tablePrevRowKey = new Key(tableExtent, TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(),
+        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier(), 0);
+    mfw.append(tablePrevRowKey, KeyExtent.encodePrevEndRow(null));
+    
+    // ----------] default tablet info
+    Text defaultExtent = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), null));
+    
+    // default's directory
+    Key defaultDirKey = new Key(defaultExtent, TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily(),
+        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), 0);
+    mfw.append(defaultDirKey, new Value(Constants.DEFAULT_TABLET_LOCATION.getBytes()));
+    
+    // default's time
+    Key defaultTimeKey = new Key(defaultExtent, TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnFamily(),
+        TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnQualifier(), 0);
+    mfw.append(defaultTimeKey, new Value((/*TabletTime.LOGICAL_TIME_ID*/ 'L' + "0").getBytes()));
+    
+    // default's prevrow
+    Key defaultPrevRowKey = new Key(defaultExtent, TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(),
+        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier(), 0);
+    mfw.append(defaultPrevRowKey, KeyExtent.encodePrevEndRow(MetadataSchema.TabletsSection.getRange().getEndKey().getRow()));
+    
+    
+    testRfile.closeWriter();
+    
+    if (true) { // toggle to dump the encrypted root tablet RFile to disk for inspection
+      FileOutputStream fileOutputStream = new FileOutputStream(new File("/tmp/testEncryptedRootFile.rf"));
+      fileOutputStream.write(testRfile.baos.toByteArray());
+      fileOutputStream.flush();
+      fileOutputStream.close();
+    }
+    
+    testRfile.openReader();
+    testRfile.iter.seek(new Range((Key) null, null), EMPTY_COL_FAMS, false);
+    assertTrue(testRfile.iter.hasTop());
+    
+    assertTrue(testRfile.reader.getLastKey() != null);
+    
+    testRfile.closeReader();
+
+    if (oldSiteConfigProperty != null) {
+      System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, oldSiteConfigProperty);
+    } else {
+      System.clearProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    }
+    ((SiteConfiguration)conf).clearAndNull();
+
+  }  
 }
+

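Note on the testEncRFile* pattern above: restoreOldConfiguration() is only reached when the
delegated test passes, so a failing test leaks the crypto system property into later tests.
A minimal sketch of the same pattern with a try/finally guard (using only the helpers already
in this patch, nothing new assumed):

    @Test
    public void testEncRFile1() throws Exception {
      String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
      AccumuloConfiguration conf = setAndGetAccumuloConfig(CryptoTest.CRYPTO_ON_CONF);
      try {
        test1(); // run the existing plaintext test under the crypto-on site configuration
      } finally {
        restoreOldConfiguration(oldSiteConfigProperty, conf); // always restore, even on failure
      }
    }
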
http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/core/src/test/java/org/apache/accumulo/core/security/crypto/CryptoTest.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/accumulo/core/security/crypto/CryptoTest.java b/core/src/test/java/org/apache/accumulo/core/security/crypto/CryptoTest.java
new file mode 100644
index 0000000..d37a7e0
--- /dev/null
+++ b/core/src/test/java/org/apache/accumulo/core/security/crypto/CryptoTest.java
@@ -0,0 +1,390 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.accumulo.core.security.crypto;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.security.InvalidKeyException;
+import java.security.Key;
+import java.security.NoSuchAlgorithmException;
+import java.security.NoSuchProviderException;
+import java.security.SecureRandom;
+import java.util.Arrays;
+
+import javax.crypto.BadPaddingException;
+import javax.crypto.Cipher;
+import javax.crypto.IllegalBlockSizeException;
+import javax.crypto.NoSuchPaddingException;
+import javax.crypto.spec.SecretKeySpec;
+
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.SiteConfiguration;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+public class CryptoTest {
+  
+  private static final int MARKER_INT = 0xCADEFEDD;
+  private static final String MARKER_STRING = "1 2 3 a b c";
+  public static final String CONFIG_FILE_SYSTEM_PROP = "org.apache.accumulo.config.file";
+  public static final String CRYPTO_ON_CONF = "crypto-on-accumulo-site.xml";
+  public static final String CRYPTO_OFF_CONF = "crypto-off-accumulo-site.xml";
+  public static final String CRYPTO_ON_KEK_OFF_CONF = "crypto-on-no-key-encryption-accumulo-site.xml"; 
+  
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+  
+  @Test
+  public void testNoCryptoStream() throws IOException {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CRYPTO_OFF_CONF);    
+    
+    CryptoModuleParameters params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf);
+    
+    assertNotNull(params);
+    assertEquals("NullCipher", params.getAlgorithmName());
+    assertNull(params.getEncryptionMode());
+    assertNull(params.getPadding());
+    
+    CryptoModule cryptoModule = CryptoModuleFactory.getCryptoModule(conf);
+    assertNotNull(cryptoModule);
+    assertTrue(cryptoModule instanceof CryptoModuleFactory.NullCryptoModule);
+    
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    
+    params.setPlaintextOutputStream(out);
+    
+    params = cryptoModule.getEncryptingOutputStream(params);
+    assertNotNull(params.getEncryptedOutputStream());
+    assertEquals(out, params.getEncryptedOutputStream());
+    
+
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+  
+  @Test
+  public void testCryptoModuleParamsParsing() {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CRYPTO_ON_CONF);    
+
+    CryptoModuleParameters params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf);
+    
+    assertNotNull(params);
+    assertEquals("AES", params.getAlgorithmName());
+    assertEquals("CFB", params.getEncryptionMode());
+    assertEquals("PKCS5Padding", params.getPadding());
+    assertEquals(128, params.getKeyLength());
+    assertEquals("SHA1PRNG", params.getRandomNumberGenerator());
+    assertEquals("SUN", params.getRandomNumberGeneratorProvider());
+    assertEquals("org.apache.accumulo.core.security.crypto.DefaultSecretKeyEncryptionStrategy", params.getKeyEncryptionStrategyClass());
+    
+    restoreOldConfiguration(oldSiteConfigProperty, conf);    
+  }
+  
+  @Test
+  public void testCryptoModuleParamsValidation1() throws IOException {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CRYPTO_ON_CONF);    
+   
+    try {
+      
+      CryptoModuleParameters params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf);
+      CryptoModule cryptoModule = CryptoModuleFactory.getCryptoModule(conf);
+      
+      assertTrue(cryptoModule instanceof DefaultCryptoModule);
+      
+      exception.expect(RuntimeException.class);
+      cryptoModule.getEncryptingOutputStream(params);
+      
+      
+    } finally {
+      restoreOldConfiguration(oldSiteConfigProperty, conf);             
+    }
+  }
+
+  @Test
+  public void testCryptoModuleParamsValidation2() throws IOException {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CRYPTO_ON_CONF);    
+   
+    try {
+      
+      CryptoModuleParameters params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf);
+      CryptoModule cryptoModule = CryptoModuleFactory.getCryptoModule(conf);
+      
+      assertTrue(cryptoModule instanceof DefaultCryptoModule);
+      
+      exception.expect(RuntimeException.class);
+      cryptoModule.getDecryptingInputStream(params);
+    } finally {
+      restoreOldConfiguration(oldSiteConfigProperty, conf);             
+    }
+  }
+  
+  private String getStringifiedBytes(String s) throws IOException {
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    DataOutputStream dataOut = new DataOutputStream(out);
+    
+    dataOut.writeUTF(s);
+    dataOut.close();
+    byte[] stringMarkerBytes = out.toByteArray();
+    return Arrays.toString(stringMarkerBytes);
+    
+  }
+  
+  private String getStringifiedBytes(int i) throws IOException {
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    DataOutputStream dataOut = new DataOutputStream(out);
+    
+    dataOut.writeInt(i);
+    dataOut.close();
+    byte[] stringMarkerBytes = out.toByteArray();
+    return Arrays.toString(stringMarkerBytes);
+    
+  }
+
+  @Test
+  public void testCryptoModuleBasicReadWrite() throws IOException {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CRYPTO_ON_KEK_OFF_CONF);    
+  
+    CryptoModule cryptoModule = CryptoModuleFactory.getCryptoModule(conf);
+    CryptoModuleParameters params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf);
+    
+    assertTrue(cryptoModule instanceof DefaultCryptoModule);
+    assertTrue(params.getKeyEncryptionStrategyClass() == null || params.getKeyEncryptionStrategyClass().equals(""));
+    
+    byte[] resultingBytes = setUpSampleEncryptedBytes(cryptoModule, params);
+    
+    // If we get here, we have encrypted bytes
+    ByteArrayInputStream in = new ByteArrayInputStream(resultingBytes);
+    
+    params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf);
+    params.setEncryptedInputStream(in);
+    
+    params = cryptoModule.getDecryptingInputStream(params);
+    
+    InputStream plaintextIn = params.getPlaintextInputStream();
+    
+    assertNotNull(plaintextIn);
+    assertTrue(plaintextIn != in);
+    DataInputStream dataIn = new DataInputStream(plaintextIn);
+    String markerString = dataIn.readUTF();
+    int markerInt = dataIn.readInt();
+    
+    assertEquals(MARKER_STRING, markerString);
+    assertEquals(MARKER_INT, markerInt);
+    
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+
+  private byte[] setUpSampleEncryptedBytes(CryptoModule cryptoModule, CryptoModuleParameters params) throws IOException {
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    
+    params.setPlaintextOutputStream(out);
+    
+    params = cryptoModule.getEncryptingOutputStream(params);
+    
+    assertNotNull(params.getEncryptedOutputStream());
+    assertTrue(params.getEncryptedOutputStream() != out);
+    
+    DataOutputStream dataOut = new DataOutputStream(params.getEncryptedOutputStream());
+    dataOut.writeUTF(MARKER_STRING);
+    dataOut.writeInt(MARKER_INT);
+    dataOut.close();
+    
+    byte[] resultingBytes = out.toByteArray();
+    String stringifiedBytes = Arrays.toString(resultingBytes);
+    
+    String stringifiedMarkerBytes = getStringifiedBytes(MARKER_STRING);
+    String stringifiedOtherBytes = getStringifiedBytes(MARKER_INT);
+    
+    
+    // OK, let's make sure it's encrypted
+    assertTrue(!stringifiedBytes.contains(stringifiedMarkerBytes));
+    assertTrue(!stringifiedBytes.contains(stringifiedOtherBytes));
+    return resultingBytes;
+  }
+  
+  @Test
+  public void testKeyEncryptionAndCheckThatFileCannotBeReadWithoutKEK() throws IOException {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CRYPTO_ON_CONF);    
+  
+    CryptoModule cryptoModule = CryptoModuleFactory.getCryptoModule(conf);
+    CryptoModuleParameters params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf);
+
+    assertTrue(cryptoModule instanceof DefaultCryptoModule);
+    assertNotNull(params.getKeyEncryptionStrategyClass());
+    assertEquals("org.apache.accumulo.core.security.crypto.DefaultSecretKeyEncryptionStrategy", params.getKeyEncryptionStrategyClass());
+    
+    byte[] resultingBytes = setUpSampleEncryptedBytes(cryptoModule, params);
+
+    // So now that we have bytes encrypted by a key encrypted to a KEK, turn off the KEK configuration and try
+    // to decrypt.  We expect this to fail.  This also tests our ability to override the key encryption strategy.
+    conf = setAndGetAccumuloConfig(CRYPTO_ON_KEK_OFF_CONF);
+    params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf);
+    params.setOverrideStreamsSecretKeyEncryptionStrategy(true);
+    
+    ByteArrayInputStream in = new ByteArrayInputStream(resultingBytes);
+    params.setEncryptedInputStream(in);
+    
+    params = cryptoModule.getDecryptingInputStream(params);
+    
+    assertNotNull(params.getPlaintextInputStream());
+    DataInputStream dataIn = new DataInputStream(params.getPlaintextInputStream());
+    // We expect the following operation to fail and throw an exception
+    try {
+      exception.expect(IOException.class);
+      @SuppressWarnings("unused")
+      String markerString = dataIn.readUTF();
+    }
+    finally {
+      restoreOldConfiguration(oldSiteConfigProperty, conf);      
+    }
+ }
+
+  @Test
+  public void testKeyEncryptionNormalPath() throws IOException {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CRYPTO_ON_CONF);    
+
+    CryptoModule cryptoModule = CryptoModuleFactory.getCryptoModule(conf);
+    CryptoModuleParameters params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf);
+
+    assertTrue(cryptoModule instanceof DefaultCryptoModule);
+    assertNotNull(params.getKeyEncryptionStrategyClass());
+    assertEquals("org.apache.accumulo.core.security.crypto.DefaultSecretKeyEncryptionStrategy", params.getKeyEncryptionStrategyClass());
+    
+    byte[] resultingBytes = setUpSampleEncryptedBytes(cryptoModule, params);
+
+    params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf);
+    params.setOverrideStreamsSecretKeyEncryptionStrategy(true);
+    
+    ByteArrayInputStream in = new ByteArrayInputStream(resultingBytes);
+    params.setEncryptedInputStream(in);
+    
+    params = cryptoModule.getDecryptingInputStream(params);
+    
+    assertNotNull(params.getPlaintextInputStream());
+    DataInputStream dataIn = new DataInputStream(params.getPlaintextInputStream());
+
+    String markerString = dataIn.readUTF();
+    int markerInt = dataIn.readInt();
+    
+    assertEquals(MARKER_STRING, markerString);
+    assertEquals(MARKER_INT, markerInt);
+
+    restoreOldConfiguration(oldSiteConfigProperty, conf);
+  }
+  
+  @Test
+  public void testChangingCryptoParamsAndCanStillDecryptPreviouslyEncryptedFiles() throws IOException {
+    String oldSiteConfigProperty = System.getProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    AccumuloConfiguration conf = setAndGetAccumuloConfig(CRYPTO_ON_CONF);    
+
+    CryptoModule cryptoModule = CryptoModuleFactory.getCryptoModule(conf);
+    CryptoModuleParameters params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf);
+
+    assertTrue(cryptoModule instanceof DefaultCryptoModule);
+    assertNotNull(params.getKeyEncryptionStrategyClass());
+    assertEquals("org.apache.accumulo.core.security.crypto.DefaultSecretKeyEncryptionStrategy", params.getKeyEncryptionStrategyClass());
+    
+    byte[] resultingBytes = setUpSampleEncryptedBytes(cryptoModule, params);
+
+    // Now we're going to create a params object and set its algorithm and key length different
+    // from those configured within the site configuration.  After doing this, we should
+    // still be able to read the file that was created with a different set of parameters.
+    params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf);
+    params.setAlgorithmName("DESede");
+    params.setKeyLength(24 * 8);
+    
+    ByteArrayInputStream in = new ByteArrayInputStream(resultingBytes);
+    params.setEncryptedInputStream(in);
+    
+    params = cryptoModule.getDecryptingInputStream(params);
+    
+    assertNotNull(params.getPlaintextInputStream());
+    DataInputStream dataIn = new DataInputStream(params.getPlaintextInputStream());
+    String markerString = dataIn.readUTF();
+    int markerInt = dataIn.readInt();
+    
+    assertEquals(MARKER_STRING, markerString);
+    assertEquals(MARKER_INT, markerInt);
+
+    restoreOldConfiguration(oldSiteConfigProperty, conf);   
+  }
+  
+  private void restoreOldConfiguration(String oldSiteConfigProperty, AccumuloConfiguration conf) {
+    if (oldSiteConfigProperty != null) {
+      System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, oldSiteConfigProperty);
+    } else {
+      System.clearProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP);
+    }
+    ((SiteConfiguration)conf).clearAndNull();
+  }
+
+
+
+  private AccumuloConfiguration setAndGetAccumuloConfig(String cryptoConfSetting) {  
+    @SuppressWarnings("deprecation")
+    AccumuloConfiguration conf = AccumuloConfiguration.getSiteConfiguration();
+    System.setProperty(CryptoTest.CONFIG_FILE_SYSTEM_PROP, cryptoConfSetting);
+    ((SiteConfiguration)conf).clearAndNull();
+    return conf;
+  }
+  
+  @Test
+  public void testKeyWrapAndUnwrap() throws NoSuchAlgorithmException, NoSuchPaddingException, NoSuchProviderException, InvalidKeyException, IllegalBlockSizeException, BadPaddingException {
+    Cipher keyWrapCipher = Cipher.getInstance("AES/ECB/NoPadding");
+    SecureRandom random = SecureRandom.getInstance("SHA1PRNG", "SUN");
+    
+    byte[] kek = new byte[16];
+    random.nextBytes(kek);
+    byte[] randomKey = new byte[16];
+    random.nextBytes(randomKey);
+    
+    keyWrapCipher.init(Cipher.WRAP_MODE, new SecretKeySpec(kek, "AES"));
+    
+    Key randKey = new SecretKeySpec(randomKey, "AES");
+    
+    byte[] wrappedKey = keyWrapCipher.wrap(randKey);
+    
+    assertNotNull(wrappedKey);
+    assertEquals(randomKey.length, wrappedKey.length);
+
+    
+    Cipher keyUnwrapCipher = Cipher.getInstance("AES/ECB/NoPadding");
+    keyUnwrapCipher.init(Cipher.UNWRAP_MODE, new SecretKeySpec(kek, "AES"));
+    Key unwrappedKey = keyUnwrapCipher.unwrap(wrappedKey, "AES", Cipher.SECRET_KEY);
+    
+    byte[] unwrappedKeyBytes = unwrappedKey.getEncoded();
+    assertTrue(Arrays.equals(unwrappedKeyBytes, randomKey));
+    
+  }
+}

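For reference, the write/read contract these tests exercise reduces to the following round
trip (a condensed sketch using only the calls shown above; "payload" is an arbitrary marker):

    AccumuloConfiguration conf = setAndGetAccumuloConfig(CRYPTO_ON_CONF);
    CryptoModule module = CryptoModuleFactory.getCryptoModule(conf);

    // Encrypt: wrap a plaintext output stream, write through the encrypted view.
    CryptoModuleParameters params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    params.setPlaintextOutputStream(out);
    params = module.getEncryptingOutputStream(params);
    DataOutputStream dataOut = new DataOutputStream(params.getEncryptedOutputStream());
    dataOut.writeUTF("payload");
    dataOut.close();

    // Decrypt: hand the ciphertext back, read through the plaintext view.
    params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf);
    params.setEncryptedInputStream(new ByteArrayInputStream(out.toByteArray()));
    params = module.getDecryptingInputStream(params);
    DataInputStream dataIn = new DataInputStream(params.getPlaintextInputStream());
    assertEquals("payload", dataIn.readUTF());
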
http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/core/src/test/resources/crypto-off-accumulo-site.xml
----------------------------------------------------------------------
diff --git a/core/src/test/resources/crypto-off-accumulo-site.xml b/core/src/test/resources/crypto-off-accumulo-site.xml
new file mode 100644
index 0000000..667e9a3
--- /dev/null
+++ b/core/src/test/resources/crypto-off-accumulo-site.xml
@@ -0,0 +1,111 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+	<!--
+  Put your site-specific accumulo configurations here.
+
+  The available configuration values along with their defaults
+  are documented in docs/config.html
+
+  Unless you are simply testing at your workstation, you will most 
+  definitely need to change the three entries below.
+	-->
+
+    <property>
+      <name>instance.zookeeper.host</name>
+      <value>localhost:2181</value>
+      <description>comma separated list of zookeeper servers</description>
+    </property>
+
+    <property>
+      <name>logger.dir.walog</name>
+      <value>walogs</value>
+      <description>The directory used to store write-ahead logs on the local filesystem. It is possible to specify a comma-separated list of directories.</description>
+    </property>
+    
+    <property>
+      <name>instance.secret</name>
+      <value>DEFAULT</value>
+      <description>A secret unique to a given instance that all servers must know in order to communicate with one another. 
+                   Change it before initialization. To change it later use ./bin/accumulo org.apache.accumulo.server.util.ChangeSecret [oldpasswd] [newpasswd], 
+                   and then update this file.
+      </description>
+    </property>
+
+    <property>
+      <name>tserver.memory.maps.max</name>
+      <value>80M</value>
+    </property>
+    
+    <property>
+      <name>tserver.cache.data.size</name>
+      <value>7M</value>
+    </property>
+    
+    <property>
+      <name>tserver.cache.index.size</name>
+      <value>20M</value>
+    </property>
+    
+    <property>
+      <name>trace.password</name>
+      <!-- 
+        change this to the root user's password, and/or change the user below 
+       -->
+      <value>password</value>
+    </property>
+    
+    <property>
+      <name>trace.user</name>
+      <value>root</value>
+    </property>
+    
+    <property>
+      <name>tserver.sort.buffer.size</name>
+      <value>50M</value>
+    </property>
+    
+    <property>
+      <name>tserver.walog.max.size</name>
+      <value>100M</value>
+    </property>
+
+    <property>
+      <name>general.classpaths</name>
+      <value>
+    $ACCUMULO_HOME/server/target/classes/,
+    $ACCUMULO_HOME/core/target/classes/,
+    $ACCUMULO_HOME/start/target/classes/,
+    $ACCUMULO_HOME/fate/target/classes/,
+    $ACCUMULO_HOME/proxy/target/classes/,
+    $ACCUMULO_HOME/examples/target/classes/,
+	$ACCUMULO_HOME/lib/[^.].$ACCUMULO_VERSION.jar,
+	$ACCUMULO_HOME/lib/[^.].*.jar,
+	$ZOOKEEPER_HOME/zookeeper[^.].*.jar,
+	$HADOOP_CONF_DIR,
+	$HADOOP_PREFIX/[^.].*.jar,
+	$HADOOP_PREFIX/lib/[^.].*.jar,
+      </value>
+      <description>Classpaths that accumulo checks for updates and class files.
+      When using the Security Manager, please remove the ".../target/classes/" values.
+      </description>
+    </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/core/src/test/resources/crypto-on-accumulo-site.xml
----------------------------------------------------------------------
diff --git a/core/src/test/resources/crypto-on-accumulo-site.xml b/core/src/test/resources/crypto-on-accumulo-site.xml
new file mode 100644
index 0000000..9dc4aac
--- /dev/null
+++ b/core/src/test/resources/crypto-on-accumulo-site.xml
@@ -0,0 +1,164 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+	<!--
+  Put your site-specific accumulo configurations here.
+
+  The available configuration values along with their defaults
+  are documented in docs/config.html
+
+  Unless you are simply testing at your workstation, you will most 
+  definitely need to change the three entries below.
+	-->
+
+    <property>
+      <name>instance.zookeeper.host</name>
+      <value>localhost:2181</value>
+      <description>comma separated list of zookeeper servers</description>
+    </property>
+
+    <property>
+      <name>logger.dir.walog</name>
+      <value>walogs</value>
+      <description>The directory used to store write-ahead logs on the local filesystem. It is possible to specify a comma-separated list of directories.</description>
+    </property>
+    
+    <property>
+      <name>instance.secret</name>
+      <value>DEFAULT</value>
+      <description>A secret unique to a given instance that all servers must know in order to communicate with one another. 
+                   Change it before initialization. To change it later use ./bin/accumulo org.apache.accumulo.server.util.ChangeSecret [oldpasswd] [newpasswd], 
+                   and then update this file.
+      </description>
+    </property>
+
+    <property>
+      <name>tserver.memory.maps.max</name>
+      <value>80M</value>
+    </property>
+    
+    <property>
+      <name>tserver.cache.data.size</name>
+      <value>7M</value>
+    </property>
+    
+    <property>
+      <name>tserver.cache.index.size</name>
+      <value>20M</value>
+    </property>
+    
+    <property>
+      <name>trace.password</name>
+      <!-- 
+        change this to the root user's password, and/or change the user below 
+       -->
+      <value>password</value>
+    </property>
+    
+    <property>
+      <name>trace.user</name>
+      <value>root</value>
+    </property>
+    
+    <property>
+      <name>tserver.sort.buffer.size</name>
+      <value>50M</value>
+    </property>
+    
+    <property>
+      <name>tserver.walog.max.size</name>
+      <value>100M</value>
+    </property>
+
+    <property>
+      <name>general.classpaths</name>
+      <value>
+    $ACCUMULO_HOME/server/target/classes/,
+    $ACCUMULO_HOME/core/target/classes/,
+    $ACCUMULO_HOME/start/target/classes/,
+    $ACCUMULO_HOME/fate/target/classes/,
+    $ACCUMULO_HOME/proxy/target/classes/,
+    $ACCUMULO_HOME/examples/target/classes/,
+	$ACCUMULO_HOME/lib/[^.].$ACCUMULO_VERSION.jar,
+	$ACCUMULO_HOME/lib/[^.].*.jar,
+	$ZOOKEEPER_HOME/zookeeper[^.].*.jar,
+	$HADOOP_CONF_DIR,
+	$HADOOP_PREFIX/[^.].*.jar,
+	$HADOOP_PREFIX/lib/[^.].*.jar,
+      </value>
+      <description>Classpaths that accumulo checks for updates and class files.
+      When using the Security Manager, please remove the ".../target/classes/" values.
+      </description>
+    </property>
+
+    <property>
+      <name>crypto.module.class</name>
+      <value>org.apache.accumulo.core.security.crypto.DefaultCryptoModule</value>
+    </property>
+    <property>
+      <name>crypto.cipher.suite</name>
+      <value>AES/CFB/PKCS5Padding</value>
+    </property>
+    <property>
+      <name>crypto.cipher.algorithm.name</name>
+      <value>AES</value>
+    </property>
+    <property>
+      <name>crypto.cipher.key.length</name>
+      <value>128</value>
+    </property>
+    <property>
+      <name>crypto.secure.rng</name>
+      <value>SHA1PRNG</value>
+    </property>
+    <property>
+      <name>crypto.secure.rng.provider</name>
+      <value>SUN</value>
+    </property>
+    <property>
+      <name>crypto.secret.key.encryption.strategy.class</name>
+      <value>org.apache.accumulo.core.security.crypto.DefaultSecretKeyEncryptionStrategy</value>
+    </property>
+    <property>
+      <name>instance.dfs.dir</name>
+      <value>/tmp</value>
+    </property>
+    <property>
+      <name>instance.dfs.uri</name>
+      <value>file:///</value>
+    </property>
+    
+    <property>
+      <name>crypto.default.key.strategy.hdfs.uri</name>
+      <value>file:///</value>
+    </property>
+    <property>
+      <name>crypto.default.key.strategy.key.location</name>
+      <value>/tmp/test.secret.key</value>
+    </property>
+    
+    <property>
+    	<name>crypto.default.key.strategy.cipher.suite</name>
+    	<value>AES/ECB/NoPadding</value>
+    </property>
+
+
+
+</configuration>

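The crypto.* values in this file are standard JCE names; a self-contained sanity check
(illustrative only, not part of the patch) shows the mapping:

    import java.security.SecureRandom;
    import javax.crypto.Cipher;
    import javax.crypto.KeyGenerator;

    public class CryptoSiteConfigCheck {
      public static void main(String[] args) throws Exception {
        Cipher cipher = Cipher.getInstance("AES/CFB/PKCS5Padding");      // crypto.cipher.suite
        SecureRandom rng = SecureRandom.getInstance("SHA1PRNG", "SUN");  // crypto.secure.rng + .provider
        KeyGenerator keyGen = KeyGenerator.getInstance("AES");           // crypto.cipher.algorithm.name
        keyGen.init(128, rng);                                           // crypto.cipher.key.length
        System.out.println(cipher.getAlgorithm() + ", "
            + keyGen.generateKey().getEncoded().length * 8 + "-bit key");
      }
    }
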
http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/core/src/test/resources/crypto-on-no-key-encryption-accumulo-site.xml
----------------------------------------------------------------------
diff --git a/core/src/test/resources/crypto-on-no-key-encryption-accumulo-site.xml b/core/src/test/resources/crypto-on-no-key-encryption-accumulo-site.xml
new file mode 100644
index 0000000..640abac
--- /dev/null
+++ b/core/src/test/resources/crypto-on-no-key-encryption-accumulo-site.xml
@@ -0,0 +1,144 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+	<!--
+  Put your site-specific accumulo configurations here.
+
+  The available configuration values along with their defaults
+  are documented in docs/config.html
+
+  Unless you are simply testing at your workstation, you will most
+  definitely need to change the three entries below.
+	-->
+
+    <property>
+      <name>instance.zookeeper.host</name>
+      <value>localhost:2181</value>
+      <description>comma separated list of zookeeper servers</description>
+    </property>
+
+    <property>
+      <name>logger.dir.walog</name>
+      <value>walogs</value>
+      <description>The directory used to store write-ahead logs on the local filesystem. It is possible to specify a comma-separated list of directories.</description>
+    </property>
+
+    <property>
+      <name>instance.secret</name>
+      <value>DEFAULT</value>
+      <description>A secret unique to a given instance that all servers must know in order to communicate with one another.
+                   Change it before initialization. To change it later use ./bin/accumulo org.apache.accumulo.server.util.ChangeSecret [oldpasswd] [newpasswd],
+                   and then update this file.
+      </description>
+    </property>
+
+    <property>
+      <name>tserver.memory.maps.max</name>
+      <value>80M</value>
+    </property>
+
+    <property>
+      <name>tserver.cache.data.size</name>
+      <value>7M</value>
+    </property>
+
+    <property>
+      <name>tserver.cache.index.size</name>
+      <value>20M</value>
+    </property>
+
+    <property>
+      <name>trace.password</name>
+      <!--
+        change this to the root user's password, and/or change the user below
+       -->
+      <value>password</value>
+    </property>
+
+    <property>
+      <name>trace.user</name>
+      <value>root</value>
+    </property>
+
+    <property>
+      <name>tserver.sort.buffer.size</name>
+      <value>50M</value>
+    </property>
+
+    <property>
+      <name>tserver.walog.max.size</name>
+      <value>100M</value>
+    </property>
+
+    <property>
+      <name>general.classpaths</name>
+      <value>
+    $ACCUMULO_HOME/server/target/classes/,
+    $ACCUMULO_HOME/core/target/classes/,
+    $ACCUMULO_HOME/start/target/classes/,
+    $ACCUMULO_HOME/fate/target/classes/,
+    $ACCUMULO_HOME/proxy/target/classes/,
+    $ACCUMULO_HOME/examples/target/classes/,
+	$ACCUMULO_HOME/lib/[^.].$ACCUMULO_VERSION.jar,
+	$ACCUMULO_HOME/lib/[^.].*.jar,
+	$ZOOKEEPER_HOME/zookeeper[^.].*.jar,
+	$HADOOP_CONF_DIR,
+	$HADOOP_PREFIX/[^.].*.jar,
+	$HADOOP_PREFIX/lib/[^.].*.jar,
+      </value>
+      <description>Classpaths that accumulo checks for updates and class files.
+      When using the Security Manager, please remove the ".../target/classes/" values.
+      </description>
+    </property>
+
+    <property>
+      <name>crypto.module.class</name>
+      <value>org.apache.accumulo.core.security.crypto.DefaultCryptoModule</value>
+    </property>
+    <property>
+      <name>crypto.cipher.suite</name>
+      <value>AES/CFB/PKCS5Padding</value>
+    </property>
+    <property>
+      <name>crypto.cipher.algorithm.name</name>
+      <value>AES</value>
+    </property>
+    <property>
+      <name>crypto.cipher.key.length</name>
+      <value>128</value>
+    </property>
+    <property>
+      <name>crypto.secure.rng</name>
+      <value>SHA1PRNG</value>
+    </property>
+    <property>
+      <name>crypto.secure.rng.provider</name>
+      <value>SUN</value>
+    </property>
+    <property>
+      <name>instance.dfs.dir</name>
+      <value>/tmp</value>
+    </property>
+    <property>
+      <name>instance.dfs.uri</name>
+      <value>file:///</value>
+    </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 37791e4..0dc56a4 100644
--- a/pom.xml
+++ b/pom.xml
@@ -142,7 +142,7 @@
       <dependency>
         <groupId>commons-codec</groupId>
         <artifactId>commons-codec</artifactId>
-        <version>1.4</version>
+        <version>1.7</version>
       </dependency>
       <dependency>
         <groupId>commons-collections</groupId>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/server/src/main/java/org/apache/accumulo/server/tabletserver/log/DfsLogger.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/log/DfsLogger.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/log/DfsLogger.java
index 21bef0f..6d574b9 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/log/DfsLogger.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/log/DfsLogger.java
@@ -40,6 +40,8 @@ import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.security.crypto.CryptoModuleFactory;
+import org.apache.accumulo.core.security.crypto.CryptoModuleParameters;
 import org.apache.accumulo.core.util.Daemon;
 import org.apache.accumulo.core.util.StringUtil;
 import org.apache.accumulo.server.ServerConstants;
@@ -271,22 +273,26 @@ public class DfsLogger {
       }
       
       // Initialize the crypto operations.
-      @SuppressWarnings("deprecation")
       org.apache.accumulo.core.security.crypto.CryptoModule cryptoModule = org.apache.accumulo.core.security.crypto.CryptoModuleFactory.getCryptoModule(conf
           .getConfiguration().get(Property.CRYPTO_MODULE_CLASS));
       
       // Initialize the log file with a header and the crypto params used to set up this log file.
       logFile.write(LOG_FILE_HEADER_V2.getBytes());
-      Map<String,String> cryptoOpts = conf.getConfiguration().getAllPropertiesWithPrefix(Property.CRYPTO_PREFIX);
+
+      CryptoModuleParameters params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf.getConfiguration());
+      
+      params.setPlaintextOutputStream(logFile);
+      
+      // In order to bootstrap the reading of this file later, we have to record the CryptoModule that was used to encipher it here,
+      // so that the same crypto module can re-read its own parameters.
+      
+      logFile.writeUTF(conf.getConfiguration().get(Property.CRYPTO_MODULE_CLASS));
       
-      logFile.writeInt(cryptoOpts.size());
-      for (String key : cryptoOpts.keySet()) {
-        logFile.writeUTF(key);
-        logFile.writeUTF(cryptoOpts.get(key));
-      }
       
-      @SuppressWarnings("deprecation")
-      OutputStream encipheringOutputStream = cryptoModule.getEncryptingOutputStream(logFile, cryptoOpts);
+      //@SuppressWarnings("deprecation")
+      //OutputStream encipheringOutputStream = cryptoModule.getEncryptingOutputStream(logFile, cryptoOpts);
+      params = cryptoModule.getEncryptingOutputStream(params);
+      OutputStream encipheringOutputStream = params.getEncryptedOutputStream();
       
       // If the module just kicks back our original stream, then just use it, don't wrap it in
       // another data OutputStream.

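After this change a V2 write-ahead log is laid out as: the LOG_FILE_HEADER_V2 magic, one
writeUTF'd crypto module class name, then whatever that module writes for its own parameters,
followed by the enciphered log data. A reader bootstraps accordingly (a sketch mirroring the
LogSorter change below; variable names are illustrative):

    FSDataInputStream in = fs.open(srcPath);
    byte[] magic = DfsLogger.LOG_FILE_HEADER_V2.getBytes();
    byte[] buffer = new byte[magic.length];
    in.readFully(buffer);
    if (!Arrays.equals(buffer, magic)) {
      in.seek(0); // older log with no header: read it as plain, unencrypted data
    } else {
      String moduleClass = in.readUTF(); // recorded by DfsLogger above
      CryptoModule module = CryptoModuleFactory.getCryptoModule(moduleClass);
      CryptoModuleParameters params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf);
      params.setEncryptedInputStream(in);
      params = module.getDecryptingInputStream(params); // the module re-reads its own parameters
      DataInputStream decrypting = new DataInputStream(params.getPlaintextInputStream());
    }
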
http://git-wip-us.apache.org/repos/asf/accumulo/blob/65b5a3a3/server/src/main/java/org/apache/accumulo/server/tabletserver/log/LogSorter.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/log/LogSorter.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/log/LogSorter.java
index ea6c1ce..7518edb 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/log/LogSorter.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/log/LogSorter.java
@@ -19,8 +19,8 @@ package org.apache.accumulo.server.tabletserver.log;
 import java.io.DataInputStream;
 import java.io.EOFException;
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
@@ -34,6 +34,8 @@ import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.master.thrift.RecoveryStatus;
+import org.apache.accumulo.core.security.crypto.CryptoModuleFactory;
+import org.apache.accumulo.core.security.crypto.CryptoModuleParameters;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.util.SimpleThreadPool;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
@@ -111,45 +113,44 @@ public class LogSorter {
         fs.deleteRecursively(new Path(destPath));
         
         FSDataInputStream tmpInput = fs.open(srcPath);
-        DataInputStream tmpDecryptingInput = tmpInput;
-        
-        Map<String,String> cryptoOpts = new HashMap<String,String>();
-        tmpInput = DfsLogger.readHeader(fs, srcPath, cryptoOpts);
-        
-        if (!cryptoOpts.containsKey(Property.CRYPTO_MODULE_CLASS.getKey())) {
-          
-          log.debug("Log file " + name + " not encrypted");
-          
+                
+        byte[] magic = DfsLogger.LOG_FILE_HEADER_V2.getBytes();
+        byte[] magicBuffer = new byte[magic.length];
+        tmpInput.readFully(magicBuffer);
+        if (!Arrays.equals(magicBuffer, magic)) {
+          tmpInput.seek(0);
           synchronized (this) {
-            this.input = tmpInput;
-            this.decryptingInput = tmpInput;
+            this.input = tmpInput;
+            this.decryptingInput = tmpInput;
           }
-          
         } else {
+          // We read the crypto module class name here because we need to bootstrap the class.  The class itself will read any
+          // additional parameters it needs from the underlying stream.
+          String cryptoModuleClassname = tmpInput.readUTF();
+          org.apache.accumulo.core.security.crypto.CryptoModule cryptoModule = org.apache.accumulo.core.security.crypto.CryptoModuleFactory
+              .getCryptoModule(cryptoModuleClassname);
           
-          String cryptoModuleName = cryptoOpts.get(Property.CRYPTO_MODULE_CLASS.getKey());
-          if (cryptoModuleName == null) {
-            // If for whatever reason we didn't get a configured crypto module (old log file version, for instance)
-            // default to using the default configuration entry (usually NullCipher).
-            cryptoModuleName = AccumuloConfiguration.getDefaultConfiguration().get(Property.CRYPTO_MODULE_CLASS);
-          }
+          // Create the parameters and set the input stream into those parameters
+          CryptoModuleParameters params = CryptoModuleFactory.createParamsObjectFromAccumuloConfiguration(conf);
+          params.setEncryptedInputStream(tmpInput);
           
+          // Create the plaintext input stream from the encrypted one
+          params = cryptoModule.getDecryptingInputStream(params);
+          
+          // Store the plaintext input stream into member variables
           synchronized (this) {
             this.input = tmpInput;
+            
+            if (params.getPlaintextInputStream() instanceof DataInputStream) {
+              this.decryptingInput = (DataInputStream)params.getPlaintextInputStream();              
+            } else {
+              this.decryptingInput = new DataInputStream(params.getPlaintextInputStream());
+            }
+            
           }
           
-          @SuppressWarnings("deprecation")
-          org.apache.accumulo.core.security.crypto.CryptoModule cryptoOps = org.apache.accumulo.core.security.crypto.CryptoModuleFactory
-              .getCryptoModule(cryptoModuleName);
-          @SuppressWarnings("deprecation")
-          InputStream decryptingInputStream = cryptoOps.getDecryptingInputStream(input, cryptoOpts);
-          
-          tmpDecryptingInput = new DataInputStream(decryptingInputStream);
-          
-          synchronized (this) {
-            this.decryptingInput = tmpDecryptingInput;
-          }
         }
+                
         
         final long bufferSize = conf.getMemoryInBytes(Property.TSERV_SORT_BUFFER_SIZE);
         Thread.currentThread().setName("Sorting " + name + " for recovery");
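
The sort path mirrors the write side: it probes for the magic bytes and, when they are absent (an old, pre-encryption log), seeks back to offset 0 and reads the file as plaintext. A rough sketch of that probe-and-rewind, assuming the usual Hadoop FSDataInputStream and java.util.Arrays imports (method name illustrative):

  // Probe for the v2 magic; when absent (a pre-encryption log), rewind and
  // read the whole file as plaintext.
  static boolean hasV2Header(FSDataInputStream in, byte[] magic) throws IOException {
    byte[] buf = new byte[magic.length];
    in.readFully(buf);
    if (Arrays.equals(buf, magic))
      return true;   // next readUTF() yields the crypto module class name
    in.seek(0);      // old log: fall back to reading from the beginning
    return false;
  }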


[07/50] [abbrv] git commit: ACCUMULO-1537 fixing tests and timings, migrated visibility test

Posted by ct...@apache.org.
ACCUMULO-1537 fixing tests and timings, migrated visibility test

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1499414 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/a5872e65
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/a5872e65
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/a5872e65

Branch: refs/heads/ACCUMULO-1496
Commit: a5872e651cfea9043fd75819e69294ca49253488
Parents: e8cdd0b
Author: Eric C. Newton <ec...@apache.org>
Authored: Wed Jul 3 14:17:38 2013 +0000
Committer: Eric C. Newton <ec...@apache.org>
Committed: Wed Jul 3 14:17:38 2013 +0000

----------------------------------------------------------------------
 .../accumulo/test/TestMultiTableIngest.java     |   3 +
 .../test/functional/VisibilityTest.java         | 305 -------------------
 .../accumulo/test/functional/ReadWriteIT.java   |   4 +-
 .../accumulo/test/functional/VisibilityIT.java  | 294 ++++++++++++++++++
 test/system/auto/simple/visibility.py           |  30 --
 5 files changed, 299 insertions(+), 337 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/a5872e65/test/src/main/java/org/apache/accumulo/test/TestMultiTableIngest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/TestMultiTableIngest.java b/test/src/main/java/org/apache/accumulo/test/TestMultiTableIngest.java
index 423acaa..8edcd92 100644
--- a/test/src/main/java/org/apache/accumulo/test/TestMultiTableIngest.java
+++ b/test/src/main/java/org/apache/accumulo/test/TestMultiTableIngest.java
@@ -20,6 +20,8 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map.Entry;
 
+import javax.crypto.Mac;
+
 import org.apache.accumulo.core.cli.BatchWriterOpts;
 import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.AccumuloException;
@@ -32,6 +34,7 @@ import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.server.cli.ClientOpts;
+import org.apache.accumulo.test.functional.MacTest;
 import org.apache.hadoop.io.Text;
 
 import com.beust.jcommander.Parameter;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a5872e65/test/src/main/java/org/apache/accumulo/test/functional/VisibilityTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/VisibilityTest.java b/test/src/main/java/org/apache/accumulo/test/functional/VisibilityTest.java
deleted file mode 100644
index ffe383b..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/VisibilityTest.java
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.ColumnVisibility;
-import org.apache.accumulo.core.util.ByteArraySet;
-import org.apache.hadoop.io.Text;
-
-public class VisibilityTest extends FunctionalTest {
-  
-  @Override
-  public void cleanup() throws Exception {
-    
-  }
-  
-  @Override
-  public Map<String,String> getInitialConfig() {
-    return Collections.emptyMap();
-  }
-  
-  @Override
-  public List<TableSetup> getTablesToCreate() {
-    return Arrays.asList(new TableSetup[] {new TableSetup("vt"),
-        new TableSetup("vt2", Collections.singletonMap(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey(), "DEFLABEL"))});
-  }
-  
-  @Override
-  public void run() throws Exception {
-    
-    insertData();
-    queryData();
-    deleteData();
-    
-    insertDefaultData();
-    queryDefaultData();
-    
-  }
-  
-  private static SortedSet<String> nss(String... labels) {
-    TreeSet<String> ts = new TreeSet<String>();
-    
-    for (String s : labels) {
-      ts.add(s);
-    }
-    
-    return ts;
-  }
-  
-  private void mput(Mutation m, String cf, String cq, String cv, String val) {
-    ColumnVisibility le = new ColumnVisibility(cv.getBytes());
-    m.put(new Text(cf), new Text(cq), le, new Value(val.getBytes()));
-  }
-  
-  private void mputDelete(Mutation m, String cf, String cq, String cv) {
-    ColumnVisibility le = new ColumnVisibility(cv.getBytes());
-    m.putDelete(new Text(cf), new Text(cq), le);
-  }
-  
-  private void insertData() throws Exception {
-    
-    BatchWriter bw = getConnector().createBatchWriter("vt", new BatchWriterConfig());
-    Mutation m1 = new Mutation(new Text("row1"));
-    
-    mput(m1, "cf1", "cq1", "", "v1");
-    mput(m1, "cf1", "cq1", "A", "v2");
-    mput(m1, "cf1", "cq1", "B", "v3");
-    mput(m1, "cf1", "cq1", "A&B", "v4");
-    mput(m1, "cf1", "cq1", "A&(L|M)", "v5");
-    mput(m1, "cf1", "cq1", "B&(L|M)", "v6");
-    mput(m1, "cf1", "cq1", "A&B&(L|M)", "v7");
-    mput(m1, "cf1", "cq1", "A&B&(L)", "v8");
-    mput(m1, "cf1", "cq1", "A&FOO", "v9");
-    mput(m1, "cf1", "cq1", "A&FOO&(L|M)", "v10");
-    mput(m1, "cf1", "cq1", "FOO", "v11");
-    mput(m1, "cf1", "cq1", "(A|B)&FOO&(L|M)", "v12");
-    mput(m1, "cf1", "cq1", "A&B&(L|M|FOO)", "v13");
-    
-    bw.addMutation(m1);
-    bw.close();
-  }
-  
-  private void deleteData() throws Exception {
-    
-    BatchWriter bw = getConnector().createBatchWriter("vt", new BatchWriterConfig());
-    Mutation m1 = new Mutation(new Text("row1"));
-    
-    mputDelete(m1, "cf1", "cq1", "");
-    mputDelete(m1, "cf1", "cq1", "A");
-    mputDelete(m1, "cf1", "cq1", "A&B");
-    mputDelete(m1, "cf1", "cq1", "B&(L|M)");
-    mputDelete(m1, "cf1", "cq1", "A&B&(L)");
-    mputDelete(m1, "cf1", "cq1", "A&FOO&(L|M)");
-    mputDelete(m1, "cf1", "cq1", "(A|B)&FOO&(L|M)");
-    mputDelete(m1, "cf1", "cq1", "FOO&A"); // should not delete anything
-    
-    bw.addMutation(m1);
-    bw.close();
-    
-    Map<Set<String>,Set<String>> expected = new HashMap<Set<String>,Set<String>>();
-    
-    expected.put(nss("A", "L"), nss("v5"));
-    expected.put(nss("A", "M"), nss("v5"));
-    expected.put(nss("B"), nss("v3"));
-    expected.put(nss("Z"), nss());
-    expected.put(nss("A", "B", "L"), nss("v7", "v13"));
-    expected.put(nss("A", "B", "M"), nss("v7", "v13"));
-    expected.put(nss("A", "B", "FOO"), nss("v13"));
-    expected.put(nss("FOO"), nss("v11"));
-    expected.put(nss("A", "FOO"), nss("v9"));
-    
-    queryData(nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "FOO", "L", "M", "Z"), expected);
-  }
-  
-  private void insertDefaultData() throws Exception {
-    BatchWriter bw = getConnector().createBatchWriter("vt2", new BatchWriterConfig());
-    Mutation m1 = new Mutation(new Text("row1"));
-    
-    mput(m1, "cf1", "cq1", "BASE", "v1");
-    mput(m1, "cf1", "cq2", "DEFLABEL", "v2");
-    mput(m1, "cf1", "cq3", "", "v3");
-    
-    bw.addMutation(m1);
-    bw.close();
-  }
-  
-  private static void uniqueCombos(List<Set<String>> all, Set<String> prefix, Set<String> suffix) {
-    
-    all.add(prefix);
-    
-    TreeSet<String> ss = new TreeSet<String>(suffix);
-    
-    for (String s : suffix) {
-      TreeSet<String> ps = new TreeSet<String>(prefix);
-      ps.add(s);
-      ss.remove(s);
-      
-      uniqueCombos(all, ps, ss);
-    }
-  }
-  
-  private void queryData() throws Exception {
-    Map<Set<String>,Set<String>> expected = new HashMap<Set<String>,Set<String>>();
-    expected.put(nss(), nss("v1"));
-    expected.put(nss("A"), nss("v2"));
-    expected.put(nss("A", "L"), nss("v5"));
-    expected.put(nss("A", "M"), nss("v5"));
-    expected.put(nss("B"), nss("v3"));
-    expected.put(nss("B", "L"), nss("v6"));
-    expected.put(nss("B", "M"), nss("v6"));
-    expected.put(nss("Z"), nss());
-    expected.put(nss("A", "B"), nss("v4"));
-    expected.put(nss("A", "B", "L"), nss("v7", "v8", "v13"));
-    expected.put(nss("A", "B", "M"), nss("v7", "v13"));
-    expected.put(nss("A", "B", "FOO"), nss("v13"));
-    expected.put(nss("FOO"), nss("v11"));
-    expected.put(nss("A", "FOO"), nss("v9"));
-    expected.put(nss("A", "FOO", "L"), nss("v10", "v12"));
-    expected.put(nss("A", "FOO", "M"), nss("v10", "v12"));
-    expected.put(nss("B", "FOO", "L"), nss("v12"));
-    expected.put(nss("B", "FOO", "M"), nss("v12"));
-    
-    queryData(nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "FOO", "L", "M", "Z"), expected);
-    queryData(nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "L", "M", "Z"), expected);
-    queryData(nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "Z"), expected);
-    queryData(nss("A", "B", "FOO", "L", "M", "Z"), nss("Z"), expected);
-    queryData(nss("A", "B", "FOO", "L", "M", "Z"), nss(), expected);
-  }
-  
-  private void queryData(Set<String> allAuths, Set<String> userAuths, Map<Set<String>,Set<String>> expected) throws Exception {
-    
-    getConnector().securityOperations().changeUserAuthorizations(getPrincipal(), new Authorizations(nbas(userAuths)));
-    
-    ArrayList<Set<String>> combos = new ArrayList<Set<String>>();
-    uniqueCombos(combos, nss(), allAuths);
-    
-    for (Set<String> set1 : combos) {
-      Set<String> e = new TreeSet<String>();
-      for (Set<String> set2 : combos) {
-        
-        set2 = new HashSet<String>(set2);
-        set2.retainAll(userAuths);
-        
-        if (set1.containsAll(set2) && expected.containsKey(set2)) {
-          e.addAll(expected.get(set2));
-        }
-      }
-      
-      set1.retainAll(userAuths);
-      verify(set1, e);
-    }
-    
-  }
-  
-  private void queryDefaultData() throws Exception {
-    Scanner scanner;
-    
-    // should return no records
-    getConnector().securityOperations().changeUserAuthorizations(getPrincipal(), new Authorizations("BASE", "DEFLABEL"));
-    scanner = getConnector().createScanner("vt2", new Authorizations());
-    verifyDefault(scanner, 0);
-    
-    // should return one record
-    scanner = getConnector().createScanner("vt2", new Authorizations("BASE"));
-    verifyDefault(scanner, 1);
-    
-    // should return all three records
-    scanner = getConnector().createScanner("vt2", new Authorizations("BASE", "DEFLABEL"));
-    verifyDefault(scanner, 3);
-  }
-  
-  private void verifyDefault(Scanner scanner, int expectedCount) throws Exception {
-    for (@SuppressWarnings("unused")
-    Entry<Key,Value> entry : scanner)
-      --expectedCount;
-    if (expectedCount != 0)
-      throw new Exception(" expected count !=0 " + expectedCount);
-  }
-  
-  private void verify(Set<String> auths, Set<String> expectedValues) throws Exception {
-    ByteArraySet bas = nbas(auths);
-    
-    try {
-      verify(bas, expectedValues.toArray(new String[0]));
-    } catch (Exception e) {
-      throw new Exception("Verification failed auths=" + auths + " exp=" + expectedValues, e);
-    }
-  }
-  
-  private ByteArraySet nbas(Set<String> auths) {
-    ByteArraySet bas = new ByteArraySet();
-    for (String auth : auths) {
-      bas.add(auth.getBytes());
-    }
-    return bas;
-  }
-  
-  private void verify(ByteArraySet nss, String... expected) throws Exception {
-    Scanner scanner = getConnector().createScanner("vt", new Authorizations(nss));
-    verify(scanner.iterator(), expected);
-    
-    BatchScanner bs = getConnector().createBatchScanner("vt", new Authorizations(nss), 3);
-    bs.setRanges(Collections.singleton(new Range()));
-    verify(bs.iterator(), expected);
-    bs.close();
-  }
-  
-  private void verify(Iterator<Entry<Key,Value>> iter, String... expected) throws Exception {
-    HashSet<String> valuesSeen = new HashSet<String>();
-    
-    while (iter.hasNext()) {
-      Entry<Key,Value> entry = iter.next();
-      if (valuesSeen.contains(entry.getValue().toString())) {
-        throw new Exception("Value seen twice");
-      }
-      valuesSeen.add(entry.getValue().toString());
-    }
-    
-    for (String ev : expected) {
-      if (!valuesSeen.remove(ev)) {
-        throw new Exception("Did not see expected value " + ev);
-      }
-    }
-    
-    if (valuesSeen.size() != 0) {
-      throw new Exception("Saw more values than expected " + valuesSeen);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a5872e65/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
index c4c5980..d058724 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
@@ -56,7 +56,7 @@ import org.junit.Test;
 
 public class ReadWriteIT extends MacTest {
   
-  static final int ROWS = 200000;
+  static final int ROWS = 20000;
   static final int COLS = 1;
   static final String COLF = "colf";
   
@@ -226,7 +226,7 @@ public class ReadWriteIT extends MacTest {
     assertTrue(foundFile);
   }
   
-  @Test(timeout=60*1000)
+  @Test(timeout=90*1000)
   public void localityGroupChange() throws Exception {
     // Make changes to locality groups and ensure nothing is lost
     final Connector connector = getConnector();

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a5872e65/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java b/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
new file mode 100644
index 0000000..7266430
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/VisibilityIT.java
@@ -0,0 +1,294 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.ColumnVisibility;
+import org.apache.accumulo.core.util.ByteArraySet;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class VisibilityIT extends MacTest {
+  
+  @Test
+  public void run() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("vt");
+    c.tableOperations().create("vt2");
+    c.tableOperations().setProperty("vt2", Property.TABLE_DEFAULT_SCANTIME_VISIBILITY.getKey(), "DEFLABEL");
+    
+    insertData(c);
+    queryData(c);
+    deleteData(c);
+    
+    insertDefaultData(c);
+    queryDefaultData(c);
+    
+  }
+  
+  private static SortedSet<String> nss(String... labels) {
+    TreeSet<String> ts = new TreeSet<String>();
+    
+    for (String s : labels) {
+      ts.add(s);
+    }
+    
+    return ts;
+  }
+  
+  private void mput(Mutation m, String cf, String cq, String cv, String val) {
+    ColumnVisibility le = new ColumnVisibility(cv.getBytes());
+    m.put(new Text(cf), new Text(cq), le, new Value(val.getBytes()));
+  }
+  
+  private void mputDelete(Mutation m, String cf, String cq, String cv) {
+    ColumnVisibility le = new ColumnVisibility(cv.getBytes());
+    m.putDelete(new Text(cf), new Text(cq), le);
+  }
+  
+  private void insertData(Connector c) throws Exception {
+    
+    BatchWriter bw = c.createBatchWriter("vt", new BatchWriterConfig());
+    Mutation m1 = new Mutation(new Text("row1"));
+    
+    mput(m1, "cf1", "cq1", "", "v1");
+    mput(m1, "cf1", "cq1", "A", "v2");
+    mput(m1, "cf1", "cq1", "B", "v3");
+    mput(m1, "cf1", "cq1", "A&B", "v4");
+    mput(m1, "cf1", "cq1", "A&(L|M)", "v5");
+    mput(m1, "cf1", "cq1", "B&(L|M)", "v6");
+    mput(m1, "cf1", "cq1", "A&B&(L|M)", "v7");
+    mput(m1, "cf1", "cq1", "A&B&(L)", "v8");
+    mput(m1, "cf1", "cq1", "A&FOO", "v9");
+    mput(m1, "cf1", "cq1", "A&FOO&(L|M)", "v10");
+    mput(m1, "cf1", "cq1", "FOO", "v11");
+    mput(m1, "cf1", "cq1", "(A|B)&FOO&(L|M)", "v12");
+    mput(m1, "cf1", "cq1", "A&B&(L|M|FOO)", "v13");
+    
+    bw.addMutation(m1);
+    bw.close();
+  }
+  
+  private void deleteData(Connector c) throws Exception {
+    
+    BatchWriter bw = c.createBatchWriter("vt", new BatchWriterConfig());
+    Mutation m1 = new Mutation(new Text("row1"));
+    
+    mputDelete(m1, "cf1", "cq1", "");
+    mputDelete(m1, "cf1", "cq1", "A");
+    mputDelete(m1, "cf1", "cq1", "A&B");
+    mputDelete(m1, "cf1", "cq1", "B&(L|M)");
+    mputDelete(m1, "cf1", "cq1", "A&B&(L)");
+    mputDelete(m1, "cf1", "cq1", "A&FOO&(L|M)");
+    mputDelete(m1, "cf1", "cq1", "(A|B)&FOO&(L|M)");
+    mputDelete(m1, "cf1", "cq1", "FOO&A"); // should not delete anything
+    
+    bw.addMutation(m1);
+    bw.close();
+    
+    Map<Set<String>,Set<String>> expected = new HashMap<Set<String>,Set<String>>();
+    
+    expected.put(nss("A", "L"), nss("v5"));
+    expected.put(nss("A", "M"), nss("v5"));
+    expected.put(nss("B"), nss("v3"));
+    expected.put(nss("Z"), nss());
+    expected.put(nss("A", "B", "L"), nss("v7", "v13"));
+    expected.put(nss("A", "B", "M"), nss("v7", "v13"));
+    expected.put(nss("A", "B", "FOO"), nss("v13"));
+    expected.put(nss("FOO"), nss("v11"));
+    expected.put(nss("A", "FOO"), nss("v9"));
+    
+    queryData(c, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "FOO", "L", "M", "Z"), expected);
+  }
+  
+  private void insertDefaultData(Connector c) throws Exception {
+    BatchWriter bw = c.createBatchWriter("vt2", new BatchWriterConfig());
+    Mutation m1 = new Mutation(new Text("row1"));
+    
+    mput(m1, "cf1", "cq1", "BASE", "v1");
+    mput(m1, "cf1", "cq2", "DEFLABEL", "v2");
+    mput(m1, "cf1", "cq3", "", "v3");
+    
+    bw.addMutation(m1);
+    bw.close();
+  }
+  
+  private static void uniqueCombos(List<Set<String>> all, Set<String> prefix, Set<String> suffix) {
+    
+    all.add(prefix);
+    
+    TreeSet<String> ss = new TreeSet<String>(suffix);
+    
+    for (String s : suffix) {
+      TreeSet<String> ps = new TreeSet<String>(prefix);
+      ps.add(s);
+      ss.remove(s);
+      
+      uniqueCombos(all, ps, ss);
+    }
+  }
+  
+  private void queryData(Connector c) throws Exception {
+    Map<Set<String>,Set<String>> expected = new HashMap<Set<String>,Set<String>>();
+    expected.put(nss(), nss("v1"));
+    expected.put(nss("A"), nss("v2"));
+    expected.put(nss("A", "L"), nss("v5"));
+    expected.put(nss("A", "M"), nss("v5"));
+    expected.put(nss("B"), nss("v3"));
+    expected.put(nss("B", "L"), nss("v6"));
+    expected.put(nss("B", "M"), nss("v6"));
+    expected.put(nss("Z"), nss());
+    expected.put(nss("A", "B"), nss("v4"));
+    expected.put(nss("A", "B", "L"), nss("v7", "v8", "v13"));
+    expected.put(nss("A", "B", "M"), nss("v7", "v13"));
+    expected.put(nss("A", "B", "FOO"), nss("v13"));
+    expected.put(nss("FOO"), nss("v11"));
+    expected.put(nss("A", "FOO"), nss("v9"));
+    expected.put(nss("A", "FOO", "L"), nss("v10", "v12"));
+    expected.put(nss("A", "FOO", "M"), nss("v10", "v12"));
+    expected.put(nss("B", "FOO", "L"), nss("v12"));
+    expected.put(nss("B", "FOO", "M"), nss("v12"));
+    
+    queryData(c, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "FOO", "L", "M", "Z"), expected);
+    queryData(c, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "B", "L", "M", "Z"), expected);
+    queryData(c, nss("A", "B", "FOO", "L", "M", "Z"), nss("A", "Z"), expected);
+    queryData(c, nss("A", "B", "FOO", "L", "M", "Z"), nss("Z"), expected);
+    queryData(c, nss("A", "B", "FOO", "L", "M", "Z"), nss(), expected);
+  }
+  
+  private void queryData(Connector c, Set<String> allAuths, Set<String> userAuths, Map<Set<String>,Set<String>> expected) throws Exception {
+    
+    c.securityOperations().changeUserAuthorizations("root", new Authorizations(nbas(userAuths)));
+    
+    ArrayList<Set<String>> combos = new ArrayList<Set<String>>();
+    uniqueCombos(combos, nss(), allAuths);
+    
+    for (Set<String> set1 : combos) {
+      Set<String> e = new TreeSet<String>();
+      for (Set<String> set2 : combos) {
+        
+        set2 = new HashSet<String>(set2);
+        set2.retainAll(userAuths);
+        
+        if (set1.containsAll(set2) && expected.containsKey(set2)) {
+          e.addAll(expected.get(set2));
+        }
+      }
+      
+      set1.retainAll(userAuths);
+      verify(c, set1, e);
+    }
+    
+  }
+  
+  private void queryDefaultData(Connector c) throws Exception {
+    Scanner scanner;
+    
+    // should return no records
+    c.securityOperations().changeUserAuthorizations("root", new Authorizations("BASE", "DEFLABEL"));
+    scanner = getConnector().createScanner("vt2", new Authorizations());
+    verifyDefault(scanner, 0);
+    
+    // should return one record
+    scanner = getConnector().createScanner("vt2", new Authorizations("BASE"));
+    verifyDefault(scanner, 1);
+    
+    // should return all three records
+    scanner = getConnector().createScanner("vt2", new Authorizations("BASE", "DEFLABEL"));
+    verifyDefault(scanner, 3);
+  }
+  
+  private void verifyDefault(Scanner scanner, int expectedCount) throws Exception {
+    for (@SuppressWarnings("unused")
+    Entry<Key,Value> entry : scanner)
+      --expectedCount;
+    if (expectedCount != 0)
+      throw new Exception(" expected count !=0 " + expectedCount);
+  }
+  
+  private void verify(Connector c, Set<String> auths, Set<String> expectedValues) throws Exception {
+    ByteArraySet bas = nbas(auths);
+    
+    try {
+      verify(c, bas, expectedValues.toArray(new String[0]));
+    } catch (Exception e) {
+      throw new Exception("Verification failed auths=" + auths + " exp=" + expectedValues, e);
+    }
+  }
+  
+  private ByteArraySet nbas(Set<String> auths) {
+    ByteArraySet bas = new ByteArraySet();
+    for (String auth : auths) {
+      bas.add(auth.getBytes());
+    }
+    return bas;
+  }
+  
+  private void verify(Connector c, ByteArraySet nss, String... expected) throws Exception {
+    Scanner scanner = c.createScanner("vt", new Authorizations(nss));
+    verify(scanner.iterator(), expected);
+    
+    BatchScanner bs = getConnector().createBatchScanner("vt", new Authorizations(nss), 3);
+    bs.setRanges(Collections.singleton(new Range()));
+    verify(bs.iterator(), expected);
+    bs.close();
+  }
+  
+  private void verify(Iterator<Entry<Key,Value>> iter, String... expected) throws Exception {
+    HashSet<String> valuesSeen = new HashSet<String>();
+    
+    while (iter.hasNext()) {
+      Entry<Key,Value> entry = iter.next();
+      if (valuesSeen.contains(entry.getValue().toString())) {
+        throw new Exception("Value seen twice");
+      }
+      valuesSeen.add(entry.getValue().toString());
+    }
+    
+    for (String ev : expected) {
+      if (!valuesSeen.remove(ev)) {
+        throw new Exception("Did not see expected value " + ev);
+      }
+    }
+    
+    if (valuesSeen.size() != 0) {
+      throw new Exception("Saw more values than expected " + valuesSeen);
+    }
+  }
+}
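
For readers unfamiliar with the label expressions exercised above ("A&B", "A&(L|M)", and so on): a cell is returned only when its visibility expression evaluates to true against the scanner's authorizations, and the empty expression is visible to everyone. A quick sketch using accumulo-core's VisibilityEvaluator, assuming the 1.5-era API:

  import org.apache.accumulo.core.security.Authorizations;
  import org.apache.accumulo.core.security.ColumnVisibility;
  import org.apache.accumulo.core.security.VisibilityEvaluator;

  public class VisCheck {
    public static void main(String[] args) throws Exception {
      // A user holding only authorizations A and L.
      VisibilityEvaluator ve = new VisibilityEvaluator(new Authorizations("A", "L"));
      for (String expr : new String[] {"", "A", "A&B", "A&(L|M)", "FOO"}) {
        System.out.println("\"" + expr + "\" -> " + ve.evaluate(new ColumnVisibility(expr)));
      }
      // Expected: "", "A", and "A&(L|M)" print true; "A&B" and "FOO" print false.
    }
  }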

http://git-wip-us.apache.org/repos/asf/accumulo/blob/a5872e65/test/system/auto/simple/visibility.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/visibility.py b/test/system/auto/simple/visibility.py
deleted file mode 100755
index 60f7ae4..0000000
--- a/test/system/auto/simple/visibility.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from JavaTest import JavaTest
-
-import unittest
-
-class VisibilityTest(JavaTest):
-    "Test Column Visibility"
-
-    order = 21
-    testClass="org.apache.accumulo.test.functional.VisibilityTest"
-
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(VisibilityTest())
-    return result


[37/50] [abbrv] git commit: ACCUMULO-1537 fixed NPE in MAC

Posted by ct...@apache.org.
ACCUMULO-1537 fixed NPE in MAC

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1502381 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/fb2c0c75
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/fb2c0c75
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/fb2c0c75

Branch: refs/heads/ACCUMULO-1496
Commit: fb2c0c75141177da429bc9094d6d5b15ba12f05e
Parents: 720e27a
Author: Keith Turner <kt...@apache.org>
Authored: Thu Jul 11 22:04:55 2013 +0000
Committer: Keith Turner <kt...@apache.org>
Committed: Thu Jul 11 22:04:55 2013 +0000

----------------------------------------------------------------------
 .../accumulo/minicluster/MiniAccumuloCluster.java      | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/fb2c0c75/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
index 16b1381..c492e1b 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
@@ -244,11 +244,14 @@ public class MiniAccumuloCluster {
 
     File nativeMap = new File(config.getLibDir().getAbsolutePath() + "/native/map");
     nativeMap.mkdirs();
-    String testRoot = new File(new File(System.getProperty("user.dir")).getParent() + "/server/src/main/c++/nativeMap").getAbsolutePath();
-    for (String file : new File(testRoot).list()) {
-      File src = new File(testRoot, file);
-      if (src.isFile() && file.startsWith("libNativeMap"))
-        FileUtils.copyFile(src, new File(nativeMap, file));
+    File testRoot = new File(new File(new File(System.getProperty("user.dir")).getParent() + "/server/src/main/c++/nativeMap").getAbsolutePath());
+    
+    if (testRoot.exists()) {
+      for (String file : testRoot.list()) {
+        File src = new File(testRoot, file);
+        if (src.isFile() && file.startsWith("libNativeMap"))
+          FileUtils.copyFile(src, new File(nativeMap, file));
+      }
     }
   }
 

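The NPE being fixed here is the classic java.io.File.list() pitfall: it returns null, not an empty array, when the path does not exist or is not a directory, so iterating the result blindly throws. The exists() guard added above avoids it; equivalently:

  File testRoot = new File("/path/that/may/not/exist");
  String[] names = testRoot.list();   // null, not an empty array, when the directory is missing
  if (names != null) {
    for (String name : names) {
      // safe to use name here
    }
  }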

[49/50] [abbrv] git commit: ACCUMULO-1496 fix extra dependency

Posted by ct...@apache.org.
ACCUMULO-1496 fix extra dependency


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/964e7614
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/964e7614
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/964e7614

Branch: refs/heads/ACCUMULO-1496
Commit: 964e7614f69fdc4968faa8487ea51971c8be2963
Parents: f8b1c64
Author: Christopher Tubbs <ct...@apache.org>
Authored: Tue Jul 16 15:59:03 2013 -0400
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Tue Jul 16 15:59:03 2013 -0400

----------------------------------------------------------------------
 pom.xml | 5 -----
 1 file changed, 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/964e7614/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 9a2efea..1955f08 100644
--- a/pom.xml
+++ b/pom.xml
@@ -206,11 +206,6 @@
         <version>1.2.16</version>
       </dependency>
       <dependency>
-        <groupId>net.sf.scannotation</groupId>
-        <artifactId>scannotation</artifactId>
-        <version>1.0.2</version>
-      </dependency>
-      <dependency>
         <groupId>org.apache.accumulo</groupId>
         <artifactId>accumulo-core</artifactId>
         <version>${project.version}</version>


[22/50] [abbrv] git commit: ACCUMULO-1561 updating rpm-maven-plugin version

Posted by ct...@apache.org.
ACCUMULO-1561 updating rpm-maven-plugin version

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1501024 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/8d815a87
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/8d815a87
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/8d815a87

Branch: refs/heads/ACCUMULO-1496
Commit: 8d815a87793550a6240fc116184c5d20d3363847
Parents: 2d47964
Author: Mike Drob <md...@apache.org>
Authored: Mon Jul 8 23:50:17 2013 +0000
Committer: Mike Drob <md...@apache.org>
Committed: Mon Jul 8 23:50:17 2013 +0000

----------------------------------------------------------------------
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/8d815a87/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 6f0d4cd..04ca5d7 100644
--- a/pom.xml
+++ b/pom.xml
@@ -508,7 +508,7 @@
         <plugin>
           <groupId>org.codehaus.mojo</groupId>
           <artifactId>rpm-maven-plugin</artifactId>
-          <version>2.1-alpha-2</version>
+          <version>2.1-alpha-3</version>
         </plugin>
         <plugin>
           <groupId>org.eclipse.m2e</groupId>


[08/50] [abbrv] ACCUMULO-1481 : Add tests for splitting/merging root table; refactor to consolidate metadata constants and structures in an organized way; begin consolidating metadata ops into a servicer interface to abstract the code that actually does

Posted by ct...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java b/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
index 678eb8d..561d453 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/PermissionsIT.java
@@ -40,10 +40,10 @@ import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.SystemPermission;
 import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
 
@@ -51,7 +51,7 @@ public class PermissionsIT extends MacTest {
   private static final String TEST_USER = "test_user";
   private static final PasswordToken TEST_PASS = new PasswordToken("test_password");
   
-  @Test(timeout=60*1000)
+  @Test(timeout = 60 * 1000)
   public void systemPermissionsTest() throws Exception {
     // verify that the test is being run by root
     Connector c = getConnector();
@@ -87,16 +87,16 @@ public class PermissionsIT extends MacTest {
     }
   }
   
-  static Map<String, String> map(Iterable<Entry<String,String>> i) {
-    Map<String, String> result = new HashMap<String, String>();
-    for (Entry<String, String> e : i) {
+  static Map<String,String> map(Iterable<Entry<String,String>> i) {
+    Map<String,String> result = new HashMap<String,String>();
+    for (Entry<String,String> e : i) {
       result.put(e.getKey(), e.getValue());
     }
     return result;
   }
   
   private static void testMissingSystemPermission(Connector root_conn, Connector test_user_conn, SystemPermission perm) throws AccumuloException,
-  TableExistsException, AccumuloSecurityException, TableNotFoundException {
+      TableExistsException, AccumuloSecurityException, TableNotFoundException {
     String tableName, user, password = "password";
     log.debug("Confirming that the lack of the " + perm + " permission properly restricts the user");
     
@@ -196,7 +196,7 @@ public class PermissionsIT extends MacTest {
   }
   
   private static void testGrantedSystemPermission(Connector root_conn, Connector test_user_conn, SystemPermission perm) throws AccumuloException,
-  AccumuloSecurityException, TableNotFoundException, TableExistsException {
+      AccumuloSecurityException, TableNotFoundException, TableExistsException {
     String tableName, user, password = "password";
     log.debug("Confirming that the presence of the " + perm + " permission properly permits the user");
     
@@ -260,7 +260,7 @@ public class PermissionsIT extends MacTest {
   }
   
   private static void verifyHasOnlyTheseSystemPermissions(Connector root_conn, String user, SystemPermission... perms) throws AccumuloException,
-  AccumuloSecurityException {
+      AccumuloSecurityException {
     List<SystemPermission> permList = Arrays.asList(perms);
     for (SystemPermission p : SystemPermission.values()) {
       if (permList.contains(p)) {
@@ -276,7 +276,7 @@ public class PermissionsIT extends MacTest {
   }
   
   private static void verifyHasNoSystemPermissions(Connector root_conn, String user, SystemPermission... perms) throws AccumuloException,
-  AccumuloSecurityException {
+      AccumuloSecurityException {
     for (SystemPermission p : perms)
       if (root_conn.securityOperations().hasSystemPermission(user, p))
         throw new IllegalStateException(user + " SHOULD NOT have system permission " + p);
@@ -292,8 +292,7 @@ public class PermissionsIT extends MacTest {
     Connector test_user_conn = c.getInstance().getConnector(TEST_USER, TEST_PASS);
     
     // check for read-only access to metadata table
-    verifyHasOnlyTheseTablePermissions(c, c.whoami(), MetadataTable.NAME, TablePermission.READ,
-        TablePermission.ALTER_TABLE);
+    verifyHasOnlyTheseTablePermissions(c, c.whoami(), MetadataTable.NAME, TablePermission.READ, TablePermission.ALTER_TABLE);
     verifyHasOnlyTheseTablePermissions(c, TEST_USER, MetadataTable.NAME, TablePermission.READ);
     
     // test each permission
@@ -313,8 +312,7 @@ public class PermissionsIT extends MacTest {
     }
   }
   
-  private void createTestTable(Connector c) throws Exception,
-  MutationsRejectedException {
+  private void createTestTable(Connector c) throws Exception, MutationsRejectedException {
     if (!c.tableOperations().exists(TEST_TABLE)) {
       // create the test table
       c.tableOperations().create(TEST_TABLE);
@@ -364,8 +362,7 @@ public class PermissionsIT extends MacTest {
             writer.close();
           } catch (MutationsRejectedException e1) {
             if (e1.getAuthorizationFailuresMap().size() > 0)
-              throw new AccumuloSecurityException(test_user_conn.whoami(), org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode.PERMISSION_DENIED,
-                  e1);
+              throw new AccumuloSecurityException(test_user_conn.whoami(), org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode.PERMISSION_DENIED, e1);
           }
           throw new IllegalStateException("Should NOT be able to write to a table");
         } catch (AccumuloSecurityException e) {
@@ -411,7 +408,7 @@ public class PermissionsIT extends MacTest {
   }
   
   private static void testGrantedTablePermission(Connector root_conn, Connector test_user_conn, TablePermission perm) throws AccumuloException,
-  TableExistsException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
+      TableExistsException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
     Scanner scanner;
     BatchWriter writer;
     Mutation m;
@@ -451,7 +448,7 @@ public class PermissionsIT extends MacTest {
   }
   
   private static void verifyHasOnlyTheseTablePermissions(Connector root_conn, String user, String table, TablePermission... perms) throws AccumuloException,
-  AccumuloSecurityException {
+      AccumuloSecurityException {
     List<TablePermission> permList = Arrays.asList(perms);
     for (TablePermission p : TablePermission.values()) {
       if (permList.contains(p)) {
@@ -467,7 +464,7 @@ public class PermissionsIT extends MacTest {
   }
   
   private static void verifyHasNoTablePermissions(Connector root_conn, String user, String table, TablePermission... perms) throws AccumuloException,
-  AccumuloSecurityException {
+      AccumuloSecurityException {
     for (TablePermission p : perms)
       if (root_conn.securityOperations().hasTablePermission(user, table, p))
         throw new IllegalStateException(user + " SHOULD NOT have table permission " + p + " for table " + table);
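
As background for the assertions in this test, the pattern under exercise is grant-then-probe via SecurityOperations. A short sketch, assuming a root Connector c and the 1.5-era API:

  import org.apache.accumulo.core.client.security.tokens.PasswordToken;
  import org.apache.accumulo.core.security.SystemPermission;

  c.securityOperations().createLocalUser("test_user", new PasswordToken("test_password"));
  c.securityOperations().grantSystemPermission("test_user", SystemPermission.CREATE_TABLE);
  // Probe: should now be true; after revoking, it flips back to false.
  boolean can = c.securityOperations().hasSystemPermission("test_user", SystemPermission.CREATE_TABLE);
  c.securityOperations().revokeSystemPermission("test_user", SystemPermission.CREATE_TABLE);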

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
index d058724..bc89e20 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ReadWriteIT.java
@@ -44,8 +44,9 @@ import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.rfile.PrintInfo;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.monitor.Monitor;
 import org.apache.accumulo.server.util.Admin;
 import org.apache.accumulo.test.TestIngest;
@@ -60,7 +61,7 @@ public class ReadWriteIT extends MacTest {
   static final int COLS = 1;
   static final String COLF = "colf";
   
-  @Test(timeout=60*1000)
+  @Test(timeout = 60 * 1000)
   public void sunnyDay() throws Exception {
     // Start accumulo, create a table, insert some data, verify we can read it out.
     // Shutdown cleanly.
@@ -99,6 +100,7 @@ public class ReadWriteIT extends MacTest {
   private static void verify(Connector connector, int rows, int cols, int width, int offset) throws Exception {
     verify(connector, rows, cols, width, offset, COLF);
   }
+  
   private static void verify(Connector connector, int rows, int cols, int width, int offset, String colf) throws Exception {
     ScannerOpts scannerOpts = new ScannerOpts();
     VerifyIngest.Opts opts = new VerifyIngest.Opts();
@@ -110,11 +112,11 @@ public class ReadWriteIT extends MacTest {
     VerifyIngest.verifyIngest(connector, opts, scannerOpts);
   }
   
-  public static String[] args(String ...args) {
+  public static String[] args(String... args) {
     return args;
   }
   
-  @Test(timeout=60*1000)
+  @Test(timeout = 60 * 1000)
   public void multiTableTest() throws Exception {
     // Write to multiple tables
     String instance = cluster.getInstanceName();
@@ -123,7 +125,7 @@ public class ReadWriteIT extends MacTest {
     TestMultiTableIngest.main(args("--count", "" + ROWS, "--readonly", "-u", "root", "-i", instance, "-z", keepers, "-p", PASSWORD));
   }
   
-  @Test(timeout=60*1000)
+  @Test(timeout = 60 * 1000)
   public void largeTest() throws Exception {
     // write a few large values
     Connector connector = getConnector();
@@ -131,13 +133,13 @@ public class ReadWriteIT extends MacTest {
     verify(connector, 2, 1, 500000, 0);
   }
   
-  @Test(timeout=60*1000)
+  @Test(timeout = 60 * 1000)
   public void interleaved() throws Exception {
     // read and write concurrently
     final Connector connector = getConnector();
     interleaveTest(connector);
   }
- 
+  
   static void interleaveTest(final Connector connector) throws Exception {
     final AtomicBoolean fail = new AtomicBoolean(false);
     final int CHUNKSIZE = ROWS / 10;
@@ -146,6 +148,7 @@ public class ReadWriteIT extends MacTest {
     for (i = 0; i < ROWS; i += CHUNKSIZE) {
       final int start = i;
       Thread verify = new Thread() {
+        @Override
         public void run() {
           try {
             verify(connector, CHUNKSIZE, 1, 50, start);
@@ -161,7 +164,9 @@ public class ReadWriteIT extends MacTest {
     verify(connector, CHUNKSIZE, 1, 50, i);
   }
   
-  public static Text t(String s) { return new Text(s); }
+  public static Text t(String s) {
+    return new Text(s);
+  }
   
   public static Mutation m(String row, String cf, String cq, String value) {
     Mutation m = new Mutation(t(row));
@@ -169,10 +174,9 @@ public class ReadWriteIT extends MacTest {
     return m;
   }
   
-  
-  @Test(timeout=60*1000)
+  @Test(timeout = 60 * 1000)
   public void localityGroupPerf() throws Exception {
-    // verify that locality groups can make look-ups faster 
+    // verify that locality groups can make look-ups faster
     final Connector connector = getConnector();
     connector.tableOperations().create("test_ingest");
     connector.tableOperations().setProperty("test_ingest", "table.group.g1", "colf");
@@ -185,25 +189,27 @@ public class ReadWriteIT extends MacTest {
     long now = System.currentTimeMillis();
     Scanner scanner = connector.createScanner("test_ingest", Authorizations.EMPTY);
     scanner.fetchColumnFamily(new Text("colf"));
-    for (@SuppressWarnings("unused") Entry<Key,Value> entry : scanner)
+    for (@SuppressWarnings("unused")
+    Entry<Key,Value> entry : scanner)
       ;
     long diff = System.currentTimeMillis() - now;
     now = System.currentTimeMillis();
     scanner = connector.createScanner("test_ingest", Authorizations.EMPTY);
     scanner.fetchColumnFamily(new Text("colf2"));
-    for (@SuppressWarnings("unused") Entry<Key,Value> entry : scanner)
+    for (@SuppressWarnings("unused")
+    Entry<Key,Value> entry : scanner)
       ;
     bw.close();
     long diff2 = System.currentTimeMillis() - now;
     assertTrue(diff2 < diff);
   }
   
-  @Test(timeout=60*1000)
+  @Test(timeout = 60 * 1000)
   public void sunnyLG() throws Exception {
     // create a locality group, write to it and ensure it exists in the RFiles that result
     final Connector connector = getConnector();
     connector.tableOperations().create("test_ingest");
-    Map<String, Set<Text>> groups = new TreeMap<String, Set<Text>>();
+    Map<String,Set<Text>> groups = new TreeMap<String,Set<Text>>();
     groups.put("g1", Collections.singleton(t("colf")));
     connector.tableOperations().setLocalityGroups("test_ingest", groups);
     ingest(connector, 2000, 1, 50, 0);
@@ -212,9 +218,9 @@ public class ReadWriteIT extends MacTest {
     BatchScanner bscanner = connector.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 1);
     String tableId = connector.tableOperations().tableIdMap().get("test_ingest");
     bscanner.setRanges(Collections.singletonList(new Range(new Text(tableId + ";"), new Text(tableId + "<"))));
-    bscanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
+    bscanner.fetchColumnFamily(DataFileColumnFamily.NAME);
     boolean foundFile = false;
-    for (Entry<Key,Value> entry: bscanner) {
+    for (Entry<Key,Value> entry : bscanner) {
       foundFile = true;
       Process info = cluster.exec(PrintInfo.class, entry.getKey().getColumnQualifier().toString());
       assertEquals(0, info.waitFor());
@@ -226,34 +232,24 @@ public class ReadWriteIT extends MacTest {
     assertTrue(foundFile);
   }
   
-  @Test(timeout=90*1000)
+  @Test(timeout = 90 * 1000)
   public void localityGroupChange() throws Exception {
     // Make changes to locality groups and ensure nothing is lost
     final Connector connector = getConnector();
     TableOperations to = connector.tableOperations();
     to.create("test_ingest");
-    String[] config = new String[] {
-      "lg1:colf",
-      null,
-      "lg1:colf,xyz",
-      "lg1:colf,xyz;lg2:c1,c2"
-    };
+    String[] config = new String[] {"lg1:colf", null, "lg1:colf,xyz", "lg1:colf,xyz;lg2:c1,c2"};
     int i = 0;
     for (String cfg : config) {
       to.setLocalityGroups("test_ingest", getGroups(cfg));
-      ingest(connector, ROWS * (i+1), 1, 50, ROWS * i);
+      ingest(connector, ROWS * (i + 1), 1, 50, ROWS * i);
       to.flush("test_ingest", null, null, true);
-      verify(connector, 0, 1, 50, ROWS * (i+1));
+      verify(connector, 0, 1, 50, ROWS * (i + 1));
       i++;
     }
     to.delete("test_ingest");
     to.create("test_ingest");
-    config = new String[] {
-        "lg1:colf",
-        null,
-        "lg1:colf,xyz",
-        "lg1:colf;lg2:colf",
-    };
+    config = new String[] {"lg1:colf", null, "lg1:colf,xyz", "lg1:colf;lg2:colf",};
     i = 1;
     for (String cfg : config) {
       ingest(connector, ROWS * i, 1, 50, 0);
@@ -265,11 +261,11 @@ public class ReadWriteIT extends MacTest {
       i++;
     }
   }
-
+  
   private Map<String,Set<Text>> getGroups(String cfg) {
-    Map<String, Set<Text>> groups = new TreeMap<String, Set<Text>>();
+    Map<String,Set<Text>> groups = new TreeMap<String,Set<Text>>();
     if (cfg != null) {
-      for (String group: cfg.split(";")) {
+      for (String group : cfg.split(";")) {
         String[] parts = group.split(":");
         Set<Text> cols = new HashSet<Text>();
         for (String col : parts[1].split(",")) {
@@ -280,5 +276,5 @@ public class ReadWriteIT extends MacTest {
     }
     return groups;
   }
-
+  
 }
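
The locality-group tests above drive TableOperations.setLocalityGroups(), which maps group names to sets of column families; after a flush or compaction, each group's families are stored contiguously in the resulting RFiles, which is what localityGroupPerf times and sunnyLG verifies with PrintInfo. A sketch, given an open Connector connector:

  import java.util.Arrays;
  import java.util.HashSet;
  import java.util.Map;
  import java.util.Set;
  import java.util.TreeMap;
  import org.apache.hadoop.io.Text;

  // Equivalent of the test's "lg1:colf,xyz;lg2:c1,c2" configuration string.
  Map<String,Set<Text>> groups = new TreeMap<String,Set<Text>>();
  groups.put("lg1", new HashSet<Text>(Arrays.asList(new Text("colf"), new Text("xyz"))));
  groups.put("lg2", new HashSet<Text>(Arrays.asList(new Text("c1"), new Text("c2"))));
  connector.tableOperations().setLocalityGroups("test_ingest", groups);
  connector.tableOperations().flush("test_ingest", null, null, true);   // rewrite files with the new layout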

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
index dc4adc2..741e216 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
@@ -16,7 +16,8 @@
  */
 package org.apache.accumulo.test.functional;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.util.HashMap;
 import java.util.Map;
@@ -30,8 +31,9 @@ import org.apache.accumulo.core.conf.Property;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.minicluster.MiniAccumuloConfig;
 import org.apache.accumulo.server.util.CheckForMetadataProblems;
@@ -44,13 +46,13 @@ public class SplitIT extends MacTest {
   
   @Override
   public void configure(MiniAccumuloConfig cfg) {
-    Map<String, String> siteConfig = new HashMap<String, String>();
+    Map<String,String> siteConfig = new HashMap<String,String>();
     siteConfig.put(Property.TSERV_MAXMEM.getKey(), "5K");
-    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1");
+    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1s");
     cfg.setSiteConfig(siteConfig);
   }
-
-  @Test(timeout=60*1000)
+  
+  @Test(timeout = 120 * 1000)
   public void tabletShouldSplit() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");
@@ -62,12 +64,12 @@ public class SplitIT extends MacTest {
     VerifyIngest.Opts vopts = new VerifyIngest.Opts();
     vopts.rows = opts.rows;
     VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
-    UtilWaitThread.sleep(10*1000);
+    UtilWaitThread.sleep(15 * 1000);
     String id = c.tableOperations().tableIdMap().get("test_ingest");
     Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     KeyExtent extent = new KeyExtent(new Text(id), null, null);
     s.setRange(extent.toMetadataRange());
-    MetadataTable.PREV_ROW_COLUMN.fetch(s);
+    MetadataSchema.TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(s);
     int count = 0;
     int shortened = 0;
     for (Entry<Key,Value> entry : s) {
@@ -78,20 +80,23 @@ public class SplitIT extends MacTest {
     }
     assertTrue(shortened > 0);
     assertTrue(count > 10);
-    assertEquals(0, cluster.exec(CheckForMetadataProblems.class, "-i", cluster.getInstanceName(), "-u", "root", "-p", MacTest.PASSWORD, "-z", cluster.getZooKeepers()).waitFor());
+    assertEquals(0,
+        cluster.exec(CheckForMetadataProblems.class, "-i", cluster.getInstanceName(), "-u", "root", "-p", MacTest.PASSWORD, "-z", cluster.getZooKeepers())
+            .waitFor());
   }
   
-  @Test(timeout=60*1000)
+  @Test(timeout = 60 * 1000)
   public void interleaveSplit() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");
     c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
+    c.tableOperations().setProperty("test_ingest", Property.TABLE_FILE_COMPRESSION_TYPE.getKey(), "none");
     ReadWriteIT.interleaveTest(c);
-    UtilWaitThread.sleep(10*1000);
-    assertTrue(c.tableOperations().listSplits("test_ingest").size() > 20);
+    UtilWaitThread.sleep(5 * 1000);
+    assertTrue(c.tableOperations().listSplits("test_ingest").size() > 10);
   }
   
-  @Test(timeout=120*1000)
+  @Test(timeout = 120 * 1000)
   public void deleteSplit() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");

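The interleaveSplit hunk above also swaps the fixed 10-second sleep for a 5-second one and loosens the split-count assertion from 20 to 10, presumably because uncompressed files reach the 10K split threshold sooner. A more patient variant would poll instead of sleeping; the following is a sketch only, reusing the Connector c, UtilWaitThread, and assertTrue already imported by SplitIT:

    // Poll for splits up to a deadline rather than sleeping a fixed time.
    int splits = 0;
    long deadline = System.currentTimeMillis() + 60 * 1000;
    while (System.currentTimeMillis() < deadline) {
      // listSplits returns the table's current split points; the tablet
      // server adds them asynchronously once TABLE_SPLIT_THRESHOLD is crossed
      splits = c.tableOperations().listSplits("test_ingest").size();
      if (splits > 10)
        break;
      UtilWaitThread.sleep(500);
    }
    assertTrue("expected more than 10 splits, saw " + splits, splits > 10);
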
http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
index d92088e..1d02d91 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
@@ -1,4 +1,5 @@
 package org.apache.accumulo.test.functional;
+
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -15,7 +16,9 @@ package org.apache.accumulo.test.functional;
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 import java.util.Map.Entry;
 
@@ -26,9 +29,9 @@ import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.CachedConfiguration;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.util.Admin;
 import org.apache.accumulo.test.TestIngest;
 import org.apache.accumulo.test.VerifyIngest;
@@ -37,10 +40,9 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.junit.Test;
 
-
 public class TableIT extends MacTest {
   
-  @Test(timeout=60*1000)
+  @Test(timeout = 60 * 1000)
   public void test() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");
@@ -52,7 +54,8 @@ public class TableIT extends MacTest {
     Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
     s.setRange(new KeyExtent(new Text(id), null, null).toMetadataRange());
     int count = 0;
-    for (@SuppressWarnings("unused") Entry<Key,Value> entry : s) {
+    for (@SuppressWarnings("unused")
+    Entry<Key,Value> entry : s) {
       count++;
     }
     assertTrue(count > 0);
@@ -60,7 +63,8 @@ public class TableIT extends MacTest {
     assertTrue(fs.listStatus(new Path(cluster.getConfig().getDir() + "/accumulo/tables/" + id)).length > 0);
     c.tableOperations().delete("test_ingest");
     count = 0;
-    for (@SuppressWarnings("unused") Entry<Key,Value> entry : s) {
+    for (@SuppressWarnings("unused")
+    Entry<Key,Value> entry : s) {
       count++;
     }
     assertEquals(0, count);

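TableIT's counting loops above follow a small reusable pattern: resolve the table's internal id, then scan only that table's slice of the metadata table. A minimal standalone sketch, assuming a live Connector c and the same imports as the test:

    // A KeyExtent with null end row and null prev-end row spans every
    // tablet of the table, and toMetadataRange() converts that into the
    // matching row range in the metadata table.
    String id = c.tableOperations().tableIdMap().get("test_ingest");
    Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    s.setRange(new KeyExtent(new Text(id), null, null).toMetadataRange());
    int tablets = 0;
    for (@SuppressWarnings("unused")
    Entry<Key,Value> entry : s)
      tablets++;
    // after c.tableOperations().delete("test_ingest"), the same scan
    // returns nothing, which is what the assertEquals(0, count) verifies
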
http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
index c9c9d3b..2dc6802 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
@@ -16,6 +16,8 @@
  */
 package org.apache.accumulo.test.functional;
 
+import static org.junit.Assert.fail;
+
 import java.util.Collections;
 import java.util.Map.Entry;
 import java.util.concurrent.TimeUnit;
@@ -34,7 +36,6 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.junit.Test;
-import static org.junit.Assert.*;
 
 /**
  * 
@@ -53,7 +54,7 @@ public class TimeoutIT extends MacTest {
     conn.tableOperations().addConstraint("foo1", SlowConstraint.class.getName());
     
     // give constraint time to propagate through zookeeper
-    UtilWaitThread.sleep(250);
+    UtilWaitThread.sleep(1000);
     
     BatchWriter bw = conn.createBatchWriter("foo1", new BatchWriterConfig().setTimeout(3, TimeUnit.SECONDS));
     

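The longer 1000 ms sleep gives the SlowConstraint time to propagate through ZooKeeper before the write begins. The test then relies on BatchWriterConfig's client-side timeout; a sketch of that pattern, assuming the conn, table, and SlowConstraint set up above plus JUnit's fail and a MutationsRejectedException import:

    BatchWriter bw = conn.createBatchWriter("foo1",
        new BatchWriterConfig().setTimeout(3, TimeUnit.SECONDS));
    try {
      Mutation m = new Mutation("r1");
      m.put("cf", "cq", "v");
      bw.addMutation(m);
      bw.close();  // blocks on the slow constraint until the 3s timeout fires
      fail("expected MutationsRejectedException");
    } catch (MutationsRejectedException e) {
      // expected: the writer gave up instead of retrying forever
    }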

[06/50] [abbrv] git commit: ACCUMULO-1267 Record only merge from 1.4 onto trunk

Posted by ct...@apache.org.
ACCUMULO-1267 Record only merge from 1.4 onto trunk


git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1499203 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/e8cdd0b0
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/e8cdd0b0
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/e8cdd0b0

Branch: refs/heads/ACCUMULO-1496
Commit: e8cdd0b0011225a00c6d29fb8d4cffdad1e75c40
Parents: aea4313
Author: Bill Slacum <uj...@apache.org>
Authored: Wed Jul 3 04:18:29 2013 +0000
Committer: Bill Slacum <uj...@apache.org>
Committed: Wed Jul 3 04:18:29 2013 +0000

----------------------------------------------------------------------

----------------------------------------------------------------------



[04/50] [abbrv] git commit: ACCUMULO-1537 converted many more functional tests to integration tests

Posted by ct...@apache.org.
ACCUMULO-1537 converted many more functional tests to integration tests

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1499110 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/aea43136
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/aea43136
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/aea43136

Branch: refs/heads/ACCUMULO-1496
Commit: aea4313624e0bbe815c3de2c0a6aa4af4ad82c26
Parents: bca0567
Author: Eric C. Newton <ec...@apache.org>
Authored: Tue Jul 2 20:57:21 2013 +0000
Committer: Eric C. Newton <ec...@apache.org>
Committed: Tue Jul 2 20:57:21 2013 +0000

----------------------------------------------------------------------
 .../accumulo/core/conf/SiteConfiguration.java   |   2 +-
 .../server/util/CheckForMetadataProblems.java   |   2 +-
 .../accumulo/server/util/MetadataTable.java     |   2 +-
 .../accumulo/test/GCLotsOfCandidatesTest.java   |  53 --
 .../test/functional/FateStarvationTest.java     |  77 ---
 .../accumulo/test/functional/LargeRowTest.java  | 185 -------
 .../test/functional/LogicalTimeTest.java        | 112 -----
 .../accumulo/test/functional/MaxOpenTest.java   | 127 -----
 .../accumulo/test/functional/MergeTest.java     | 142 ------
 .../test/functional/PermissionsTest.java        | 504 -------------------
 .../accumulo/test/functional/RowDeleteTest.java | 113 -----
 .../test/functional/ScanIteratorTest.java       | 136 -----
 .../accumulo/test/functional/ScanRangeTest.java | 250 ---------
 .../test/functional/ScanSessionTimeOutTest.java | 112 -----
 .../test/functional/ServerSideErrorTest.java    | 135 -----
 .../test/functional/SparseColumnFamilyTest.java | 116 -----
 .../accumulo/test/functional/TimeoutTest.java   | 132 -----
 .../accumulo/test/functional/DeleteIT.java      |  11 +-
 .../test/functional/DynamicThreadPoolsIT.java   |   3 +-
 .../test/functional/FateStarvationIT.java       |  64 +++
 .../test/functional/FunctionalTestUtils.java    |  10 +
 .../test/functional/GarbageCollectorIT.java     | 122 +++++
 .../accumulo/test/functional/LargeRowIT.java    | 168 +++++++
 .../accumulo/test/functional/LogicalTimeIT.java |  99 ++++
 .../accumulo/test/functional/MapReduceIT.java   |  80 +++
 .../accumulo/test/functional/MaxOpenIT.java     | 132 +++++
 .../accumulo/test/functional/MergeIT.java       | 182 +++++++
 .../accumulo/test/functional/MergeMetaIT.java   |  61 +++
 .../accumulo/test/functional/PermissionsIT.java | 475 +++++++++++++++++
 .../accumulo/test/functional/ReadWriteIT.java   |  14 +-
 .../accumulo/test/functional/RowDeleteIT.java   | 116 +++++
 .../test/functional/ScanIteratorIT.java         | 125 +++++
 .../accumulo/test/functional/ScanRangeIT.java   | 233 +++++++++
 .../test/functional/ScanSessionTimeOutIT.java   | 104 ++++
 .../test/functional/ServerSideErrorIT.java      | 119 +++++
 .../accumulo/test/functional/ShutdownIT.java    | 104 ++++
 .../functional/SimpleBalancerFairnessIT.java    |  90 ++++
 .../test/functional/SparseColumnFamilyIT.java   | 102 ++++
 .../accumulo/test/functional/SplitIT.java       | 103 ++++
 .../accumulo/test/functional/StartIT.java       |  33 ++
 .../accumulo/test/functional/TableIT.java       |  75 +++
 .../accumulo/test/functional/TabletIT.java      |  47 ++
 .../accumulo/test/functional/TimeoutIT.java     | 115 +++++
 test/system/auto/simple/examples.py             | 348 -------------
 test/system/auto/simple/fateStartvation.py      |  30 --
 test/system/auto/simple/gc.py                   | 120 -----
 test/system/auto/simple/largeRow.py             |  30 --
 test/system/auto/simple/logicalTime.py          |  30 --
 test/system/auto/simple/mapreduce.py            | 142 ------
 test/system/auto/simple/maxOpen.py              |  30 --
 test/system/auto/simple/merge.py                | 107 ----
 test/system/auto/simple/mergeMetadata.py        |  73 ---
 test/system/auto/simple/range.py                |  29 --
 test/system/auto/simple/rowDelete.py            |  30 --
 test/system/auto/simple/scanIter.py             |  30 --
 test/system/auto/simple/scanSessionTimeout.py   |  30 --
 test/system/auto/simple/security.py             |  39 --
 test/system/auto/simple/serverSideError.py      |  30 --
 test/system/auto/simple/shutdown.py             | 108 ----
 test/system/auto/simple/simpleBalancer.py       | 116 -----
 test/system/auto/simple/sparseColumnFamily.py   |  30 --
 test/system/auto/simple/split.py                | 135 -----
 test/system/auto/simple/start.py                |  40 --
 test/system/auto/simple/table.py                | 120 -----
 test/system/auto/simple/tablets.py              |  55 --
 test/system/auto/simple/timeout.py              |  29 --
 66 files changed, 2780 insertions(+), 3938 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java b/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java
index b5017cf..b8e1337 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/SiteConfiguration.java
@@ -49,7 +49,7 @@ public class SiteConfiguration extends AccumuloConfiguration {
       xmlConfig = new Configuration(false);
       
       if (SiteConfiguration.class.getClassLoader().getResource(configFile) == null)
-        log.warn(configFile + " not found on classpath");
+        log.warn(configFile + " not found on classpath", new Throwable());
       else
         xmlConfig.addResource(configFile);
     }

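Passing a Throwable as the second argument makes log4j render a stack trace alongside the warning, so the log records which code path tried to load the missing file, not merely that it was missing. A minimal illustration of the idiom (the class name and config file are placeholders, not from this commit):

    import org.apache.log4j.Logger;

    public class ConfigLoader {
      private static final Logger log = Logger.getLogger(ConfigLoader.class);

      static void load(String configFile) {
        if (ConfigLoader.class.getClassLoader().getResource(configFile) == null)
          // the Throwable is never thrown; log4j only prints its stack
          // trace, which pinpoints the caller that asked for the file
          log.warn(configFile + " not found on classpath", new Throwable());
      }
    }
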
http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java b/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java
index dec4733..9c80927 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/CheckForMetadataProblems.java
@@ -195,7 +195,7 @@ public class CheckForMetadataProblems {
     checkMetadataTableEntries(opts, fs);
     opts.stopTracing();
     if (sawProblems)
-      System.exit(-1);
+      throw new RuntimeException();
   }
   
 }

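Throwing instead of calling System.exit(-1) lets a caller in the same JVM observe the failure, while a forked run, like SplitIT's cluster.exec(CheckForMetadataProblems.class, ...).waitFor() above, still sees a non-zero exit status because the exception escapes main(). A sketch of the in-process case, inside a test method declared throws Exception; the connection variables and JUnit's fail() are assumptions from the test context, not part of this commit:

    String[] args = {"-i", instanceName, "-u", "root", "-p", password, "-z", zooKeepers};
    try {
      CheckForMetadataProblems.main(args);
    } catch (RuntimeException e) {
      // problems were found; before this change the call would have
      // terminated the whole test JVM via System.exit(-1)
      fail("metadata problems detected");
    }
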
http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java b/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java
index 4531fed..3bd2c5b 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/MetadataTable.java
@@ -64,6 +64,7 @@ import org.apache.accumulo.core.util.StringUtil;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.core.zookeeper.ZooUtil;
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
+import org.apache.accumulo.fate.zookeeper.ZooLock;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
 import org.apache.accumulo.server.ServerConstants;
@@ -73,7 +74,6 @@ import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.master.state.TServerInstance;
 import org.apache.accumulo.server.security.SecurityConstants;
-import org.apache.accumulo.server.zookeeper.ZooLock;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/main/java/org/apache/accumulo/test/GCLotsOfCandidatesTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/GCLotsOfCandidatesTest.java b/test/src/main/java/org/apache/accumulo/test/GCLotsOfCandidatesTest.java
deleted file mode 100644
index 5b72fcd..0000000
--- a/test/src/main/java/org/apache/accumulo/test/GCLotsOfCandidatesTest.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import org.apache.accumulo.core.cli.BatchWriterOpts;
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.server.cli.ClientOpts;
-import org.apache.hadoop.io.Text;
-
-public class GCLotsOfCandidatesTest {
-  public static void main(String args[]) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
-    ClientOpts opts = new ClientOpts();
-    BatchWriterOpts bwOpts = new BatchWriterOpts();
-    opts.parseArgs(GCLotsOfCandidatesTest.class.getName(), args, bwOpts);
-    
-    Connector conn = opts.getConnector();
-    conn.securityOperations().grantTablePermission(conn.whoami(), MetadataTable.NAME, TablePermission.WRITE);
-    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, bwOpts.getBatchWriterConfig());
-    
-    for (int i = 0; i < 100000; ++i) {
-      final Text emptyText = new Text("");
-      Text row = new Text(String.format("%s%s%020d%s", MetadataTable.DELETED_RANGE.getStartKey().getRow().toString(), "/", i,
-          "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjj"));
-      Mutation delFlag = new Mutation(row);
-      delFlag.put(emptyText, emptyText, new Value(new byte[] {}));
-      bw.addMutation(delFlag);
-    }
-    bw.close();
-  }
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/main/java/org/apache/accumulo/test/functional/FateStarvationTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/FateStarvationTest.java b/test/src/main/java/org/apache/accumulo/test/functional/FateStarvationTest.java
deleted file mode 100644
index 01cf658..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/FateStarvationTest.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-
-import org.apache.accumulo.test.TestIngest;
-import org.apache.hadoop.io.Text;
-
-/**
- * See ACCUMULO-779
- */
-public class FateStarvationTest extends FunctionalTest {
-  
-  @Override
-  public void cleanup() throws Exception {}
-  
-  @Override
-  public Map<String,String> getInitialConfig() {
-    return Collections.emptyMap();
-  }
-  
-  @Override
-  public List<TableSetup> getTablesToCreate() {
-    return Collections.emptyList();
-  }
-  
-  @Override
-  public void run() throws Exception {
-    getConnector().tableOperations().create("test_ingest");
-    
-    getConnector().tableOperations().addSplits("test_ingest", TestIngest.getSplitPoints(0, 100000, 50));
-    
-    TestIngest.main(new String[] {"-random", "89", "-timestamp", "7", "-size", "" + 50, "100000", "0", "1"});
-    
-    getConnector().tableOperations().flush("test_ingest", null, null, true);
-    
-    List<Text> splits = new ArrayList<Text>(TestIngest.getSplitPoints(0, 100000, 67));
-    Random rand = new Random();
-    
-    for (int i = 0; i < 100; i++) {
-      int idx1 = rand.nextInt(splits.size() - 1);
-      int idx2 = rand.nextInt(splits.size() - (idx1 + 1)) + idx1 + 1;
-      
-      getConnector().tableOperations().compact("test_ingest", splits.get(idx1), splits.get(idx2), false, false);
-    }
-    
-    getConnector().tableOperations().offline("test_ingest");
-  }
-  
-  public static void main(String[] args) throws Exception {
-    ArrayList<String> argsList = new ArrayList<String>();
-    argsList.addAll(Arrays.asList(args));
-    argsList.addAll(Arrays.asList(FateStarvationTest.class.getName(), "run"));
-    FunctionalTest.main(argsList.toArray(new String[0]));
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/main/java/org/apache/accumulo/test/functional/LargeRowTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/LargeRowTest.java b/test/src/main/java/org/apache/accumulo/test/functional/LargeRowTest.java
deleted file mode 100644
index 4e9ded5..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/LargeRowTest.java
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Random;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.hadoop.io.Text;
-import org.apache.log4j.Logger;
-
-public class LargeRowTest extends FunctionalTest {
-  
-  private static final int SEED = 42;
-  private static final String REG_TABLE_NAME = "lr";
-  private static final String PRE_SPLIT_TABLE_NAME = "lrps";
-  private static final int NUM_ROWS = 100;
-  private static final int ROW_SIZE = 1 << 17;
-  private static final int NUM_PRE_SPLITS = 9;
-  private static final int SPLIT_THRESH = ROW_SIZE * NUM_ROWS / NUM_PRE_SPLITS;
-  
-  @Override
-  public void cleanup() {}
-  
-  @Override
-  public Map<String,String> getInitialConfig() {
-    return parseConfig(Property.TSERV_MAJC_DELAY + "=10ms");
-  }
-  
-  @Override
-  public List<TableSetup> getTablesToCreate() {
-    
-    Random r = new Random();
-    byte rowData[] = new byte[ROW_SIZE];
-    r.setSeed(SEED + 1);
-    
-    TreeSet<Text> splitPoints = new TreeSet<Text>();
-    
-    for (int i = 0; i < NUM_PRE_SPLITS; i++) {
-      r.nextBytes(rowData);
-      TestIngest.toPrintableChars(rowData);
-      splitPoints.add(new Text(rowData));
-    }
-    
-    ArrayList<TableSetup> tables = new ArrayList<TableSetup>();
-    
-    tables.add(new TableSetup(REG_TABLE_NAME));
-    tables.add(new TableSetup(PRE_SPLIT_TABLE_NAME, splitPoints));
-    
-    return tables;
-    // return Collections.singletonList(new TableSetup(TABLE_NAME));
-  }
-  
-  @Override
-  public void run() throws Exception {
-    
-    // Logger logger = Logger.getLogger(Constants.CORE_PACKAGE_NAME);
-    // logger.setLevel(Level.TRACE);
-    
-    test1();
-    test2();
-  }
-  
-  private void test1() throws Exception {
-    
-    basicTest(REG_TABLE_NAME, 0);
-    
-    getConnector().tableOperations().setProperty(REG_TABLE_NAME, Property.TABLE_SPLIT_THRESHOLD.getKey(), "" + SPLIT_THRESH);
-    
-    UtilWaitThread.sleep(12000);
-    Logger.getLogger(LargeRowTest.class).warn("checking splits");
-    checkSplits(REG_TABLE_NAME, NUM_PRE_SPLITS / 2, NUM_PRE_SPLITS * 4);
-    
-    verify(REG_TABLE_NAME);
-  }
-  
-  private void test2() throws Exception {
-    basicTest(PRE_SPLIT_TABLE_NAME, NUM_PRE_SPLITS);
-  }
-  
-  private void basicTest(String table, int expectedSplits) throws Exception {
-    BatchWriter bw = getConnector().createBatchWriter(table, new BatchWriterConfig());
-    
-    Random r = new Random();
-    byte rowData[] = new byte[ROW_SIZE];
-    
-    r.setSeed(SEED);
-    
-    for (int i = 0; i < NUM_ROWS; i++) {
-      
-      r.nextBytes(rowData);
-      TestIngest.toPrintableChars(rowData);
-      
-      Mutation mut = new Mutation(new Text(rowData));
-      mut.put(new Text(""), new Text(""), new Value(("" + i).getBytes()));
-      bw.addMutation(mut);
-    }
-    
-    bw.close();
-    
-    checkSplits(table, expectedSplits, expectedSplits);
-    
-    verify(table);
-    
-    checkSplits(table, expectedSplits, expectedSplits);
-    
-    getConnector().tableOperations().flush(table, null, null, false);
-    
-    // verify while table flush is running
-    verify(table);
-    
-    // give split time to complete
-    getConnector().tableOperations().flush(table, null, null, true);
-    
-    checkSplits(table, expectedSplits, expectedSplits);
-    
-    verify(table);
-    
-    checkSplits(table, expectedSplits, expectedSplits);
-  }
-  
-  private void verify(String table) throws Exception {
-    Random r = new Random();
-    byte rowData[] = new byte[ROW_SIZE];
-    
-    r.setSeed(SEED);
-    
-    Scanner scanner = getConnector().createScanner(table, Authorizations.EMPTY);
-    
-    for (int i = 0; i < NUM_ROWS; i++) {
-      
-      r.nextBytes(rowData);
-      TestIngest.toPrintableChars(rowData);
-      
-      scanner.setRange(new Range(new Text(rowData)));
-      
-      int count = 0;
-      
-      for (Entry<Key,Value> entry : scanner) {
-        if (!entry.getKey().getRow().equals(new Text(rowData))) {
-          throw new Exception("verification failed, unexpected row i =" + i);
-        }
-        if (!entry.getValue().equals(Integer.toString(i).getBytes())) {
-          throw new Exception("verification failed, unexpected value i =" + i + " value = " + entry.getValue());
-        }
-        count++;
-      }
-      
-      if (count != 1) {
-        throw new Exception("verification failed, unexpected count i =" + i + " count=" + count);
-      }
-      
-    }
-    
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeTest.java b/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeTest.java
deleted file mode 100644
index 8829b92..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/LogicalTimeTest.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.TimeType;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.hadoop.io.Text;
-
-public class LogicalTimeTest extends FunctionalTest {
-  
-  @Override
-  public Map<String,String> getInitialConfig() {
-    return Collections.emptyMap();
-  }
-  
-  @Override
-  public List<TableSetup> getTablesToCreate() {
-    return Collections.emptyList();
-  }
-  
-  @Override
-  public void run() throws Exception {
-    int tc = 0;
-    
-    runMergeTest("foo" + tc++, new String[] {"m"}, new String[] {"a"}, null, null, "b", 2l);
-    runMergeTest("foo" + tc++, new String[] {"m"}, new String[] {"z"}, null, null, "b", 2l);
-    runMergeTest("foo" + tc++, new String[] {"m"}, new String[] {"a", "z"}, null, null, "b", 2l);
-    runMergeTest("foo" + tc++, new String[] {"m"}, new String[] {"a", "c", "z"}, null, null, "b", 3l);
-    runMergeTest("foo" + tc++, new String[] {"m"}, new String[] {"a", "y", "z"}, null, null, "b", 3l);
-    
-    runMergeTest("foo" + tc++, new String[] {"g", "r"}, new String[] {"a"}, null, null, "b", 2l);
-    runMergeTest("foo" + tc++, new String[] {"g", "r"}, new String[] {"h"}, null, null, "b", 2l);
-    runMergeTest("foo" + tc++, new String[] {"g", "r"}, new String[] {"s"}, null, null, "b", 2l);
-    runMergeTest("foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s"}, null, null, "b", 2l);
-    runMergeTest("foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "c", "h", "s"}, null, null, "b", 3l);
-    runMergeTest("foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s", "i"}, null, null, "b", 3l);
-    runMergeTest("foo" + tc++, new String[] {"g", "r"}, new String[] {"t", "a", "h", "s"}, null, null, "b", 3l);
-    
-    runMergeTest("foo" + tc++, new String[] {"g", "r"}, new String[] {"a"}, null, "h", "b", 2l);
-    runMergeTest("foo" + tc++, new String[] {"g", "r"}, new String[] {"h"}, null, "h", "b", 2l);
-    runMergeTest("foo" + tc++, new String[] {"g", "r"}, new String[] {"s"}, null, "h", "b", 1l);
-    runMergeTest("foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s"}, null, "h", "b", 2l);
-    runMergeTest("foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "c", "h", "s"}, null, "h", "b", 3l);
-    runMergeTest("foo" + tc++, new String[] {"g", "r"}, new String[] {"a", "h", "s", "i"}, null, "h", "b", 3l);
-    runMergeTest("foo" + tc++, new String[] {"g", "r"}, new String[] {"t", "a", "h", "s"}, null, "h", "b", 2l);
-    
-  }
-  
-  private void runMergeTest(String table, String[] splits, String[] inserts, String start, String end, String last, long expected) throws Exception {
-    Connector conn = super.getConnector();
-    conn.tableOperations().create(table, true, TimeType.LOGICAL);
-    TreeSet<Text> splitSet = new TreeSet<Text>();
-    for (String split : splits) {
-      splitSet.add(new Text(split));
-    }
-    conn.tableOperations().addSplits(table, splitSet);
-    
-    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
-    for (String row : inserts) {
-      Mutation m = new Mutation(row);
-      m.put("cf", "cq", "v");
-      bw.addMutation(m);
-    }
-    
-    bw.flush();
-    
-    conn.tableOperations().merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end));
-    
-    Mutation m = new Mutation(last);
-    m.put("cf", "cq", "v");
-    bw.addMutation(m);
-    bw.flush();
-    
-    Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
-    scanner.setRange(new Range(last));
-    
-    bw.close();
-    
-    long time = scanner.iterator().next().getKey().getTimestamp();
-    if (time != expected)
-      throw new RuntimeException("unexpected time " + time + " " + expected);
-  }
-  
-  @Override
-  public void cleanup() throws Exception {}
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/main/java/org/apache/accumulo/test/functional/MaxOpenTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MaxOpenTest.java b/test/src/main/java/org/apache/accumulo/test/functional/MaxOpenTest.java
deleted file mode 100644
index 6dea01c..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/MaxOpenTest.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Random;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.test.TestIngest;
-import org.apache.accumulo.test.VerifyIngest;
-
-/**
- * A functional test that exercises hitting the max open file limit on a tablet server. This test assumes there are one or two tablet servers.
- * 
- * 
- */
-
-public class MaxOpenTest extends FunctionalTest {
-  
-  private static final int NUM_TABLETS = 16;
-  private static final int NUM_TO_INGEST = 10000;
-  
-  @Override
-  public void cleanup() {}
-  
-  @Override
-  public Map<String,String> getInitialConfig() {
-    return parseConfig(Property.TSERV_SCAN_MAX_OPENFILES + "=4", Property.TSERV_MAJC_MAXCONCURRENT + "=1", Property.TSERV_MAJC_THREAD_MAXOPEN + "=2");
-  }
-  
-  @Override
-  public List<TableSetup> getTablesToCreate() {
-    Map<String,String> config = parseConfig(Property.TABLE_MAJC_RATIO + "=10");
-    TableSetup ts = new TableSetup("test_ingest", config, TestIngest.getSplitPoints(0, NUM_TO_INGEST, NUM_TABLETS));
-    return Collections.singletonList(ts);
-  }
-  
-  @Override
-  public void run() throws Exception {
-    
-    // the following loop should create three tablets in each map file
-    for (int i = 0; i < 3; i++) {
-      
-      TestIngest.main(new String[] {"-random", "" + i, "-timestamp", "" + i, "-size", "" + 50, "" + NUM_TO_INGEST, "0", "1"});
-      
-      getConnector().tableOperations().flush("test_ingest", null, null, true);
-      checkRFiles("test_ingest", NUM_TABLETS, NUM_TABLETS, i + 1, i + 1);
-    }
-    
-    List<Range> ranges = new ArrayList<Range>(NUM_TO_INGEST);
-    
-    for (int i = 0; i < NUM_TO_INGEST; i++) {
-      ranges.add(new Range(TestIngest.generateRow(i, 0)));
-    }
-    
-    long time1 = batchScan(ranges, 1);
-    // run it again, now that stuff is cached on the client and server
-    time1 = batchScan(ranges, 1);
-    long time2 = batchScan(ranges, NUM_TABLETS);
-    
-    System.out.printf("Single thread scan time   %6.2f %n", time1 / 1000.0);
-    System.out.printf("Multiple thread scan time %6.2f %n", time2 / 1000.0);
-    
-  }
-  
-  private long batchScan(List<Range> ranges, int threads) throws Exception {
-    BatchScanner bs = getConnector().createBatchScanner("test_ingest", TestIngest.AUTHS, threads);
-    
-    bs.setRanges(ranges);
-    
-    int count = 0;
-    
-    long t1 = System.currentTimeMillis();
-    
-    byte rval[] = new byte[50];
-    Random random = new Random();
-    
-    for (Entry<Key,Value> entry : bs) {
-      count++;
-      int row = VerifyIngest.getRow(entry.getKey());
-      int col = VerifyIngest.getCol(entry.getKey());
-      
-      if (row < 0 || row >= NUM_TO_INGEST) {
-        throw new Exception("unexcepted row " + row);
-      }
-      
-      rval = TestIngest.genRandomValue(random, rval, 2, row, col);
-      
-      if (entry.getValue().compareTo(rval) != 0) {
-        throw new Exception("unexcepted value row=" + row + " col=" + col);
-      }
-    }
-    
-    long t2 = System.currentTimeMillis();
-    
-    bs.close();
-    
-    if (count != NUM_TO_INGEST) {
-      throw new Exception("Batch Scan did not return expected number of values " + count);
-    }
-    
-    return t2 - t1;
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/main/java/org/apache/accumulo/test/functional/MergeTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MergeTest.java b/test/src/main/java/org/apache/accumulo/test/functional/MergeTest.java
deleted file mode 100644
index f997ff2..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/MergeTest.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.admin.TimeType;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.hadoop.io.Text;
-
-/**
- * 
- */
-public class MergeTest extends FunctionalTest {
-  
-  @Override
-  public Map<String,String> getInitialConfig() {
-    return Collections.emptyMap();
-  }
-  
-  @Override
-  public List<TableSetup> getTablesToCreate() {
-    return Collections.emptyList();
-  }
-  
-  private String[] ns(String... strings) {
-    return strings;
-  }
-  
-  @Override
-  public void run() throws Exception {
-    int tc = 0;
-    
-    runMergeTest("foo" + tc++, ns(), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
-    
-    runMergeTest("foo" + tc++, ns("m"), ns(), ns("l", "m", "n"), ns(null, "l"), ns(null, "n"));
-    runMergeTest("foo" + tc++, ns("m"), ns("m"), ns("l", "m", "n"), ns("m", "n"), ns(null, "z"));
-    runMergeTest("foo" + tc++, ns("m"), ns("m"), ns("l", "m", "n"), ns(null, "b"), ns("l", "m"));
-    
-    runMergeTest("foo" + tc++, ns("b", "m", "r"), ns(), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns(null, "s"));
-    runMergeTest("foo" + tc++, ns("b", "m", "r"), ns("m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("c", "m"));
-    runMergeTest("foo" + tc++, ns("b", "m", "r"), ns("r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("n", "r"));
-    runMergeTest("foo" + tc++, ns("b", "m", "r"), ns("b"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns(null, "s"));
-    runMergeTest("foo" + tc++, ns("b", "m", "r"), ns("b", "m"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns(null, "s"));
-    runMergeTest("foo" + tc++, ns("b", "m", "r"), ns("b", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("q", "r"));
-    runMergeTest("foo" + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns(null, "a"), ns("aa", "b"));
-    runMergeTest("foo" + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("r", "s"), ns(null, "z"));
-    runMergeTest("foo" + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("b", "c"), ns("l", "m"));
-    runMergeTest("foo" + tc++, ns("b", "m", "r"), ns("b", "m", "r"), ns("a", "b", "c", "l", "m", "n", "q", "r", "s"), ns("m", "n"), ns("q", "r"));
-    
-  }
-  
-  private void runMergeTest(String table, String[] splits, String[] expectedSplits, String[] inserts, String[] start, String[] end) throws Exception {
-    int count = 0;
-    
-    for (String s : start) {
-      for (String e : end) {
-        runMergeTest(table + "_" + count++, splits, expectedSplits, inserts, s, e);
-      }
-    }
-  }
-  
-  private void runMergeTest(String table, String[] splits, String[] expectedSplits, String[] inserts, String start, String end) throws Exception {
-    System.out.println("Running merge test " + table + " " + Arrays.asList(splits) + " " + start + " " + end);
-    
-    Connector conn = super.getConnector();
-    conn.tableOperations().create(table, true, TimeType.LOGICAL);
-    TreeSet<Text> splitSet = new TreeSet<Text>();
-    for (String split : splits) {
-      splitSet.add(new Text(split));
-    }
-    conn.tableOperations().addSplits(table, splitSet);
-    
-    BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
-    HashSet<String> expected = new HashSet<String>();
-    for (String row : inserts) {
-      Mutation m = new Mutation(row);
-      m.put("cf", "cq", row);
-      bw.addMutation(m);
-      expected.add(row);
-    }
-    
-    bw.close();
-    
-    conn.tableOperations().merge(table, start == null ? null : new Text(start), end == null ? null : new Text(end));
-    
-    Scanner scanner = conn.createScanner(table, Authorizations.EMPTY);
-    
-    HashSet<String> observed = new HashSet<String>();
-    for (Entry<Key,Value> entry : scanner) {
-      String row = entry.getKey().getRowData().toString();
-      if (!observed.add(row)) {
-        throw new Exception("Saw data twice " + table + " " + row);
-      }
-    }
-    
-    if (!observed.equals(expected)) {
-      throw new Exception("data inconsistency " + table + " " + observed + " != " + expected);
-    }
-    
-    HashSet<Text> currentSplits = new HashSet<Text>(conn.tableOperations().listSplits(table));
-    HashSet<Text> ess = new HashSet<Text>();
-    for (String es : expectedSplits) {
-      ess.add(new Text(es));
-    }
-    
-    if (!currentSplits.equals(ess)) {
-      throw new Exception("split inconsistency " + table + " " + currentSplits + " != " + ess);
-    }
-  }
-  
-  @Override
-  public void cleanup() throws Exception {}
-  
-}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/main/java/org/apache/accumulo/test/functional/PermissionsTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/PermissionsTest.java b/test/src/main/java/org/apache/accumulo/test/functional/PermissionsTest.java
deleted file mode 100644
index 4dbd966..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/PermissionsTest.java
+++ /dev/null
@@ -1,504 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-
-import org.apache.accumulo.core.client.AccumuloException;
-import org.apache.accumulo.core.client.AccumuloSecurityException;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Instance;
-import org.apache.accumulo.core.client.MutationsRejectedException;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.TableExistsException;
-import org.apache.accumulo.core.client.TableNotFoundException;
-import org.apache.accumulo.core.client.impl.Tables;
-import org.apache.accumulo.core.client.security.SecurityErrorCode;
-import org.apache.accumulo.core.client.security.tokens.PasswordToken;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.security.SystemPermission;
-import org.apache.accumulo.core.security.TablePermission;
-import org.apache.accumulo.core.util.MetadataTable;
-import org.apache.accumulo.server.conf.ServerConfiguration;
-import org.apache.hadoop.io.Text;
-import org.apache.log4j.Logger;
-
-public class PermissionsTest {
-  private static final String TEST_USER = "test_user";
-  private static final PasswordToken TEST_PASS = new PasswordToken("test_password");
-  
-  public static class SystemPermissionsTest extends FunctionalTest {
-    private static final Logger log = Logger.getLogger(SystemPermissionsTest.class);
-    
-    @Override
-    public void cleanup() throws Exception {}
-    
-    @Override
-    public Map<String,String> getInitialConfig() {
-      return Collections.emptyMap();
-    }
-    
-    @Override
-    public List<TableSetup> getTablesToCreate() {
-      return Collections.emptyList();
-    }
-    
-    @Override
-    public void run() throws AccumuloException, AccumuloSecurityException, TableNotFoundException, TableExistsException {
-      // verify that the test is being run by root
-      verifyHasOnlyTheseSystemPermissions(getConnector(), getConnector().whoami(), SystemPermission.values());
-      
-      // create the test user
-      getConnector().securityOperations().createLocalUser(TEST_USER, TEST_PASS);
-      Connector test_user_conn = getInstance().getConnector(TEST_USER, TEST_PASS);
-      verifyHasNoSystemPermissions(getConnector(), TEST_USER, SystemPermission.values());
-      
-      // test each permission
-      for (SystemPermission perm : SystemPermission.values()) {
-        log.debug("Verifying the " + perm + " permission");
-        
-        // verify GRANT can't be granted
-        if (perm.equals(SystemPermission.GRANT)) {
-          try {
-            getConnector().securityOperations().grantSystemPermission(TEST_USER, perm);
-          } catch (AccumuloSecurityException e) {
-            verifyHasNoSystemPermissions(getConnector(), TEST_USER, perm);
-            continue;
-          }
-          throw new IllegalStateException("Should NOT be able to grant GRANT");
-        }
-        
-        // test permission before and after granting it
-        testMissingSystemPermission(getConnector(), test_user_conn, perm);
-        getConnector().securityOperations().grantSystemPermission(TEST_USER, perm);
-        verifyHasOnlyTheseSystemPermissions(getConnector(), TEST_USER, perm);
-        testGrantedSystemPermission(getConnector(), test_user_conn, perm);
-        getConnector().securityOperations().revokeSystemPermission(TEST_USER, perm);
-        verifyHasNoSystemPermissions(getConnector(), TEST_USER, perm);
-      }
-    }
-    
-    private static void testMissingSystemPermission(Connector root_conn, Connector test_user_conn, SystemPermission perm) throws AccumuloException,
-        TableExistsException, AccumuloSecurityException, TableNotFoundException {
-      String tableName, tableId, user, password = "password";
-      log.debug("Confirming that the lack of the " + perm + " permission properly restricts the user");
-      
-      // test permission prior to granting it
-      switch (perm) {
-        case CREATE_TABLE:
-          tableName = "__CREATE_TABLE_WITHOUT_PERM_TEST__";
-          try {
-            test_user_conn.tableOperations().create(tableName);
-            throw new IllegalStateException("Should NOT be able to create a table");
-          } catch (AccumuloSecurityException e) {
-            if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || root_conn.tableOperations().list().contains(tableName))
-              throw e;
-          }
-          break;
-        case DROP_TABLE:
-          tableName = "__DROP_TABLE_WITHOUT_PERM_TEST__";
-          root_conn.tableOperations().create(tableName);
-          try {
-            test_user_conn.tableOperations().delete(tableName);
-            throw new IllegalStateException("Should NOT be able to delete a table");
-          } catch (AccumuloSecurityException e) {
-            if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.tableOperations().list().contains(tableName))
-              throw e;
-          }
-          break;
-        case ALTER_TABLE:
-          tableName = "__ALTER_TABLE_WITHOUT_PERM_TEST__";
-          root_conn.tableOperations().create(tableName);
-          tableId = Tables.getNameToIdMap(root_conn.getInstance()).get(tableName);
-          try {
-            test_user_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
-            throw new IllegalStateException("Should NOT be able to set a table property");
-          } catch (AccumuloSecurityException e) {
-            if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-                || ServerConfiguration.getTableConfiguration(root_conn.getInstance(), tableId).get(Property.TABLE_BLOOM_ERRORRATE).equals("003.14159%"))
-              throw e;
-          }
-          root_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
-          try {
-            test_user_conn.tableOperations().removeProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey());
-            throw new IllegalStateException("Should NOT be able to remove a table property");
-          } catch (AccumuloSecurityException e) {
-            if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-                || !ServerConfiguration.getTableConfiguration(root_conn.getInstance(), tableId).get(Property.TABLE_BLOOM_ERRORRATE).equals("003.14159%"))
-              throw e;
-          }
-          String table2 = tableName + "2";
-          try {
-            test_user_conn.tableOperations().rename(tableName, table2);
-            throw new IllegalStateException("Should NOT be able to rename a table");
-          } catch (AccumuloSecurityException e) {
-            if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.tableOperations().list().contains(tableName)
-                || root_conn.tableOperations().list().contains(table2))
-              throw e;
-          }
-          break;
-        case CREATE_USER:
-          user = "__CREATE_USER_WITHOUT_PERM_TEST__";
-          try {
-            test_user_conn.securityOperations().createLocalUser(user, new PasswordToken(password));
-            throw new IllegalStateException("Should NOT be able to create a user");
-          } catch (AccumuloSecurityException e) {
-            if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-                || root_conn.securityOperations().authenticateUser(user, new PasswordToken(password)))
-              throw e;
-          }
-          break;
-        case DROP_USER:
-          user = "__DROP_USER_WITHOUT_PERM_TEST__";
-          root_conn.securityOperations().createLocalUser(user, new PasswordToken(password));
-          try {
-            test_user_conn.securityOperations().dropLocalUser(user);
-            throw new IllegalStateException("Should NOT be able to delete a user");
-          } catch (AccumuloSecurityException e) {
-            if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED
-                || !root_conn.securityOperations().authenticateUser(user, new PasswordToken(password)))
-              throw e;
-          }
-          break;
-        case ALTER_USER:
-          user = "__ALTER_USER_WITHOUT_PERM_TEST__";
-          root_conn.securityOperations().createLocalUser(user, new PasswordToken(password));
-          try {
-            test_user_conn.securityOperations().changeUserAuthorizations(user, new Authorizations("A", "B"));
-            throw new IllegalStateException("Should NOT be able to alter a user");
-          } catch (AccumuloSecurityException e) {
-            if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED || !root_conn.securityOperations().getUserAuthorizations(user).isEmpty())
-              throw e;
-          }
-          break;
-        case SYSTEM:
-          // test for system permission would go here
-          break;
-        default:
-          throw new IllegalArgumentException("Unrecognized System Permission: " + perm);
-      }
-    }
-    
-    private static void testGrantedSystemPermission(Connector root_conn, Connector test_user_conn, SystemPermission perm) throws AccumuloException,
-        AccumuloSecurityException, TableNotFoundException, TableExistsException {
-      String tableName, tableId, user, password = "password";
-      log.debug("Confirming that the presence of the " + perm + " permission properly permits the user");
-      
-      // test permission after granting it
-      switch (perm) {
-        case CREATE_TABLE:
-          tableName = "__CREATE_TABLE_WITH_PERM_TEST__";
-          test_user_conn.tableOperations().create(tableName);
-          if (!root_conn.tableOperations().list().contains(tableName))
-            throw new IllegalStateException("Should be able to create a table");
-          break;
-        case DROP_TABLE:
-          tableName = "__DROP_TABLE_WITH_PERM_TEST__";
-          root_conn.tableOperations().create(tableName);
-          test_user_conn.tableOperations().delete(tableName);
-          if (root_conn.tableOperations().list().contains(tableName))
-            throw new IllegalStateException("Should be able to delete a table");
-          break;
-        case ALTER_TABLE:
-          tableName = "__ALTER_TABLE_WITH_PERM_TEST__";
-          String table2 = tableName + "2";
-          root_conn.tableOperations().create(tableName);
-          tableId = Tables.getNameToIdMap(root_conn.getInstance()).get(tableName);
-          Instance instance = root_conn.getInstance();
-          test_user_conn.tableOperations().setProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey(), "003.14159%");
-          if (!ServerConfiguration.getTableConfiguration(instance, tableId).get(Property.TABLE_BLOOM_ERRORRATE).equals("003.14159%"))
-            throw new IllegalStateException("Should be able to set a table property");
-          test_user_conn.tableOperations().removeProperty(tableName, Property.TABLE_BLOOM_ERRORRATE.getKey());
-          if (ServerConfiguration.getTableConfiguration(instance, tableId).get(Property.TABLE_BLOOM_ERRORRATE).equals("003.14159%"))
-            throw new IllegalStateException("Should be able to remove a table property");
-          test_user_conn.tableOperations().rename(tableName, table2);
-          if (root_conn.tableOperations().list().contains(tableName) || !root_conn.tableOperations().list().contains(table2))
-            throw new IllegalStateException("Should be able to rename a table");
-          break;
-        case CREATE_USER:
-          user = "__CREATE_USER_WITH_PERM_TEST__";
-          test_user_conn.securityOperations().createLocalUser(user, new PasswordToken(password));
-          if (!root_conn.securityOperations().authenticateUser(user, new PasswordToken(password)))
-            throw new IllegalStateException("Should be able to create a user");
-          break;
-        case DROP_USER:
-          user = "__DROP_USER_WITH_PERM_TEST__";
-          root_conn.securityOperations().createLocalUser(user, new PasswordToken(password));
-          test_user_conn.securityOperations().dropLocalUser(user);
-          if (root_conn.securityOperations().authenticateUser(user, new PasswordToken(password)))
-            throw new IllegalStateException("Should be able to delete a user");
-          break;
-        case ALTER_USER:
-          user = "__ALTER_USER_WITH_PERM_TEST__";
-          root_conn.securityOperations().createLocalUser(user, new PasswordToken(password));
-          test_user_conn.securityOperations().changeUserAuthorizations(user, new Authorizations("A", "B"));
-          if (root_conn.securityOperations().getUserAuthorizations(user).isEmpty())
-            throw new IllegalStateException("Should be able to alter a user");
-          break;
-        case SYSTEM:
-          // test for system permission would go here
-          break;
-        default:
-          throw new IllegalArgumentException("Unrecognized System Permission: " + perm);
-      }
-    }
-    
-    private static void verifyHasOnlyTheseSystemPermissions(Connector root_conn, String user, SystemPermission... perms) throws AccumuloException,
-        AccumuloSecurityException {
-      List<SystemPermission> permList = Arrays.asList(perms);
-      for (SystemPermission p : SystemPermission.values()) {
-        if (permList.contains(p)) {
-          // should have these
-          if (!root_conn.securityOperations().hasSystemPermission(user, p))
-            throw new IllegalStateException(user + " SHOULD have system permission " + p);
-        } else {
-          // should not have these
-          if (root_conn.securityOperations().hasSystemPermission(user, p))
-            throw new IllegalStateException(user + " SHOULD NOT have system permission " + p);
-        }
-      }
-    }
-    
-    private static void verifyHasNoSystemPermissions(Connector root_conn, String user, SystemPermission... perms) throws AccumuloException,
-        AccumuloSecurityException {
-      for (SystemPermission p : perms)
-        if (root_conn.securityOperations().hasSystemPermission(user, p))
-          throw new IllegalStateException(user + " SHOULD NOT have system permission " + p);
-    }
-  }
-  
-  public static class TablePermissionsTest extends FunctionalTest {
-    private static final Logger log = Logger.getLogger(TablePermissionsTest.class);
-    private static final String TEST_TABLE = "__TABLE_PERMISSION_TEST__";
-    
-    @Override
-    public void cleanup() throws Exception {}
-    
-    @Override
-    public Map<String,String> getInitialConfig() {
-      return Collections.emptyMap();
-    }
-    
-    @Override
-    public List<TableSetup> getTablesToCreate() {
-      return Collections.emptyList();
-    }
-    
-    @Override
-    public void run() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, MutationsRejectedException {
-      // create the test user
-      getConnector().securityOperations().createLocalUser(TEST_USER, TEST_PASS);
-      Connector test_user_conn = getInstance().getConnector(TEST_USER, TEST_PASS);
-      
-      // check for read-only access to metadata table
-      verifyHasOnlyTheseTablePermissions(getConnector(), getConnector().whoami(), MetadataTable.NAME, TablePermission.READ,
-          TablePermission.ALTER_TABLE);
-      verifyHasOnlyTheseTablePermissions(getConnector(), TEST_USER, MetadataTable.NAME, TablePermission.READ);
-      
-      // test each permission
-      for (TablePermission perm : TablePermission.values()) {
-        log.debug("Verifying the " + perm + " permission");
-        
-        // test permission before and after granting it
-        createTestTable();
-        testMissingTablePermission(getConnector(), test_user_conn, perm);
-        getConnector().securityOperations().grantTablePermission(TEST_USER, TEST_TABLE, perm);
-        verifyHasOnlyTheseTablePermissions(getConnector(), TEST_USER, TEST_TABLE, perm);
-        testGrantedTablePermission(getConnector(), test_user_conn, perm);
-        
-        createTestTable();
-        getConnector().securityOperations().revokeTablePermission(TEST_USER, TEST_TABLE, perm);
-        verifyHasNoTablePermissions(getConnector(), TEST_USER, TEST_TABLE, perm);
-      }
-    }
-    
-    private void createTestTable() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException,
-        MutationsRejectedException {
-      if (!getConnector().tableOperations().exists(TEST_TABLE)) {
-        // create the test table
-        getConnector().tableOperations().create(TEST_TABLE);
-        // put in some initial data
-        BatchWriter writer = getConnector().createBatchWriter(TEST_TABLE, new BatchWriterConfig());
-        Mutation m = new Mutation(new Text("row"));
-        m.put(new Text("cf"), new Text("cq"), new Value("val".getBytes()));
-        writer.addMutation(m);
-        writer.close();
-        
-        // verify proper permissions for creator and test user
-        verifyHasOnlyTheseTablePermissions(getConnector(), getConnector().whoami(), TEST_TABLE, TablePermission.values());
-        verifyHasNoTablePermissions(getConnector(), TEST_USER, TEST_TABLE, TablePermission.values());
-        
-      }
-    }
-    
-    private static void testMissingTablePermission(Connector root_conn, Connector test_user_conn, TablePermission perm) throws AccumuloException,
-        AccumuloSecurityException, TableNotFoundException {
-      Scanner scanner;
-      BatchWriter writer;
-      Mutation m;
-      log.debug("Confirming that the lack of the " + perm + " permission properly restricts the user");
-      
-      // test permission prior to granting it
-      switch (perm) {
-        case READ:
-          try {
-            scanner = test_user_conn.createScanner(TEST_TABLE, Authorizations.EMPTY);
-            int i = 0;
-            for (Entry<Key,Value> entry : scanner)
-              i += 1 + entry.getKey().getRowData().length();
-            if (i != 0)
-              throw new IllegalStateException("Should NOT be able to read from the table");
-          } catch (RuntimeException e) {
-            AccumuloSecurityException se = (AccumuloSecurityException) e.getCause();
-            if (se.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
-              throw se;
-          }
-          break;
-        case WRITE:
-          try {
-            writer = test_user_conn.createBatchWriter(TEST_TABLE, new BatchWriterConfig());
-            m = new Mutation(new Text("row"));
-            m.put(new Text("a"), new Text("b"), new Value("c".getBytes()));
-            writer.addMutation(m);
-            try {
-              writer.close();
-            } catch (MutationsRejectedException e1) {
-              if (e1.getAuthorizationFailuresMap().size() > 0)
-                throw new AccumuloSecurityException(test_user_conn.whoami(), org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode.PERMISSION_DENIED,
-                    e1);
-            }
-            throw new IllegalStateException("Should NOT be able to write to a table");
-          } catch (AccumuloSecurityException e) {
-            if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
-              throw e;
-          }
-          break;
-        case BULK_IMPORT:
-          // test for bulk import permission would go here
-          break;
-        case ALTER_TABLE:
-          Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
-          groups.put("tgroup", new HashSet<Text>(Arrays.asList(new Text("t1"), new Text("t2"))));
-          try {
-            test_user_conn.tableOperations().setLocalityGroups(TEST_TABLE, groups);
-            throw new IllegalStateException("User should not be able to set locality groups");
-          } catch (AccumuloSecurityException e) {
-            if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
-              throw e;
-          }
-          break;
-        case DROP_TABLE:
-          try {
-            test_user_conn.tableOperations().delete(TEST_TABLE);
-            throw new IllegalStateException("User should not be able delete the table");
-          } catch (AccumuloSecurityException e) {
-            if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
-              throw e;
-          }
-          break;
-        case GRANT:
-          try {
-            test_user_conn.securityOperations().grantTablePermission("root", TEST_TABLE, TablePermission.GRANT);
-            throw new IllegalStateException("User should not be able grant permissions");
-          } catch (AccumuloSecurityException e) {
-            if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
-              throw e;
-          }
-          break;
-        default:
-          throw new IllegalArgumentException("Unrecognized table Permission: " + perm);
-      }
-    }
-    
-    private static void testGrantedTablePermission(Connector root_conn, Connector test_user_conn, TablePermission perm) throws AccumuloException,
-        TableExistsException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
-      Scanner scanner;
-      BatchWriter writer;
-      Mutation m;
-      log.debug("Confirming that the presence of the " + perm + " permission properly permits the user");
-      
-      // test permission after granting it
-      switch (perm) {
-        case READ:
-          scanner = test_user_conn.createScanner(TEST_TABLE, Authorizations.EMPTY);
-          Iterator<Entry<Key,Value>> iter = scanner.iterator();
-          while (iter.hasNext())
-            iter.next();
-          break;
-        case WRITE:
-          writer = test_user_conn.createBatchWriter(TEST_TABLE, new BatchWriterConfig());
-          m = new Mutation(new Text("row"));
-          m.put(new Text("a"), new Text("b"), new Value("c".getBytes()));
-          writer.addMutation(m);
-          writer.close();
-          break;
-        case BULK_IMPORT:
-          // test for bulk import permission would go here
-          break;
-        case ALTER_TABLE:
-          Map<String,Set<Text>> groups = new HashMap<String,Set<Text>>();
-          groups.put("tgroup", new HashSet<Text>(Arrays.asList(new Text("t1"), new Text("t2"))));
-          break;
-        case DROP_TABLE:
-          test_user_conn.tableOperations().delete(TEST_TABLE);
-          break;
-        case GRANT:
-          test_user_conn.securityOperations().grantTablePermission("root", TEST_TABLE, TablePermission.GRANT);
-          break;
-        default:
-          throw new IllegalArgumentException("Unrecognized table Permission: " + perm);
-      }
-    }
-    
-    private static void verifyHasOnlyTheseTablePermissions(Connector root_conn, String user, String table, TablePermission... perms) throws AccumuloException,
-        AccumuloSecurityException {
-      List<TablePermission> permList = Arrays.asList(perms);
-      for (TablePermission p : TablePermission.values()) {
-        if (permList.contains(p)) {
-          // should have these
-          if (!root_conn.securityOperations().hasTablePermission(user, table, p))
-            throw new IllegalStateException(user + " SHOULD have table permission " + p + " for table " + table);
-        } else {
-          // should not have these
-          if (root_conn.securityOperations().hasTablePermission(user, table, p))
-            throw new IllegalStateException(user + " SHOULD NOT have table permission " + p + " for table " + table);
-        }
-      }
-    }
-    
-    private static void verifyHasNoTablePermissions(Connector root_conn, String user, String table, TablePermission... perms) throws AccumuloException,
-        AccumuloSecurityException {
-      for (TablePermission p : perms)
-        if (root_conn.securityOperations().hasTablePermission(user, table, p))
-          throw new IllegalStateException(user + " SHOULD NOT have table permission " + p + " for table " + table);
-    }
-  }
-}
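
The deny-then-grant pattern these permission tests exercise reduces to a short sketch like the following (imports as in the file above; "testUserConn" and "someTable" are illustrative names, with the connector assumed to be authenticated as a user holding no permissions):

    // Attempt a privileged operation as an unprivileged user and require
    // that it fails with PERMISSION_DENIED; any other outcome is an error.
    try {
      testUserConn.tableOperations().create("someTable");
      throw new IllegalStateException("Should NOT be able to create a table");
    } catch (AccumuloSecurityException e) {
      if (e.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
        throw e; // unexpected failure mode, so rethrow it
    }

Granting the permission and repeating the same operation without the try/catch gives the matching allow-path check.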

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/main/java/org/apache/accumulo/test/functional/RowDeleteTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/RowDeleteTest.java b/test/src/main/java/org/apache/accumulo/test/functional/RowDeleteTest.java
deleted file mode 100644
index cbf22d3..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/RowDeleteTest.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
-import org.apache.accumulo.core.iterators.user.RowDeletingIterator;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-
-public class RowDeleteTest extends FunctionalTest {
-  
-  @Override
-  public void cleanup() throws Exception {}
-  
-  @Override
-  public Map<String,String> getInitialConfig() {
-    HashMap<String,String> conf = new HashMap<String,String>();
-    conf.put(Property.TSERV_MAJC_DELAY.getKey(), "50ms");
-    return conf;
-  }
-  
-  @Override
-  public List<TableSetup> getTablesToCreate() {
-    TableSetup ts1 = new TableSetup("rdel1", parseConfig(Property.TABLE_LOCALITY_GROUPS + "=lg1,dg", Property.TABLE_LOCALITY_GROUP_PREFIX + "lg1=foo",
-        Property.TABLE_LOCALITY_GROUP_PREFIX + "dg=",
-        Property.TABLE_ITERATOR_PREFIX + "" + IteratorScope.majc + ".rdel=30," + RowDeletingIterator.class.getName(), Property.TABLE_MAJC_RATIO + "=100"));
-    return Collections.singletonList(ts1);
-  }
-  
-  @Override
-  public void run() throws Exception {
-    BatchWriter bw = getConnector().createBatchWriter("rdel1", new BatchWriterConfig());
-    
-    bw.addMutation(nm("r1", "foo", "cf1", "v1"));
-    bw.addMutation(nm("r1", "bar", "cf1", "v2"));
-    
-    bw.flush();
-    getConnector().tableOperations().flush("rdel1", null, null, true);
-    
-    checkRFiles("rdel1", 1, 1, 1, 1);
-    
-    int count = 0;
-    Scanner scanner = getConnector().createScanner("rdel1", Authorizations.EMPTY);
-    for (@SuppressWarnings("unused")
-    Entry<Key,Value> entry : scanner) {
-      count++;
-    }
-    if (count != 2)
-      throw new Exception("1 count=" + count);
-    
-    bw.addMutation(nm("r1", "", "", RowDeletingIterator.DELETE_ROW_VALUE));
-    
-    bw.flush();
-    getConnector().tableOperations().flush("rdel1", null, null, true);
-    
-    // Wait for the files in HDFS to be older than the future compaction date
-    UtilWaitThread.sleep(2000);
-    
-    checkRFiles("rdel1", 1, 1, 2, 2);
-    
-    count = 0;
-    scanner = getConnector().createScanner("rdel1", Authorizations.EMPTY);
-    for (@SuppressWarnings("unused")
-    Entry<Key,Value> entry : scanner) {
-      count++;
-    }
-    if (count != 3)
-      throw new Exception("2 count=" + count);
-    
-    getConnector().tableOperations().compact("rdel1", null, null, false, true);
-    
-    checkRFiles("rdel1", 1, 1, 0, 0);
-    
-    count = 0;
-    scanner = getConnector().createScanner("rdel1", Authorizations.EMPTY);
-    for (@SuppressWarnings("unused")
-    Entry<Key,Value> entry : scanner) {
-      count++;
-    }
-    if (count != 0)
-      throw new Exception("3 count=" + count);
-    
-    bw.close();
-    
-  }
-  
-}
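
RowDeleteTest depends on the row-deletion convention of RowDeletingIterator: a mutation whose column family and qualifier are empty and whose value is DELETE_ROW_VALUE marks the whole row for removal at major compaction. A minimal sketch, assuming a BatchWriter "bw" is already open on the table:

    // Mark row "r1" deleted; the RowDeletingIterator attached at the majc
    // scope hides the row's entries and drops them when compaction runs.
    Mutation m = new Mutation(new Text("r1"));
    m.put(new Text(""), new Text(""), RowDeletingIterator.DELETE_ROW_VALUE);
    bw.addMutation(m);
    bw.flush();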

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/main/java/org/apache/accumulo/test/functional/ScanIteratorTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ScanIteratorTest.java b/test/src/main/java/org/apache/accumulo/test/functional/ScanIteratorTest.java
deleted file mode 100644
index 5e42525..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/ScanIteratorTest.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchScanner;
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.IteratorSetting;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.client.ScannerBase;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.hadoop.io.Text;
-
-public class ScanIteratorTest extends FunctionalTest {
-  
-  @Override
-  public void cleanup() throws Exception {}
-  
-  @Override
-  public Map<String,String> getInitialConfig() {
-    return Collections.emptyMap();
-  }
-  
-  @Override
-  public List<TableSetup> getTablesToCreate() {
-    return Collections.singletonList(new TableSetup("foo"));
-  }
-  
-  @Override
-  public void run() throws Exception {
-    
-    BatchWriter bw = getConnector().createBatchWriter("foo", new BatchWriterConfig());
-    
-    for (int i = 0; i < 1000; i++) {
-      Mutation m = new Mutation(new Text(String.format("%06d", i)));
-      m.put(new Text("cf1"), new Text("cq1"), new Value(("" + (1000 - i)).getBytes()));
-      m.put(new Text("cf1"), new Text("cq2"), new Value(("" + (i - 1000)).getBytes()));
-      
-      bw.addMutation(m);
-    }
-    
-    bw.close();
-    
-    Scanner scanner = getConnector().createScanner("foo", new Authorizations());
-    
-    setupIter(scanner);
-    verify(scanner, 1, 999);
-    
-    BatchScanner bscanner = getConnector().createBatchScanner("foo", new Authorizations(), 3);
-    bscanner.setRanges(Collections.singleton(new Range((Key) null, null)));
-    
-    setupIter(bscanner);
-    verify(bscanner, 1, 999);
-    
-    ArrayList<Range> ranges = new ArrayList<Range>();
-    ranges.add(new Range(new Text(String.format("%06d", 1))));
-    ranges.add(new Range(new Text(String.format("%06d", 6)), new Text(String.format("%06d", 16))));
-    ranges.add(new Range(new Text(String.format("%06d", 20))));
-    ranges.add(new Range(new Text(String.format("%06d", 23))));
-    ranges.add(new Range(new Text(String.format("%06d", 56)), new Text(String.format("%06d", 61))));
-    ranges.add(new Range(new Text(String.format("%06d", 501)), new Text(String.format("%06d", 504))));
-    ranges.add(new Range(new Text(String.format("%06d", 998)), new Text(String.format("%06d", 1000))));
-    
-    HashSet<Integer> got = new HashSet<Integer>();
-    HashSet<Integer> expected = new HashSet<Integer>();
-    for (int i : new int[] {1, 7, 9, 11, 13, 15, 23, 57, 59, 61, 501, 503, 999}) {
-      expected.add(i);
-    }
-    
-    bscanner.setRanges(ranges);
-    
-    for (Entry<Key,Value> entry : bscanner) {
-      got.add(Integer.parseInt(entry.getKey().getRow().toString()));
-    }
-    
-    System.out.println("got : " + got);
-    
-    if (!got.equals(expected)) {
-      throw new Exception(got + " != " + expected);
-    }
-    
-    bscanner.close();
-    
-  }
-  
-  private void verify(Iterable<Entry<Key,Value>> scanner, int start, int finish) throws Exception {
-    
-    int expected = start;
-    for (Entry<Key,Value> entry : scanner) {
-      if (Integer.parseInt(entry.getKey().getRow().toString()) != expected) {
-        throw new Exception("Saw unexpexted " + entry.getKey().getRow() + " " + expected);
-      }
-      
-      if (entry.getKey().getColumnQualifier().toString().equals("cq2")) {
-        expected += 2;
-      }
-    }
-    
-    if (expected != finish + 2) {
-      throw new Exception("Ended at " + expected + " not " + (finish + 2));
-    }
-  }
-  
-  private void setupIter(ScannerBase scanner) throws Exception {
-    IteratorSetting dropMod = new IteratorSetting(50, "dropMod", "org.apache.accumulo.test.functional.DropModIter");
-    dropMod.addOption("mod", "2");
-    dropMod.addOption("drop", "0");
-    scanner.addScanIterator(dropMod);
-  }
-  
-}
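
The setupIter method above uses per-scan iterator attachment; the alternative used elsewhere in these functional tests is to attach the same IteratorSetting to the table itself. A sketch of both, with imports as in the surrounding files:

    // Per-scan attachment: the iterator lives only for this scanner.
    IteratorSetting dropMod = new IteratorSetting(50, "dropMod",
        "org.apache.accumulo.test.functional.DropModIter");
    dropMod.addOption("mod", "2");
    dropMod.addOption("drop", "0");
    scanner.addScanIterator(dropMod);

    // Table attachment: persists for every operation in the named scopes.
    getConnector().tableOperations().attachIterator("foo", dropMod,
        EnumSet.of(IteratorScope.scan));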

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/main/java/org/apache/accumulo/test/functional/ScanRangeTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ScanRangeTest.java b/test/src/main/java/org/apache/accumulo/test/functional/ScanRangeTest.java
deleted file mode 100644
index f5a6f17..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/ScanRangeTest.java
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.TreeSet;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Range;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.hadoop.io.Text;
-
-public class ScanRangeTest extends FunctionalTest {
-  
-  private static final int TS_LIMIT = 1;
-  private static final int CQ_LIMIT = 5;
-  private static final int CF_LIMIT = 5;
-  private static final int ROW_LIMIT = 100;
-  
-  @Override
-  public void cleanup() {}
-  
-  @Override
-  public Map<String,String> getInitialConfig() {
-    return Collections.emptyMap();
-  }
-  
-  @Override
-  public List<TableSetup> getTablesToCreate() {
-    ArrayList<TableSetup> ts = new ArrayList<TableSetup>();
-    ts.add(new TableSetup("table1"));
-    
-    TreeSet<Text> splitRows = new TreeSet<Text>();
-    int splits = 3;
-    for (int i = (ROW_LIMIT / splits); i < ROW_LIMIT; i += (ROW_LIMIT / splits))
-      splitRows.add(createRow(i));
-    
-    Map<String,String> empty = Collections.emptyMap();
-    ts.add(new TableSetup("table2", empty, splitRows));
-    
-    return ts;
-  }
-  
-  @Override
-  public void run() throws Exception {
-    insertData("table1");
-    scanTable("table1");
-    
-    insertData("table2");
-    scanTable("table2");
-  }
-  
-  private void scanTable(String table) throws Exception {
-    scanRange(table, new IntKey(0, 0, 0, 0), new IntKey(1, 0, 0, 0));
-    
-    scanRange(table, new IntKey(0, 0, 0, 0), new IntKey(ROW_LIMIT - 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
-    
-    scanRange(table, null, null);
-    
-    for (int i = 0; i < ROW_LIMIT; i += (ROW_LIMIT / 3)) {
-      for (int j = 0; j < CF_LIMIT; j += (CF_LIMIT / 2)) {
-        for (int k = 1; k < CQ_LIMIT; k += (CQ_LIMIT / 2)) {
-          scanRange(table, null, new IntKey(i, j, k, 0));
-          scanRange(table, new IntKey(0, 0, 0, 0), new IntKey(i, j, k, 0));
-          
-          scanRange(table, new IntKey(i, j, k, 0), new IntKey(ROW_LIMIT - 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
-          
-          scanRange(table, new IntKey(i, j, k, 0), null);
-          
-        }
-      }
-    }
-    
-    for (int i = 0; i < ROW_LIMIT; i++) {
-      scanRange(table, new IntKey(i, 0, 0, 0), new IntKey(i, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
-      
-      if (i > 0 && i < ROW_LIMIT - 1) {
-        scanRange(table, new IntKey(i - 1, 0, 0, 0), new IntKey(i + 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
-      }
-    }
-    
-  }
-  
-  private static class IntKey {
-    private int row;
-    private int cf;
-    private int cq;
-    private long ts;
-    
-    IntKey(IntKey ik) {
-      this.row = ik.row;
-      this.cf = ik.cf;
-      this.cq = ik.cq;
-      this.ts = ik.ts;
-    }
-    
-    IntKey(int row, int cf, int cq, long ts) {
-      this.row = row;
-      this.cf = cf;
-      this.cq = cq;
-      this.ts = ts;
-    }
-    
-    Key createKey() {
-      Text trow = createRow(row);
-      Text tcf = createCF(cf);
-      Text tcq = createCQ(cq);
-      
-      return new Key(trow, tcf, tcq, ts);
-    }
-    
-    IntKey increment() {
-      
-      IntKey ik = new IntKey(this);
-      
-      ik.ts++;
-      if (ik.ts >= TS_LIMIT) {
-        ik.ts = 0;
-        ik.cq++;
-        if (ik.cq >= CQ_LIMIT) {
-          ik.cq = 0;
-          ik.cf++;
-          if (ik.cf >= CF_LIMIT) {
-            ik.cf = 0;
-            ik.row++;
-          }
-        }
-      }
-      
-      return ik;
-    }
-    
-  }
-  
-  private void scanRange(String table, IntKey ik1, IntKey ik2) throws Exception {
-    scanRange(table, ik1, false, ik2, false);
-    scanRange(table, ik1, false, ik2, true);
-    scanRange(table, ik1, true, ik2, false);
-    scanRange(table, ik1, true, ik2, true);
-  }
-  
-  private void scanRange(String table, IntKey ik1, boolean inclusive1, IntKey ik2, boolean inclusive2) throws Exception {
-    Scanner scanner = getConnector().createScanner(table, Authorizations.EMPTY);
-    
-    Key key1 = null;
-    Key key2 = null;
-    
-    IntKey expectedIntKey;
-    IntKey expectedEndIntKey;
-    
-    if (ik1 != null) {
-      key1 = ik1.createKey();
-      expectedIntKey = ik1;
-      
-      if (!inclusive1) {
-        expectedIntKey = expectedIntKey.increment();
-      }
-    } else {
-      expectedIntKey = new IntKey(0, 0, 0, 0);
-    }
-    
-    if (ik2 != null) {
-      key2 = ik2.createKey();
-      expectedEndIntKey = ik2;
-      
-      if (inclusive2) {
-        expectedEndIntKey = expectedEndIntKey.increment();
-      }
-    } else {
-      expectedEndIntKey = new IntKey(ROW_LIMIT, 0, 0, 0);
-    }
-    
-    Range range = new Range(key1, inclusive1, key2, inclusive2);
-    
-    scanner.setRange(range);
-    
-    for (Entry<Key,Value> entry : scanner) {
-      
-      Key expectedKey = expectedIntKey.createKey();
-      if (!expectedKey.equals(entry.getKey())) {
-        throw new Exception(" " + expectedKey + " != " + entry.getKey());
-      }
-      
-      expectedIntKey = expectedIntKey.increment();
-    }
-    
-    if (!expectedIntKey.createKey().equals(expectedEndIntKey.createKey())) {
-      throw new Exception(" " + expectedIntKey.createKey() + " != " + expectedEndIntKey.createKey());
-    }
-  }
-  
-  private static Text createCF(int cf) {
-    Text tcf = new Text(String.format("cf_%03d", cf));
-    return tcf;
-  }
-  
-  private static Text createCQ(int cf) {
-    Text tcf = new Text(String.format("cq_%03d", cf));
-    return tcf;
-  }
-  
-  private static Text createRow(int row) {
-    Text trow = new Text(String.format("r_%06d", row));
-    return trow;
-  }
-  
-  private void insertData(String table) throws Exception {
-    
-    BatchWriter bw = getConnector().createBatchWriter(table, new BatchWriterConfig());
-    
-    for (int i = 0; i < ROW_LIMIT; i++) {
-      Mutation m = new Mutation(createRow(i));
-      
-      for (int j = 0; j < CF_LIMIT; j++) {
-        for (int k = 0; k < CQ_LIMIT; k++) {
-          for (int t = 0; t < TS_LIMIT; t++) {
-            m.put(createCF(j), createCQ(k), t, new Value(String.format("%06d_%03d_%03d_%03d", i, j, k, t).getBytes()));
-          }
-        }
-      }
-      
-      bw.addMutation(m);
-    }
-    
-    bw.close();
-  }
-}
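
The heart of ScanRangeTest is that Range honors its per-endpoint inclusivity flags; stripped of the verification bookkeeping, the construction under test is simply the following ("startKey", "endKey", and the booleans are illustrative names):

    // Bound a scan by two keys with independent inclusivity at each end;
    // passing null for either key leaves that end of the range unbounded.
    Range range = new Range(startKey, startInclusive, endKey, endInclusive);
    scanner.setRange(range);
    for (Entry<Key,Value> entry : scanner) {
      // entries arrive in sorted key order, restricted to the range
    }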

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/main/java/org/apache/accumulo/test/functional/ScanSessionTimeOutTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/ScanSessionTimeOutTest.java b/test/src/main/java/org/apache/accumulo/test/functional/ScanSessionTimeOutTest.java
deleted file mode 100644
index 1551687..0000000
--- a/test/src/main/java/org/apache/accumulo/test/functional/ScanSessionTimeOutTest.java
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test.functional;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-
-import org.apache.accumulo.core.client.BatchWriter;
-import org.apache.accumulo.core.client.BatchWriterConfig;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Mutation;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.hadoop.io.Text;
-
-public class ScanSessionTimeOutTest extends FunctionalTest {
-  
-  @Override
-  public void cleanup() throws Exception {}
-  
-  @Override
-  public Map<String,String> getInitialConfig() {
-    HashMap<String,String> config = new HashMap<String,String>();
-    // set the session idle time 3 seconds
-    config.put(Property.TSERV_SESSION_MAXIDLE.getKey(), "3");
-    return config;
-  }
-  
-  @Override
-  public List<TableSetup> getTablesToCreate() {
-    return Collections.singletonList(new TableSetup("abc"));
-  }
-  
-  @Override
-  public void run() throws Exception {
-    BatchWriter bw = getConnector().createBatchWriter("abc", new BatchWriterConfig());
-    
-    for (int i = 0; i < 100000; i++) {
-      Mutation m = new Mutation(new Text(String.format("%08d", i)));
-      for (int j = 0; j < 3; j++)
-        m.put(new Text("cf1"), new Text("cq" + j), new Value(("" + i + "_" + j).getBytes()));
-      
-      bw.addMutation(m);
-    }
-    
-    bw.close();
-    
-    Scanner scanner = getConnector().createScanner("abc", new Authorizations());
-    scanner.setBatchSize(1000);
-    
-    Iterator<Entry<Key,Value>> iter = scanner.iterator();
-    
-    verify(iter, 0, 200);
-    
-    // sleep three times the session timeout
-    UtilWaitThread.sleep(9000);
-    
-    verify(iter, 200, 100000);
-    
-  }
-  
-  private void verify(Iterator<Entry<Key,Value>> iter, int start, int stop) throws Exception {
-    for (int i = start; i < stop; i++) {
-      
-      Text er = new Text(String.format("%08d", i));
-      
-      for (int j = 0; j < 3; j++) {
-        Entry<Key,Value> entry = iter.next();
-        
-        if (!entry.getKey().getRow().equals(er)) {
-          throw new Exception("row " + entry.getKey().getRow() + " != " + er);
-        }
-        
-        if (!entry.getKey().getColumnFamily().equals(new Text("cf1"))) {
-          throw new Exception("cf " + entry.getKey().getColumnFamily() + " != cf1");
-        }
-        
-        if (!entry.getKey().getColumnQualifier().equals(new Text("cq" + j))) {
-          throw new Exception("cq " + entry.getKey().getColumnQualifier() + " != cq" + j);
-        }
-        
-        if (!entry.getValue().toString().equals("" + i + "_" + j)) {
-          throw new Exception("value " + entry.getValue() + " != " + i + "_" + j);
-        }
-        
-      }
-    }
-    
-  }
-  
-}
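
ScanSessionTimeOutTest pins down a subtle client guarantee: when a scan session idles past tserver.session.idle.max, the tablet server discards it, and the Scanner transparently re-seeks from the last key it returned, neither skipping nor repeating entries. Condensed, with verify being the test's own helper method:

    // Read part of the table, idle well past the 3s session timeout,
    // then keep reading from the same iterator.
    Iterator<Entry<Key,Value>> iter = scanner.iterator();
    verify(iter, 0, 200);
    UtilWaitThread.sleep(9000); // three times the configured idle timeout
    verify(iter, 200, 100000);  // resumes exactly where it left off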


[38/50] [abbrv] git commit: ACCUMULO-1564 Fixing typos (JWDP -> JDWP)

Posted by ct...@apache.org.
ACCUMULO-1564 Fixing typos (JWDP -> JDWP)


git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1502407 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/e56edc10
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/e56edc10
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/e56edc10

Branch: refs/heads/ACCUMULO-1496
Commit: e56edc10682db342da6f76d00abe58f4963f203f
Parents: fb2c0c7
Author: Corey J. Nolet <cj...@apache.org>
Authored: Fri Jul 12 01:26:54 2013 +0000
Committer: Corey J. Nolet <cj...@apache.org>
Committed: Fri Jul 12 01:26:54 2013 +0000

----------------------------------------------------------------------
 .../accumulo/minicluster/MiniAccumuloCluster.java   |  2 +-
 .../accumulo/minicluster/MiniAccumuloConfig.java    | 16 ++++++++--------
 .../accumulo/minicluster/MiniAccumuloRunner.java    |  6 +++---
 .../minicluster/MiniAccumuloClusterTest.java        |  2 +-
 4 files changed, 13 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/e56edc10/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
index c492e1b..8da22e0 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
@@ -180,7 +180,7 @@ public class MiniAccumuloCluster {
     List<String> jvmOpts = new ArrayList<String>();
     jvmOpts.add("-Xmx" + config.getMemory(serverType));
 
-    if (config.isJWDPEnabled()) {
+    if (config.isJDWPEnabled()) {
       Integer port = PortUtils.getRandomFreePort();
       jvmOpts.addAll(buildRemoteDebugParams(port));
       debugPorts.add(new Pair<ServerType,Integer>(serverType, port));

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e56edc10/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloConfig.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloConfig.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloConfig.java
index 4defc35..f183b4e 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloConfig.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloConfig.java
@@ -41,7 +41,7 @@ public class MiniAccumuloConfig {
   private int numTservers = 2;
   private Map<ServerType,Long> memoryConfig = new HashMap<ServerType,Long>();
   
-  private boolean jwdpEnabled = false;
+  private boolean jdwpEnabled = false;
   
   private String instanceName = "miniInstance";
   
@@ -332,23 +332,23 @@ public class MiniAccumuloConfig {
   }
   
   /**
-   * @return is the current configuration in jwdpEnabled mode?
+   * @return is the current configuration in jdwpEnabled mode?
    * 
    * @since 1.6.0
    */
-  public boolean isJWDPEnabled() {
-    return jwdpEnabled;
+  public boolean isJDWPEnabled() {
+    return jdwpEnabled;
   }
   
   /**
-   * @param jwdpEnabled
-   *          should the processes run remote jwdpEnabled servers?
+   * @param jdwpEnabled
+   *          should the processes run remote jdwpEnabled servers?
    * @return the current instance
    * 
    * @since 1.6.0
    */
-  public MiniAccumuloConfig setJWDPEnabled(boolean jwdpEnabled) {
-    this.jwdpEnabled = jwdpEnabled;
+  public MiniAccumuloConfig setJDWPEnabled(boolean jdwpEnabled) {
+    this.jdwpEnabled = jdwpEnabled;
     return this;
   }
   

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e56edc10/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java
index b02a7ab..1402b53 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java
@@ -44,7 +44,7 @@ import com.google.common.io.Files;
  * instanceName=testInstance
  * numTServers=1
  * zooKeeperPort=3191
- * jwdpEnabled=true
+ * jdwpEnabled=true
  * zooKeeperMemory=128M
  * tserverMemory=256M
  * masterMemory=128M
@@ -110,8 +110,8 @@ public class MiniAccumuloRunner {
       config.setNumTservers(Integer.parseInt(opts.prop.getProperty("numTServers")));
     if (opts.prop.containsKey("zooKeeperPort"))
       config.setZooKeeperPort(Integer.parseInt(opts.prop.getProperty("zooKeeperPort")));
-    if (opts.prop.containsKey("jwdpEnabled"))
-      config.setJWDPEnabled(Boolean.parseBoolean(opts.prop.getProperty("jwdpEnabled")));
+    if (opts.prop.containsKey("jdwpEnabled"))
+      config.setJDWPEnabled(Boolean.parseBoolean(opts.prop.getProperty("jdwpEnabled")));
     if (opts.prop.containsKey("zooKeeperMemory"))
       setMemoryOnConfig(config, opts.prop.getProperty("zooKeeperMemory"), ServerType.ZOOKEEPER);
     if (opts.prop.containsKey("tserverMemory"))

http://git-wip-us.apache.org/repos/asf/accumulo/blob/e56edc10/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterTest.java
----------------------------------------------------------------------
diff --git a/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterTest.java b/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterTest.java
index a031f41..10237b1 100644
--- a/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterTest.java
+++ b/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterTest.java
@@ -59,7 +59,7 @@ public class MiniAccumuloClusterTest {
     Logger.getLogger("org.apache.zookeeper").setLevel(Level.ERROR);
 
     folder.create();
-    MiniAccumuloConfig config = new MiniAccumuloConfig(folder.getRoot(), "superSecret").setJWDPEnabled(true);
+    MiniAccumuloConfig config = new MiniAccumuloConfig(folder.getRoot(), "superSecret").setJDWPEnabled(true);
     accumulo = new MiniAccumuloCluster(config);
     accumulo.start();
   }
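
With the rename applied, enabling remote debugging on a mini cluster follows the updated test above; the directory and password here are placeholders:

    // Each mini cluster process opens a JDWP server on a random free port.
    MiniAccumuloConfig config =
        new MiniAccumuloConfig(tmpDir, "superSecret").setJDWPEnabled(true);
    MiniAccumuloCluster accumulo = new MiniAccumuloCluster(config);
    accumulo.start();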


[05/50] [abbrv] git commit: ACCUMULO-1267 record-only merge from 1.4 onto 1.5

Posted by ct...@apache.org.
ACCUMULO-1267 record-only merge from 1.4 onto 1.5


git-svn-id: https://svn.apache.org/repos/asf/accumulo/branches/1.5@1499201 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/0ab0fc7a
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/0ab0fc7a
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/0ab0fc7a

Branch: refs/heads/ACCUMULO-1496
Commit: 0ab0fc7a9411fc5360ccca731ef21352de9ae635
Parents: 352859a
Author: Bill Slacum <uj...@apache.org>
Authored: Wed Jul 3 04:14:20 2013 +0000
Committer: Bill Slacum <uj...@apache.org>
Committed: Wed Jul 3 04:14:20 2013 +0000

----------------------------------------------------------------------

----------------------------------------------------------------------



[18/50] [abbrv] git commit: ACCUMULO-1552 applying Jonathan Hsieh's patch to fix the typo

Posted by ct...@apache.org.
ACCUMULO-1552 applying Jonathan Hsieh's patch to fix the typo

git-svn-id: https://svn.apache.org/repos/asf/accumulo/branches/1.5@1500719 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/86f1a229
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/86f1a229
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/86f1a229

Branch: refs/heads/ACCUMULO-1496
Commit: 86f1a229c5f35d5a798a138039af87f732cc54cf
Parents: 0ab0fc7
Author: Eric C. Newton <ec...@apache.org>
Authored: Mon Jul 8 13:39:52 2013 +0000
Committer: Eric C. Newton <ec...@apache.org>
Committed: Mon Jul 8 13:39:52 2013 +0000

----------------------------------------------------------------------
 test/system/continuous/continuous-env.sh.example | 2 +-
 test/system/continuous/run-verify.sh             | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/86f1a229/test/system/continuous/continuous-env.sh.example
----------------------------------------------------------------------
diff --git a/test/system/continuous/continuous-env.sh.example b/test/system/continuous/continuous-env.sh.example
index dd62bc1..570c502 100644
--- a/test/system/continuous/continuous-env.sh.example
+++ b/test/system/continuous/continuous-env.sh.example
@@ -87,7 +87,7 @@ MASTER_KILL_SLEEP_TIME=60
 MASTER_RESTART_SLEEP_TIME=2
 
 #settings for the verification map reduce job
-VERFIY_OUT=/tmp/continuous_verify
+VERIFY_OUT=/tmp/continuous_verify
 VERIFY_MAX_MAPS=64
 VERIFY_REDUCERS=64
 SCAN_OFFLINE=false

http://git-wip-us.apache.org/repos/asf/accumulo/blob/86f1a229/test/system/continuous/run-verify.sh
----------------------------------------------------------------------
diff --git a/test/system/continuous/run-verify.sh b/test/system/continuous/run-verify.sh
index 1664edd..a0c64d1 100755
--- a/test/system/continuous/run-verify.sh
+++ b/test/system/continuous/run-verify.sh
@@ -28,4 +28,4 @@ if [ "$SCAN_OFFLINE" == "false" ] ; then
        SCAN_OPT=
 fi
 
-$ACCUMULO_HOME/bin/tool.sh "$SERVER_LIBJAR" org.apache.accumulo.test.continuous.ContinuousVerify -libjars "$SERVER_LIBJAR" $AUTH_OPT -i $INSTANCE_NAME -z $ZOO_KEEPERS -u $USER -p $PASS --table $TABLE --output $VERFIY_OUT --maxMappers $VERIFY_MAX_MAPS --reducers $VERIFY_REDUCERS $SCAN_OPT
+$ACCUMULO_HOME/bin/tool.sh "$SERVER_LIBJAR" org.apache.accumulo.test.continuous.ContinuousVerify -libjars "$SERVER_LIBJAR" $AUTH_OPT -i $INSTANCE_NAME -z $ZOO_KEEPERS -u $USER -p $PASS --table $TABLE --output $VERIFY_OUT --maxMappers $VERIFY_MAX_MAPS --reducers $VERIFY_REDUCERS $SCAN_OPT


[02/50] [abbrv] ACCUMULO-1537 converted many more functional tests to integration tests

Posted by ct...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/RowDeleteIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/RowDeleteIT.java b/test/src/test/java/org/apache/accumulo/test/functional/RowDeleteIT.java
new file mode 100644
index 0000000..40e6f02
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/RowDeleteIT.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.apache.accumulo.test.functional.FunctionalTestUtils.checkRFiles;
+import static org.apache.accumulo.test.functional.FunctionalTestUtils.nm;
+
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
+import org.apache.accumulo.core.iterators.user.RowDeletingIterator;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class RowDeleteIT extends MacTest {
+
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_MAJC_DELAY.getKey(), "50ms"));
+  }
+
+  @Test(timeout=30*1000)
+  public void run() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("rdel1");
+    Map<String,Set<Text>> groups = new HashMap<String, Set<Text>>();
+    groups.put("lg1", Collections.singleton(new Text("foo")));
+    groups.put("dg", Collections.<Text>emptySet());
+    c.tableOperations().setLocalityGroups("rdel1", groups);
+    IteratorSetting setting = new IteratorSetting(30, RowDeletingIterator.class);
+    c.tableOperations().attachIterator("rdel1", setting, EnumSet.of(IteratorScope.majc));
+    c.tableOperations().setProperty("rdel1", Property.TABLE_MAJC_RATIO.getKey(), "100");
+    
+    BatchWriter bw = c.createBatchWriter("rdel1", new BatchWriterConfig());
+    
+    bw.addMutation(nm("r1", "foo", "cf1", "v1"));
+    bw.addMutation(nm("r1", "bar", "cf1", "v2"));
+    
+    bw.flush();
+    c.tableOperations().flush("rdel1", null, null, true);
+    
+    checkRFiles(c, "rdel1", 1, 1, 1, 1);
+    
+    int count = 0;
+    Scanner scanner = c.createScanner("rdel1", Authorizations.EMPTY);
+    for (@SuppressWarnings("unused")
+    Entry<Key,Value> entry : scanner) {
+      count++;
+    }
+    if (count != 2)
+      throw new Exception("1 count=" + count);
+    
+    bw.addMutation(nm("r1", "", "", RowDeletingIterator.DELETE_ROW_VALUE));
+    
+    bw.flush();
+    c.tableOperations().flush("rdel1", null, null, true);
+    
+    checkRFiles(c, "rdel1", 1, 1, 2, 2);
+    
+    count = 0;
+    scanner = c.createScanner("rdel1", Authorizations.EMPTY);
+    for (@SuppressWarnings("unused")
+    Entry<Key,Value> entry : scanner) {
+      count++;
+    }
+    if (count != 3)
+      throw new Exception("2 count=" + count);
+    
+    c.tableOperations().compact("rdel1", null, null, false, true);
+    
+    checkRFiles(c, "rdel1", 1, 1, 0, 0);
+    
+    count = 0;
+    scanner = c.createScanner("rdel1", Authorizations.EMPTY);
+    for (@SuppressWarnings("unused")
+    Entry<Key,Value> entry : scanner) {
+      count++;
+    }
+    if (count != 0)
+      throw new Exception("3 count=" + count);
+    
+    bw.close();
+    
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
new file mode 100644
index 0000000..5c71b30
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ScanIteratorIT.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.ScannerBase;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class ScanIteratorIT extends MacTest {
+  
+  @Test(timeout=30*1000)
+  public void run() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("foo");
+    
+    BatchWriter bw = c.createBatchWriter("foo", new BatchWriterConfig());
+    
+    for (int i = 0; i < 1000; i++) {
+      Mutation m = new Mutation(new Text(String.format("%06d", i)));
+      m.put(new Text("cf1"), new Text("cq1"), new Value(("" + (1000 - i)).getBytes()));
+      m.put(new Text("cf1"), new Text("cq2"), new Value(("" + (i - 1000)).getBytes()));
+      
+      bw.addMutation(m);
+    }
+    
+    bw.close();
+    
+    Scanner scanner = c.createScanner("foo", new Authorizations());
+    
+    setupIter(scanner);
+    verify(scanner, 1, 999);
+    
+    BatchScanner bscanner = c.createBatchScanner("foo", new Authorizations(), 3);
+    bscanner.setRanges(Collections.singleton(new Range((Key) null, null)));
+    
+    setupIter(bscanner);
+    verify(bscanner, 1, 999);
+    
+    ArrayList<Range> ranges = new ArrayList<Range>();
+    ranges.add(new Range(new Text(String.format("%06d", 1))));
+    ranges.add(new Range(new Text(String.format("%06d", 6)), new Text(String.format("%06d", 16))));
+    ranges.add(new Range(new Text(String.format("%06d", 20))));
+    ranges.add(new Range(new Text(String.format("%06d", 23))));
+    ranges.add(new Range(new Text(String.format("%06d", 56)), new Text(String.format("%06d", 61))));
+    ranges.add(new Range(new Text(String.format("%06d", 501)), new Text(String.format("%06d", 504))));
+    ranges.add(new Range(new Text(String.format("%06d", 998)), new Text(String.format("%06d", 1000))));
+    
+    HashSet<Integer> got = new HashSet<Integer>();
+    HashSet<Integer> expected = new HashSet<Integer>();
+    for (int i : new int[] {1, 7, 9, 11, 13, 15, 23, 57, 59, 61, 501, 503, 999}) {
+      expected.add(i);
+    }
+    
+    bscanner.setRanges(ranges);
+    
+    for (Entry<Key,Value> entry : bscanner) {
+      got.add(Integer.parseInt(entry.getKey().getRow().toString()));
+    }
+    
+    System.out.println("got : " + got);
+    
+    if (!got.equals(expected)) {
+      throw new Exception(got + " != " + expected);
+    }
+    
+    bscanner.close();
+    
+  }
+  
+  private void verify(Iterable<Entry<Key,Value>> scanner, int start, int finish) throws Exception {
+    
+    int expected = start;
+    for (Entry<Key,Value> entry : scanner) {
+      if (Integer.parseInt(entry.getKey().getRow().toString()) != expected) {
+        throw new Exception("Saw unexpexted " + entry.getKey().getRow() + " " + expected);
+      }
+      
+      if (entry.getKey().getColumnQualifier().toString().equals("cq2")) {
+        expected += 2;
+      }
+    }
+    
+    if (expected != finish + 2) {
+      throw new Exception("Ended at " + expected + " not " + (finish + 2));
+    }
+  }
+  
+  private void setupIter(ScannerBase scanner) throws Exception {
+    IteratorSetting dropMod = new IteratorSetting(50, "dropMod", "org.apache.accumulo.test.functional.DropModIter");
+    dropMod.addOption("mod", "2");
+    dropMod.addOption("drop", "0");
+    scanner.addScanIterator(dropMod);
+  }
+  
+}
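
Note: DropModIter is referenced above only by its class name; its source is not part of
this hunk. A minimal sketch of an equivalent scan-time filter, assuming the option names
used in setupIter() ("mod" and "drop") and rows that parse as integers:

import java.io.IOException;
import java.util.Map;

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.Filter;
import org.apache.accumulo.core.iterators.IteratorEnvironment;
import org.apache.accumulo.core.iterators.SortedKeyValueIterator;

// Hypothetical stand-in for DropModIter, built on the public Filter base class.
public class DropModFilterSketch extends Filter {
  private int mod;
  private int drop;

  @Override
  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
    super.init(source, options, env);
    mod = Integer.parseInt(options.get("mod"));
    drop = Integer.parseInt(options.get("drop"));
  }

  @Override
  public boolean accept(Key k, Value v) {
    // suppress any row whose numeric form is congruent to "drop" modulo "mod"
    return Integer.parseInt(k.getRow().toString()) % mod != drop;
  }
}

With mod=2 and drop=0 the even rows are suppressed, which is why verify() expects to walk
the odd rows 1, 3, ..., 999.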

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
new file mode 100644
index 0000000..94369a3
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ScanRangeIT.java
@@ -0,0 +1,233 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Map.Entry;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class ScanRangeIT extends MacTest {
+  
+  private static final int TS_LIMIT = 1;
+  private static final int CQ_LIMIT = 5;
+  private static final int CF_LIMIT = 5;
+  private static final int ROW_LIMIT = 100;
+  
+  @Test(timeout=30*1000)
+  public void run() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("table1");
+    c.tableOperations().create("table2");
+    TreeSet<Text> splitRows = new TreeSet<Text>();
+    int splits = 3;
+    for (int i = (ROW_LIMIT / splits); i < ROW_LIMIT; i += (ROW_LIMIT / splits))
+      splitRows.add(createRow(i));
+    c.tableOperations().addSplits("table2", splitRows);
+    
+    insertData(c, "table1");
+    scanTable(c, "table1");
+    
+    insertData(c, "table2");
+    scanTable(c, "table2");
+  }
+  
+  private void scanTable(Connector c, String table) throws Exception {
+    scanRange(c, table, new IntKey(0, 0, 0, 0), new IntKey(1, 0, 0, 0));
+    
+    scanRange(c, table, new IntKey(0, 0, 0, 0), new IntKey(ROW_LIMIT - 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
+    
+    scanRange(c, table, null, null);
+    
+    for (int i = 0; i < ROW_LIMIT; i += (ROW_LIMIT / 3)) {
+      for (int j = 0; j < CF_LIMIT; j += (CF_LIMIT / 2)) {
+        for (int k = 1; k < CQ_LIMIT; k += (CQ_LIMIT / 2)) {
+          scanRange(c, table, null, new IntKey(i, j, k, 0));
+          scanRange(c, table, new IntKey(0, 0, 0, 0), new IntKey(i, j, k, 0));
+          
+          scanRange(c, table, new IntKey(i, j, k, 0), new IntKey(ROW_LIMIT - 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
+          
+          scanRange(c, table, new IntKey(i, j, k, 0), null);
+          
+        }
+      }
+    }
+    
+    for (int i = 0; i < ROW_LIMIT; i++) {
+      scanRange(c, table, new IntKey(i, 0, 0, 0), new IntKey(i, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
+      
+      if (i > 0 && i < ROW_LIMIT - 1) {
+        scanRange(c, table, new IntKey(i - 1, 0, 0, 0), new IntKey(i + 1, CF_LIMIT - 1, CQ_LIMIT - 1, 0));
+      }
+    }
+    
+  }
+  
+  private static class IntKey {
+    private int row;
+    private int cf;
+    private int cq;
+    private long ts;
+    
+    IntKey(IntKey ik) {
+      this.row = ik.row;
+      this.cf = ik.cf;
+      this.cq = ik.cq;
+      this.ts = ik.ts;
+    }
+    
+    IntKey(int row, int cf, int cq, long ts) {
+      this.row = row;
+      this.cf = cf;
+      this.cq = cq;
+      this.ts = ts;
+    }
+    
+    Key createKey() {
+      Text trow = createRow(row);
+      Text tcf = createCF(cf);
+      Text tcq = createCQ(cq);
+      
+      return new Key(trow, tcf, tcq, ts);
+    }
+    
+    IntKey increment() {
+      
+      IntKey ik = new IntKey(this);
+      
+      ik.ts++;
+      if (ik.ts >= TS_LIMIT) {
+        ik.ts = 0;
+        ik.cq++;
+        if (ik.cq >= CQ_LIMIT) {
+          ik.cq = 0;
+          ik.cf++;
+          if (ik.cf >= CF_LIMIT) {
+            ik.cf = 0;
+            ik.row++;
+          }
+        }
+      }
+      
+      return ik;
+    }
+    
+  }
+  
+  private void scanRange(Connector c, String table, IntKey ik1, IntKey ik2) throws Exception {
+    scanRange(c, table, ik1, false, ik2, false);
+    scanRange(c, table, ik1, false, ik2, true);
+    scanRange(c, table, ik1, true, ik2, false);
+    scanRange(c, table, ik1, true, ik2, true);
+  }
+  
+  private void scanRange(Connector c, String table, IntKey ik1, boolean inclusive1, IntKey ik2, boolean inclusive2) throws Exception {
+    Scanner scanner = c.createScanner(table, Authorizations.EMPTY);
+    
+    Key key1 = null;
+    Key key2 = null;
+    
+    IntKey expectedIntKey;
+    IntKey expectedEndIntKey;
+    
+    if (ik1 != null) {
+      key1 = ik1.createKey();
+      expectedIntKey = ik1;
+      
+      if (!inclusive1) {
+        expectedIntKey = expectedIntKey.increment();
+      }
+    } else {
+      expectedIntKey = new IntKey(0, 0, 0, 0);
+    }
+    
+    if (ik2 != null) {
+      key2 = ik2.createKey();
+      expectedEndIntKey = ik2;
+      
+      if (inclusive2) {
+        expectedEndIntKey = expectedEndIntKey.increment();
+      }
+    } else {
+      expectedEndIntKey = new IntKey(ROW_LIMIT, 0, 0, 0);
+    }
+    
+    Range range = new Range(key1, inclusive1, key2, inclusive2);
+    
+    scanner.setRange(range);
+    
+    for (Entry<Key,Value> entry : scanner) {
+      
+      Key expectedKey = expectedIntKey.createKey();
+      if (!expectedKey.equals(entry.getKey())) {
+        throw new Exception(" " + expectedKey + " != " + entry.getKey());
+      }
+      
+      expectedIntKey = expectedIntKey.increment();
+    }
+    
+    if (!expectedIntKey.createKey().equals(expectedEndIntKey.createKey())) {
+      throw new Exception(" " + expectedIntKey.createKey() + " != " + expectedEndIntKey.createKey());
+    }
+  }
+  
+  private static Text createCF(int cf) {
+    Text tcf = new Text(String.format("cf_%03d", cf));
+    return tcf;
+  }
+  
+  private static Text createCQ(int cq) {
+    Text tcq = new Text(String.format("cq_%03d", cq));
+    return tcq;
+  }
+  
+  private static Text createRow(int row) {
+    Text trow = new Text(String.format("r_%06d", row));
+    return trow;
+  }
+  
+  private void insertData(Connector c, String table) throws Exception {
+    
+    BatchWriter bw = c.createBatchWriter(table, new BatchWriterConfig());
+    
+    for (int i = 0; i < ROW_LIMIT; i++) {
+      Mutation m = new Mutation(createRow(i));
+      
+      for (int j = 0; j < CF_LIMIT; j++) {
+        for (int k = 0; k < CQ_LIMIT; k++) {
+          for (int t = 0; t < TS_LIMIT; t++) {
+            m.put(createCF(j), createCQ(k), t, new Value(String.format("%06d_%03d_%03d_%03d", i, j, k, t).getBytes()));
+          }
+        }
+      }
+      
+      bw.addMutation(m);
+    }
+    
+    bw.close();
+  }
+}
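
The expected-key bookkeeping in scanRange() leans on Range's boundary semantics: an
exclusive start key is skipped, an inclusive end key is returned. A small illustrative
snippet (not part of the patch) using the same key layout:

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.hadoop.io.Text;

Key k = new Key(new Text("r_000005"), new Text("cf_000"), new Text("cq_000"), 0L);

// exclusive start: the first key the test expects is ik1.increment()
assert !new Range(k, false, null, false).contains(k);

// inclusive start: the first expected key is ik1 itself
assert new Range(k, true, null, false).contains(k);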

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java
new file mode 100644
index 0000000..6d066a6
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ScanSessionTimeOutIT.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class ScanSessionTimeOutIT extends MacTest {
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    cfg.setSiteConfig(Collections.singletonMap(Property.TSERV_SESSION_MAXIDLE.getKey(), "3"));
+  }
+
+  @Test(timeout=30*1000)
+  public void run() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("abc");
+    
+    BatchWriter bw = c.createBatchWriter("abc", new BatchWriterConfig());
+    
+    for (int i = 0; i < 100000; i++) {
+      Mutation m = new Mutation(new Text(String.format("%08d", i)));
+      for (int j = 0; j < 3; j++)
+        m.put(new Text("cf1"), new Text("cq" + j), new Value(("" + i + "_" + j).getBytes()));
+      
+      bw.addMutation(m);
+    }
+    
+    bw.close();
+    
+    Scanner scanner = c.createScanner("abc", Authorizations.EMPTY);
+    scanner.setBatchSize(1000);
+    
+    Iterator<Entry<Key,Value>> iter = scanner.iterator();
+    
+    verify(iter, 0, 200);
+    
+    // sleep three times the session timeout
+    UtilWaitThread.sleep(9000);
+    
+    verify(iter, 200, 100000);
+    
+  }
+  
+  private void verify(Iterator<Entry<Key,Value>> iter, int start, int stop) throws Exception {
+    for (int i = start; i < stop; i++) {
+      
+      Text er = new Text(String.format("%08d", i));
+      
+      for (int j = 0; j < 3; j++) {
+        Entry<Key,Value> entry = iter.next();
+        
+        if (!entry.getKey().getRow().equals(er)) {
+          throw new Exception("row " + entry.getKey().getRow() + " != " + er);
+        }
+        
+        if (!entry.getKey().getColumnFamily().equals(new Text("cf1"))) {
+          throw new Exception("cf " + entry.getKey().getColumnFamily() + " != cf1");
+        }
+        
+        if (!entry.getKey().getColumnQualifier().equals(new Text("cq" + j))) {
+          throw new Exception("cq " + entry.getKey().getColumnQualifier() + " != cq" + j);
+        }
+        
+        if (!entry.getValue().toString().equals("" + i + "_" + j)) {
+          throw new Exception("value " + entry.getValue() + " != " + i + "_" + j);
+        }
+        
+      }
+    }
+    
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
new file mode 100644
index 0000000..3237fc1
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ServerSideErrorIT.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Collections;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.admin.TableOperations;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.iterators.Combiner;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class ServerSideErrorIT extends MacTest {
+  
+  @Test
+  public void run() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("tt");
+    IteratorSetting is = new IteratorSetting(5, "Bad Aggregator", BadCombiner.class);
+    Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("acf")));
+    c.tableOperations().attachIterator("tt", is);
+    
+    BatchWriter bw = c.createBatchWriter("tt", new BatchWriterConfig());
+    
+    Mutation m = new Mutation(new Text("r1"));
+    m.put(new Text("acf"), new Text("foo"), new Value("1".getBytes()));
+    
+    bw.addMutation(m);
+    
+    bw.close();
+    
+    // try to scan table
+    Scanner scanner = c.createScanner("tt", Authorizations.EMPTY);
+    
+    boolean caught = false;
+    try {
+      for (Entry<Key,Value> entry : scanner) {
+        entry.getKey();
+      }
+    } catch (Exception e) {
+      caught = true;
+    }
+    
+    if (!caught)
+      throw new Exception("Scan did not fail");
+    
+    // try to batch scan the table
+    BatchScanner bs = c.createBatchScanner("tt", Authorizations.EMPTY, 2);
+    bs.setRanges(Collections.singleton(new Range()));
+    
+    caught = false;
+    try {
+      for (Entry<Key,Value> entry : bs) {
+        entry.getKey();
+      }
+      bs.close();
+    } catch (Exception e) {
+      caught = true;
+    }
+    if (!caught)
+      throw new Exception("batch scan did not fail");
+    
+    // remove the bad agg so accumulo can shutdown
+    TableOperations to = c.tableOperations();
+    for (Entry<String,String> e : to.getProperties("tt")) {
+      to.removeProperty("tt", e.getKey());
+    }
+    
+    UtilWaitThread.sleep(500);
+    
+    // should be able to scan now
+    scanner = c.createScanner("tt", Authorizations.EMPTY);
+    for (Entry<Key,Value> entry : scanner) {
+      entry.getKey();
+    }
+    
+    // set a nonexistent iterator, which should cause the scan to fail on the server side
+    scanner.addScanIterator(new IteratorSetting(100, "bogus", "com.bogus.iterator"));
+    
+    caught = false;
+    try {
+      for (Entry<Key,Value> entry : scanner) {
+        // should error
+        entry.getKey();
+      }
+    } catch (Exception e) {
+      caught = true;
+    }
+    
+    if (!caught)
+      throw new Exception("Scan did not fail");
+  }
+}
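
Note: BadCombiner is attached above but defined elsewhere in the test module. For this
test it only matters that it fails whenever it is invoked; a plausible sketch on the
public Combiner base class:

import java.util.Iterator;

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.iterators.Combiner;

// Hypothetical stand-in for BadCombiner: any scan or compaction touching the
// configured "acf" column dies server side, which is what the test provokes.
public class BadCombinerSketch extends Combiner {
  @Override
  public Value reduce(Key key, Iterator<Value> iter) {
    throw new IllegalStateException("bad combiner");
  }
}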

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/ShutdownIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/ShutdownIT.java b/test/src/test/java/org/apache/accumulo/test/functional/ShutdownIT.java
new file mode 100644
index 0000000..4d1b21b
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/ShutdownIT.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.server.util.Admin;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.TestRandomDeletes;
+import org.apache.accumulo.test.VerifyIngest;
+import org.junit.Test;
+
+public class ShutdownIT extends MacTest {
+  
+  @Test(timeout=60*1000)
+  public void shutdownDuringIngest() throws Exception {
+    Process ingest = cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-p", MacTest.PASSWORD, "--createTable");
+    UtilWaitThread.sleep(100);
+    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+    ingest.destroy();
+  }
+  
+  @Test(timeout=60*1000)
+  public void shutdownDuringQuery() throws Exception {
+    assertEquals(0, cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-p", MacTest.PASSWORD, "--createTable").waitFor());
+    Process verify = cluster.exec(VerifyIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-p", MacTest.PASSWORD);
+    UtilWaitThread.sleep(100);
+    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+    verify.destroy();
+  }
+  
+  @Test(timeout=30*1000)
+  public void shutdownDuringDelete() throws Exception {
+    assertEquals(0, cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-p", MacTest.PASSWORD, "--createTable").waitFor());
+    Process deleter = cluster.exec(TestRandomDeletes.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-p", MacTest.PASSWORD);
+    UtilWaitThread.sleep(100);
+    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+    deleter.destroy();
+  }
+  
+  @Test(timeout=30*1000)
+  public void shutdownDuringDeleteTable() throws Exception {
+    final Connector c = getConnector();
+    for (int i = 0; i < 10 ; i++) {
+      c.tableOperations().create("table" + i);
+    }
+    final AtomicReference<Exception> ref = new AtomicReference<Exception>();
+    Thread async = new Thread() {
+      public void run() {
+        try {
+          for (int i = 0; i < 10; i++)
+            c.tableOperations().delete("table" + i);
+        } catch (Exception ex) {
+          ref.set(ex);
+        }
+      }
+    };
+    async.start();
+    UtilWaitThread.sleep(100);
+    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+    if (ref.get() != null)
+      throw ref.get();
+  }
+  
+  @Test(timeout=60*1000)
+  public void stopDuringStart() throws Exception {
+    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+  }
+  
+  @Test(timeout=30*1000)
+  public void adminStop() throws Exception {
+    Connector c = getConnector();
+    assertEquals(0, cluster.exec(TestIngest.class, "-i", cluster.getInstanceName(), "-z", cluster.getZooKeepers(), "-p", MacTest.PASSWORD, "--createTable").waitFor());
+    List<String> tabletServers = c.instanceOperations().getTabletServers();
+    assertEquals(2, tabletServers.size());
+    String doomed = tabletServers.get(0);
+    assertEquals(0, cluster.exec(Admin.class, "stop", doomed).waitFor());
+    tabletServers = c.instanceOperations().getTabletServers();
+    assertEquals(1, tabletServers.size());
+    assertFalse(tabletServers.get(0).equals(doomed));
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/SimpleBalancerFairnessIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SimpleBalancerFairnessIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SimpleBalancerFairnessIT.java
new file mode 100644
index 0000000..620dc47
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/SimpleBalancerFairnessIT.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.impl.MasterClient;
+import org.apache.accumulo.core.client.security.tokens.PasswordToken;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.master.thrift.MasterClientService;
+import org.apache.accumulo.core.master.thrift.MasterMonitorInfo;
+import org.apache.accumulo.core.master.thrift.TableInfo;
+import org.apache.accumulo.core.master.thrift.TabletServerStatus;
+import org.apache.accumulo.core.security.CredentialHelper;
+import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.trace.instrument.Tracer;
+import org.junit.Test;
+
+public class SimpleBalancerFairnessIT extends MacTest {
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    Map<String,String> siteConfig = new HashMap<String, String>();
+    siteConfig.put(Property.TSERV_MAXMEM.getKey(), "10K");
+    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "0");
+    cfg.setSiteConfig(siteConfig);
+  }
+  
+  @Test(timeout=120*1000)
+  public void simpleBalancerFairness() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
+    c.tableOperations().create("unused");
+    c.tableOperations().addSplits("unused", TestIngest.getSplitPoints(0, 10000000, 2000));
+    List<String> tservers = c.instanceOperations().getTabletServers();
+    TestIngest.Opts opts = new TestIngest.Opts();
+    opts.rows = 200000;
+    TestIngest.ingest(c, opts, new BatchWriterOpts());
+    c.tableOperations().flush("test_ingest", null, null, false);
+    UtilWaitThread.sleep(15*1000);
+    TCredentials creds = CredentialHelper.create("root", new PasswordToken(MacTest.PASSWORD), c.getInstance().getInstanceName());
+    
+    MasterClientService.Iface client = null;
+    MasterMonitorInfo stats = null;
+    try {
+      client = MasterClient.getConnectionWithRetry(c.getInstance());
+      stats = client.getMasterStats(Tracer.traceInfo(), creds);
+    } finally {
+      if (client != null)
+        MasterClient.close(client);
+    }
+    List<Integer> counts = new ArrayList<Integer>();
+    for (TabletServerStatus server: stats.tServerInfo) {
+      int count = 0;
+      for (TableInfo table : server.tableMap.values()) {
+        count += table.onlineTablets;
+      }
+      counts.add(count);
+    }
+    assertTrue(counts.size() > 1);
+    for (int i = 1; i < counts.size(); i++)
+      assertTrue(Math.abs(counts.get(0) - counts.get(i)) <= tservers.size());
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java
new file mode 100644
index 0000000..1f4a3fc
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/SparseColumnFamilyIT.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Iterator;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+/**
+ * This test recreates issue ACCUMULO-516. Until that issue is fixed, this test should time out.
+ */
+public class SparseColumnFamilyIT extends MacTest {
+  
+  @Test(timeout=30*1000)
+  public void sparseColumnFamily() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("scftt");
+    
+    BatchWriter bw = c.createBatchWriter("scftt", new BatchWriterConfig());
+    
+    // create file in the tablet that has mostly column family 0, with a few entries for column family 1
+    
+    bw.addMutation(nm(0, 1, 0));
+    for (int i = 1; i < 99999; i++) {
+      bw.addMutation(nm(i * 2, 0, i));
+    }
+    bw.addMutation(nm(99999 * 2, 1, 99999));
+    bw.flush();
+    
+    c.tableOperations().flush("scftt", null, null, true);
+    
+    // create a file that has column family 1 and 0 interleaved
+    for (int i = 0; i < 100000; i++) {
+      bw.addMutation(nm(i * 2 + 1, i % 2 == 0 ? 0 : 1, i));
+    }
+    bw.close();
+    
+    c.tableOperations().flush("scftt", null, null, true);
+    
+    Scanner scanner = c.createScanner("scftt", Authorizations.EMPTY);
+    
+    for (int i = 0; i < 200; i++) {
+      
+      // every time we search for column family 1, it will scan the entire file
+      // that has mostly column family 0 until the bug is fixed
+      scanner.setRange(new Range(String.format("%06d", i), null));
+      scanner.clearColumns();
+      scanner.setBatchSize(3);
+      scanner.fetchColumnFamily(new Text(String.format("%03d", 1)));
+      
+      long t1 = System.currentTimeMillis();
+      Iterator<Entry<Key,Value>> iter = scanner.iterator();
+      if (iter.hasNext()) {
+        Entry<Key,Value> entry = iter.next();
+        if (!"001".equals(entry.getKey().getColumnFamilyData().toString())) {
+          throw new Exception();
+        }
+      }
+      long t2 = System.currentTimeMillis();
+      
+      System.out.println("time " + (t2 - t1));
+      
+    }
+  }
+  
+  /**
+   * Builds a single-entry mutation for the given row, column family, and value.
+   */
+  private Mutation nm(int row, int cf, int val) {
+    Mutation m = new Mutation(String.format("%06d", row));
+    m.put(String.format("%03d", cf), "", "" + val);
+    return m;
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
new file mode 100644
index 0000000..dc4adc2
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.accumulo.server.util.CheckForMetadataProblems;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class SplitIT extends MacTest {
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    Map<String, String> siteConfig = new HashMap<String, String>();
+    siteConfig.put(Property.TSERV_MAXMEM.getKey(), "5K");
+    siteConfig.put(Property.TSERV_MAJC_DELAY.getKey(), "1");
+    cfg.setSiteConfig(siteConfig);
+  }
+
+  @Test(timeout=60*1000)
+  public void tabletShouldSplit() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "256K");
+    c.tableOperations().setProperty("test_ingest", Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1K");
+    TestIngest.Opts opts = new TestIngest.Opts();
+    opts.rows = 100000;
+    TestIngest.ingest(c, opts, new BatchWriterOpts());
+    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+    vopts.rows = opts.rows;
+    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+    UtilWaitThread.sleep(10*1000);
+    String id = c.tableOperations().tableIdMap().get("test_ingest");
+    Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+    KeyExtent extent = new KeyExtent(new Text(id), null, null);
+    s.setRange(extent.toMetadataRange());
+    MetadataTable.PREV_ROW_COLUMN.fetch(s);
+    int count = 0;
+    int shortened = 0;
+    for (Entry<Key,Value> entry : s) {
+      extent = new KeyExtent(entry.getKey().getRow(), entry.getValue());
+      if (extent.getEndRow() != null && extent.getEndRow().toString().length() < 14)
+        shortened++;
+      count++;
+    }
+    assertTrue(shortened > 0);
+    assertTrue(count > 10);
+    assertEquals(0, cluster.exec(CheckForMetadataProblems.class, "-i", cluster.getInstanceName(), "-u", "root", "-p", MacTest.PASSWORD, "-z", cluster.getZooKeepers()).waitFor());
+  }
+  
+  @Test(timeout=60*1000)
+  public void interleaveSplit() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
+    ReadWriteIT.interleaveTest(c);
+    UtilWaitThread.sleep(10*1000);
+    assertTrue(c.tableOperations().listSplits("test_ingest").size() > 20);
+  }
+  
+  @Test(timeout=120*1000)
+  public void deleteSplit() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
+    DeleteIT.deleteTest(c);
+    assertTrue(c.tableOperations().listSplits("test_ingest").size() > 30);
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/StartIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/StartIT.java b/test/src/test/java/org/apache/accumulo/test/functional/StartIT.java
new file mode 100644
index 0000000..3a7fc93
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/StartIT.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.accumulo.start.TestMain;
+import org.junit.Test;
+
+public class StartIT extends MacTest {
+  
+  @Test(timeout=10*1000)
+  public void test() throws Exception {
+    assertTrue(cluster.exec(TestMain.class, "exception").waitFor() != 0);
+    assertEquals(0, cluster.exec(TestMain.class, "success").waitFor());
+    assertTrue(cluster.exec(TestMain.class).waitFor() != 0);
+  }
+  
+}
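
Note: TestMain lives in the start module and is not shown here. The contract StartIT
relies on can be sketched as follows (an assumption about its behavior, not the actual
class):

// Hypothetical sketch: exit 0 only for "success", fail otherwise.
public class TestMain {
  public static void main(String[] args) {
    if (args.length > 0 && args[0].equals("success"))
      System.exit(0); // StartIT asserts exit code 0 here
    if (args.length > 0 && args[0].equals("exception"))
      throw new RuntimeException("exception requested"); // JVM exits nonzero
    System.exit(-1); // no recognized argument: also nonzero
  }
}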

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
new file mode 100644
index 0000000..d92088e
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/TableIT.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Map.Entry;
+
+import org.apache.accumulo.core.cli.BatchWriterOpts;
+import org.apache.accumulo.core.cli.ScannerOpts;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.CachedConfiguration;
+import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.server.util.Admin;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+public class TableIT extends MacTest {
+  
+  @Test(timeout=60*1000)
+  public void test() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    TestIngest.Opts opts = new TestIngest.Opts();
+    TestIngest.ingest(c, opts, new BatchWriterOpts());
+    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+    String id = c.tableOperations().tableIdMap().get("test_ingest");
+    Scanner s = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+    s.setRange(new KeyExtent(new Text(id), null, null).toMetadataRange());
+    int count = 0;
+    for (@SuppressWarnings("unused") Entry<Key,Value> entry : s) {
+      count++;
+    }
+    assertTrue(count > 0);
+    FileSystem fs = FileSystem.get(CachedConfiguration.getInstance());
+    assertTrue(fs.listStatus(new Path(cluster.getConfig().getDir() + "/accumulo/tables/" + id)).length > 0);
+    c.tableOperations().delete("test_ingest");
+    count = 0;
+    for (@SuppressWarnings("unused") Entry<Key,Value> entry : s) {
+      count++;
+    }
+    assertEquals(0, count);
+    assertEquals(0, fs.listStatus(new Path(cluster.getConfig().getDir() + "/accumulo/tables/" + id)).length);
+    assertNull(c.tableOperations().tableIdMap().get("test_ingest"));
+    c.tableOperations().create("test_ingest");
+    TestIngest.ingest(c, opts, new BatchWriterOpts());
+    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
+    assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/TabletIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TabletIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TabletIT.java
new file mode 100644
index 0000000..f222a04
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/TabletIT.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.accumulo.test.CreateTestTable;
+import org.junit.Test;
+
+public class TabletIT extends MacTest {
+  
+  private static final int N = 1000;
+
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    Map<String, String> siteConfig = new HashMap<String,String>();
+    siteConfig.put(Property.TABLE_SPLIT_THRESHOLD.getKey(), "200");
+    siteConfig.put(Property.TSERV_MAXMEM.getKey(), "128M");
+    cfg.setSiteConfig(siteConfig);
+  }
+
+  @Test(timeout=30*1000)
+  public void test() throws Exception {
+    assertEquals(0, cluster.exec(CreateTestTable.class, "" + N).waitFor());
+    assertEquals(0, cluster.exec(CreateTestTable.class, "-readonly", "" + N).waitFor());
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java b/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
new file mode 100644
index 0000000..c9c9d3b
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/functional/TimeoutIT.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Collections;
+import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.accumulo.core.client.BatchScanner;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.IteratorSetting;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.TimedOutException;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.junit.Test;
+import static org.junit.Assert.fail;
+
+/**
+ * Verifies that BatchWriter and BatchScanner enforce their configured timeouts.
+ */
+public class TimeoutIT extends MacTest {
+  
+  @Test
+  public void run() throws Exception {
+    Connector conn = getConnector();
+    testBatchWriterTimeout(conn);
+    testBatchScannerTimeout(conn);
+  }
+  
+  public void testBatchWriterTimeout(Connector conn) throws Exception {
+    conn.tableOperations().create("foo1");
+    conn.tableOperations().addConstraint("foo1", SlowConstraint.class.getName());
+    
+    // give constraint time to propagate through zookeeper
+    UtilWaitThread.sleep(250);
+    
+    BatchWriter bw = conn.createBatchWriter("foo1", new BatchWriterConfig().setTimeout(3, TimeUnit.SECONDS));
+    
+    Mutation mut = new Mutation("r1");
+    mut.put("cf1", "cq1", "v1");
+    
+    bw.addMutation(mut);
+    try {
+      bw.close();
+      fail("batch writer did not timeout");
+    } catch (MutationsRejectedException mre) {
+      if (!(mre.getCause() instanceof TimedOutException)) {
+        throw mre;
+      }
+    }
+  }
+  
+  public void testBatchScannerTimeout(Connector conn) throws Exception {
+    conn.tableOperations().create("timeout");
+    
+    BatchWriter bw = conn.createBatchWriter("timeout", new BatchWriterConfig());
+    
+    Mutation m = new Mutation("r1");
+    m.put("cf1", "cq1", "v1");
+    m.put("cf1", "cq2", "v2");
+    m.put("cf1", "cq3", "v3");
+    m.put("cf1", "cq4", "v4");
+    
+    bw.addMutation(m);
+    
+    bw.close();
+    
+    BatchScanner bs = conn.createBatchScanner("timeout", Authorizations.EMPTY, 2);
+    bs.setTimeout(1, TimeUnit.SECONDS);
+    bs.setRanges(Collections.singletonList(new Range()));
+    
+    // should not timeout
+    for (Entry<Key,Value> entry : bs) {
+      entry.getKey();
+    }
+    
+    IteratorSetting iterSetting = new IteratorSetting(100, SlowIterator.class);
+    iterSetting.addOption("sleepTime", 2000 + "");
+    conn.tableOperations().attachIterator("timeout", iterSetting);
+    UtilWaitThread.sleep(250);
+    
+    try {
+      for (Entry<Key,Value> entry : bs) {
+        entry.getKey();
+      }
+      fail("batch scanner did not time out");
+    } catch (TimedOutException toe) {
+      // toe.printStackTrace();
+    }
+    
+    bs.close();
+  }
+  
+}
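
Note: SlowConstraint and SlowIterator are referenced above but defined elsewhere in the
test module. The batch-writer half of the test only needs a constraint that takes longer
than the writer's 3 second timeout; a minimal sketch, assuming the standard Constraint
interface:

import java.util.List;

import org.apache.accumulo.core.constraints.Constraint;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.util.UtilWaitThread;

// Hypothetical stand-in for SlowConstraint: never finishes checking, so the
// BatchWriter configured with a 3 second timeout gives up and throws.
public class SlowConstraintSketch implements Constraint {
  @Override
  public String getViolationDescription(short violationCode) {
    return "slow constraint";
  }

  @Override
  public List<Short> check(Environment env, Mutation mutation) {
    UtilWaitThread.sleep(Long.MAX_VALUE);
    return null;
  }
}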

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/examples.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/examples.py b/test/system/auto/simple/examples.py
deleted file mode 100755
index 069148d..0000000
--- a/test/system/auto/simple/examples.py
+++ /dev/null
@@ -1,348 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import logging
-import unittest
-import time
-import sys
-import glob
-
-from TestUtils import TestUtilsMixin, ACCUMULO_HOME, SITE, ROOT, ROOT_PASSWORD, INSTANCE_NAME, ZOOKEEPERS
-
-table='testTable'
-count=str(10000)
-min=str(0)
-max=str(99999)
-valueSize=str(100)
-memory=str(1<<20)
-latency=str(1000)
-numThreads=str(4)
-visibility='A|B'
-auths='A,B'
-
-log = logging.getLogger('test.auto')
-
-class Examples(TestUtilsMixin, unittest.TestCase):
-    "Start a clean accumulo, run the examples"
-    order = 21
-
-    def runExample(self, cmd):
-        return self.wait(self.runOn(self.masterHost(), [self.accumulo_sh(),] + cmd))
-
-    def ashell(self, input, expected = 0):
-        out, err, code = self.shell(self.masterHost(), input + '\n')
-        self.assert_(code == expected)
-        return out
-
-    def comment(self, description):
-        LINE = '-'*40
-        log.info(LINE)
-        log.info(description)
-        log.info(LINE)
-
-    def execute(self, *cmd):
-        self.wait(self.runOn('localhost', cmd))
-
-    def runTest(self):
-        examplesJar = glob.glob(ACCUMULO_HOME+'/lib/accumulo-examples-simple*.jar')[0]
-
-	self.comment("Testing MaxMutation constraint")
-	self.ashell('createtable test_ingest\n'
-                    'constraint -a org.apache.accumulo.examples.simple.constraints.MaxMutationSize\n')
-        handle = self.runOn('localhost', [self.accumulo_sh(), 'org.apache.accumulo.test.TestIngest', '-u', ROOT, '--rows', '1', '--start', '0', '--cols', '10000', '-p', ROOT_PASSWORD])
-        out, err = handle.communicate()
-        self.failIf(handle.returncode==0)
-        self.failUnless(err.find("MutationsRejectedException: # constraint violations : 1") >= 0, "Was able to insert a mutation larger than max size")
-        
-        self.ashell('createtable %s\nsetauths -u %s -s A,B\nquit\n' %(table, ROOT))
-        self.comment("Testing dirlist example (a little)")
-        self.comment("  ingesting accumulo source")
-        self.execute(self.accumulo_sh(), 'org.apache.accumulo.examples.simple.dirlist.Ingest',
-                     '-i', INSTANCE_NAME, '-z', ZOOKEEPERS, '-u', ROOT, '-p', ROOT_PASSWORD,
-                     '--dirTable', 'dirTable',
-                     '--indexTable', 'indexTable',
-                     '--dataTable', 'dataTable',
-                     '--vis', visibility,
-                     '--chunkSize', 100000,
-                     ACCUMULO_HOME+"/test")
-        self.comment("  searching for a file")
-        handle = self.runOn('localhost', [self.accumulo_sh(), 'org.apache.accumulo.examples.simple.dirlist.QueryUtil',
-                                          '-i', INSTANCE_NAME, '-z', ZOOKEEPERS, '-u', ROOT, '-p', ROOT_PASSWORD,
-                                          '-t', 'indexTable', '--auths', auths, '--search', '--path', 'examples.py'])
-        out, err = handle.communicate()
-        self.assert_(handle.returncode == 0)
-        self.assert_(out.find('test/system/auto/simple/examples.py') >= 0)
-        self.comment("  found file at " + out)
-
-    
-        self.comment("Testing ageoff filtering")
-        out = self.ashell("createtable filtertest\n"
-                     "setiter -t filtertest -scan -p 10 -n myfilter -ageoff\n"
-                     "\n"
-                     "5000\n"
-                     "\n"
-                     "insert foo a b c\n"
-                     "scan\n"
-                     "sleep 5\n"
-                     "scan\n")
-        self.assert_(2 == len([line for line in out.split('\n') if line.find('foo') >= 0]))
-
-        self.comment("Testing bloom filters are fast for missing data")
-        self.ashell('createtable bloom_test\nconfig -t bloom_test -s table.bloom.enabled=true\n')
-        self.execute(self.accumulo_sh(), 'org.apache.accumulo.examples.simple.client.RandomBatchWriter', '--seed', '7',
-                     '-i', INSTANCE_NAME, '-z', ZOOKEEPERS, '-u', ROOT, '-p', ROOT_PASSWORD, '-t', 'bloom_test',
-                     '--num', '1000000', '--min', '0', '--max', '1000000000', '--size', '50', '--batchMemory', '2M', '--batchLatency', '60s', 
-                     '--batchThreads', '3')
-        self.ashell('flush -t bloom_test -w\n')
-        now = time.time()
-        self.execute(self.accumulo_sh(), 'org.apache.accumulo.examples.simple.client.RandomBatchScanner', '--seed', '7',
-                     '-i', INSTANCE_NAME, '-z', ZOOKEEPERS, '-u', ROOT, '-p', ROOT_PASSWORD, '-t', 'bloom_test',
-                     '--num', '500', '--min', '0', '--max', '1000000000', '--size', '50', '--scanThreads', 4)
-        diff = time.time() - now
-        now = time.time()
-        self.execute(self.accumulo_sh(), 'org.apache.accumulo.examples.simple.client.RandomBatchScanner', '--seed', '8',
-                     '-i', INSTANCE_NAME, '-z', ZOOKEEPERS, '-u', ROOT, '-p', ROOT_PASSWORD, '-t', 'bloom_test',
-                     '--num', '500', '--min', '0', '--max', '1000000000', '--size', '50', '--scanThreads', 4)
-        diff2 = time.time() - now
-        self.assert_(diff2 < diff)
-
-        self.comment("Creating a sharded index of the accumulo java files")
-        self.ashell('createtable shard\ncreatetable doc2term\nquit\n')
-        self.execute('/bin/sh', '-c',
-                     'find src -name "*.java" | xargs ./bin/accumulo org.apache.accumulo.simple.examples.shard.Index -i %s -z %s -t shard -u %s -p %s --partitions 30' %
-                     (INSTANCE_NAME, ZOOKEEPERS, ROOT, ROOT_PASSWORD))
-        self.execute(self.accumulo_sh(), 'org.apache.accumulo.simple.examples.shard.Query',
-                     '-i', INSTANCE_NAME, '-z', ZOOKEEPERS, '-t', 'shard', '-u', ROOT, '-p', ROOT_PASSWORD,
-                     'foo', 'bar')
-        self.comment("Creating a word index of the sharded files")
-        self.execute(self.accumulo_sh(), 'org.apache.accumulo.simple.examples.shard.Reverse',
-                     '-i', INSTANCE_NAME, '-z', ZOOKEEPERS, '-t', 'shard', '--doc2Term', 'doc2term', '-u', ROOT, '-p', ROOT_PASSWORD)
-        self.comment("Making 1000 conjunctive queries of 5 random words")
-        self.execute(self.accumulo_sh(), 'org.apache.accumulo.simple.examples.shard.ContinuousQuery',
-                     '-i', INSTANCE_NAME, '-z', ZOOKEEPERS, '-t', 'shard', '--doc2Term', 'doc2term', '-u', ROOT, '-p', ROOT_PASSWORD, '--term', 5, '--count', 1000)
-
-        self.execute('hadoop', 'fs', '-rmr', "/tmp/input", "/tmp/files", "/tmp/splits.txt", "/tmp/failures")
-        self.execute('hadoop', 'fs', '-mkdir', "/tmp/input")
-        self.comment("Starting bulk ingest example")
-        self.comment("   Creating some test data")
-        self.execute(self.accumulo_sh(), 'org.apache.accumulo.simple.examples.mapreduce.bulk.GenerateTestData', 0, 1000000, '/tmp/input/data')
-        self.execute(self.accumulo_sh(), 'org.apache.accumulo.simple.examples.mapreduce.bulk.SetupTable',
-                 INSTANCE_NAME, ZOOKEEPERS, ROOT, ROOT_PASSWORD, 'bulkTable')
-        self.execute(ACCUMULO_HOME+'/bin/tool.sh', examplesJar, 'org.apache.accumulo.simple.examples.mapreduce.bulk.BulkIngestExample',
-                 INSTANCE_NAME, ZOOKEEPERS, ROOT, ROOT_PASSWORD, 'bulkTable', '/tmp/input', '/tmp')
-        self.execute(ACCUMULO_HOME+'/bin/tool.sh', examplesJar, 'org.apache.accumulo.simple.examples.mapreduce.bulk.VerifyIngest',
-                 INSTANCE_NAME, ZOOKEEPERS, ROOT, ROOT_PASSWORD, 'bulkTable', 0, 1000000)
-        self.wait(self.runOn(self.masterHost(), [
-            'hadoop', 'fs', '-rmr', "/tmp/tableFile", "/tmp/nines"
-            ]))
-        self.comment("Running TeraSortIngest for a million rows")
-        # 10,000 times smaller than the real terasort
-        ROWS = 1000*1000
-        self.wait(self.runOn(self.masterHost(), [
-            ACCUMULO_HOME+'/bin/tool.sh',
-            examplesJar,
-            'org.apache.accumulo.simple.examples.mapreduce.TeraSortIngest',
-            '--count', ROWS,  
-            '-nk', 10, '-xk', 10,
-            '-nv', 78, '-xv', 78,
-            '-t', 'sorted',
-            '-i', INSTANCE_NAME,
-            '-z', ZOOKEEPERS,
-            '-u', ROOT,
-            '-p', ROOT_PASSWORD,
-            '--splits', 4]))
-        self.comment("Looking for '999' in all rows")
-        self.wait(self.runOn(self.masterHost(), [
-            ACCUMULO_HOME+'/bin/tool.sh',
-            examplesJar,
-            'org.apache.accumulo.examples.simple.mapreduce.RegexExample',
-            '-i', INSTANCE_NAME,
-            '-z', ZOOKEEPERS,
-            '-u', ROOT,
-            '-p', ROOT_PASSWORD,
-            '-t', 'sorted',
-            '--rowRegex', '.*999.*',
-            '/tmp/nines']))
-        self.comment("Generating hashes of each row into a new table")
-        self.wait(self.runOn(self.masterHost(), [
-            ACCUMULO_HOME+'/bin/tool.sh',
-            examplesJar,
-            'org.apache.accumulo.examples.simple.mapreduce.RowHash',
-            '-i', INSTANCE_NAME,
-            '-z', ZOOKEEPERS,
-            '-u', ROOT,
-            '-p', ROOT_PASSWORD,
-            '-t', 'sorted',
-            '--column', ':',
-            'sortedHashed',
-            ]))
-        self.comment("Exporting the table to HDFS")
-        self.wait(self.runOn(self.masterHost(), [
-            ACCUMULO_HOME+'/bin/tool.sh',
-            examplesJar,
-            'org.apache.accumulo.examples.simple.mapreduce.TableToFile',
-            '-i', INSTANCE_NAME,
-            '-z', ZOOKEEPERS,
-            '-u', ROOT,
-            '-p', ROOT_PASSWORD,
-            '-t', 'sorted',
-            '--output', '/tmp/tableFile'
-            ]))
-        self.comment("Running WordCount using Accumulo aggregators")
-        self.wait(self.runOn(self.masterHost(), [
-            'hadoop', 'fs', '-rmr', "/tmp/wc"
-            ]))
-        self.wait(self.runOn(self.masterHost(), [
-            'hadoop', 'fs', '-mkdir', "/tmp/wc"
-            ]))
-        self.wait(self.runOn(self.masterHost(), [
-            'hadoop', 'fs', '-copyFromLocal', ACCUMULO_HOME + "/README", "/tmp/wc/Accumulo.README"
-            ]))
-        self.ashell('createtable wordCount\nsetiter -scan -majc -minc -p 10 -n sum -class org.apache.accumulo.core.iterators.user.SummingCombiner\n\ncount\n\nSTRING\nquit\n')
-        self.wait(self.runOn(self.masterHost(), [
-            ACCUMULO_HOME+'/bin/tool.sh',
-            examplesJar,
-            'org.apache.accumulo.examples.simple.mapreduce.WordCount',
-            '-i', INSTANCE_NAME,
-            '-z', ZOOKEEPERS,
-            '--input', '/tmp/wc',
-            '-t', 'wordCount'
-            ]))
-        self.comment("Inserting data with a batch writer")
-        self.runExample(['org.apache.accumulo.examples.simple.helloworld.InsertWithBatchWriter',
-                        '-i', INSTANCE_NAME,
-                        '-z', ZOOKEEPERS,
-                        '-t', 'helloBatch',
-                        '-u', ROOT,
-                        '-p', ROOT_PASSWORD])
-        self.comment("Reading data")
-        self.runExample(['org.apache.accumulo.examples.simple.helloworld.ReadData',
-                        '-i', INSTANCE_NAME,
-                        '-z', ZOOKEEPERS,
-                        '-t', 'helloBatch',
-                        '-u', ROOT,
-                        '-p', ROOT_PASSWORD])
-        self.comment("Running isolated scans")
-        self.runExample(['org.apache.accumulo.examples.simple.isolation.InterferenceTest',
-                        '-i', INSTANCE_NAME,
-                        '-z', ZOOKEEPERS,
-                        '-u', ROOT,
-                        '-p', ROOT_PASSWORD,
-                         '-t', 'itest1',
-                         '--iterations', 100000,
-                         '--isolated'])
-        self.comment("Running scans without isolation")
-        self.runExample(['org.apache.accumulo.examples.simple.isolation.InterferenceTest',
-                        '-i', INSTANCE_NAME,
-                        '-z', ZOOKEEPERS,
-                        '-u', ROOT,
-                        '-p', ROOT_PASSWORD,
-                         '-t', 'itest2',
-                         '--iterations', 100000])
-        self.comment("Using some example constraints")
-        self.ashell('\n'.join([
-            'createtable testConstraints',
-            'constraint -t testConstraints -a org.apache.accumulo.examples.simple.constraints.NumericValueConstraint',
-            'constraint -t testConstraints -a org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint',
-            'insert r1 cf1 cq1 1111',
-            'insert r1 cf1 cq1 ABC',
-            'scan',
-            'quit'
-            ]), 1)
-        self.comment("Performing some row operations")
-        self.runExample(['org.apache.accumulo.examples.simple.client.RowOperations',
-                        '-i', INSTANCE_NAME,
-                        '-z', ZOOKEEPERS,
-                        '-u', ROOT,
-                        '-p', ROOT_PASSWORD ])
-        self.comment("Using the batch writer")
-        self.runExample(['org.apache.accumulo.examples.simple.client.SequentialBatchWriter',
-                        '-i', INSTANCE_NAME,
-                        '-z', ZOOKEEPERS,
-                        '-u', ROOT,
-                        '-p', ROOT_PASSWORD,
-                         '-t', table,
-                         '--start', min,
-                         '--num', count,
-                         '--size', valueSize,
-                         '--batchMemory', memory,
-                         '--batchLatency', latency,
-                         '--batchThreads', numThreads,
-                         '--vis', visibility])
-        self.comment("Reading and writing some data")
-        self.runExample(['org.apache.accumulo.examples.simple.client.ReadWriteExample',
-                           '-i', INSTANCE_NAME, 
-                           '-z', ZOOKEEPERS, 
-                           '-u', ROOT, 
-                           '-p', ROOT_PASSWORD, 
-                           '--auths', auths,
-                           '-t', table,
-                           '--createtable', 
-                           '-c', 
-                           '--debug'])
-        self.comment("Deleting some data")
-        self.runExample(['org.apache.accumulo.examples.simple.client.ReadWriteExample',
-                           '-i', INSTANCE_NAME, 
-                           '-z', ZOOKEEPERS, 
-                           '-u', ROOT, 
-                           '-p', ROOT_PASSWORD, 
-                           '-s', auths,
-                           '-t', table,
-                           '-d', 
-                           '--debug'])
-        self.comment("Writing some random data with the batch writer")
-        self.runExample(['org.apache.accumulo.examples.simple.client.RandomBatchWriter',
-                           '-i', INSTANCE_NAME, 
-                           '-z', ZOOKEEPERS, 
-                           '-u', ROOT, 
-                           '-p', ROOT_PASSWORD, 
-                           '-t', table,
-                           '--num', count, 
-                           '--min', min, 
-                           '--max', max, 
-                           '--size', valueSize, 
-                           '--batchMemory', memory, 
-                           '--batchLatency', latency, 
-                           '--batchThreads', numThreads, 
-                           '--vis', visibility])
-        self.comment("Writing some random data with the batch writer")
-        self.runExample(['org.apache.accumulo.simple.examples.client.RandomBatchScanner',
-                           '-i', INSTANCE_NAME, 
-                           '-z', ZOOKEEPERS, 
-                           '-u', ROOT, 
-                           '-p', ROOT_PASSWORD, 
-                           '-t', table,
-                           '--num', count, 
-                           '--min', min, 
-                           '--max', max, 
-                           '--size', valueSize, 
-                           '--scanThreads', numThreads, 
-                           '--auths', auths]);
-        self.comment("Running an example table operation (Flush)")
-        self.runExample(['org.apache.accumulo.examples.simple.client.Flush',
-                           '-i', INSTANCE_NAME, 
-                           '-z', ZOOKEEPERS, 
-                           '-u', ROOT, 
-                           '-p', ROOT_PASSWORD, 
-                           '-t', table])
-        self.shutdown_accumulo()
-
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(Examples())
-    return result
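
The bloom-filter portion of this deleted script leans on a timing trick: run the
identical random batch scan twice and require the second pass, with bloom
filters warmed, to finish faster. A minimal Java sketch of that pattern under
stated assumptions (placeholder instance, credentials, and row format; the
script's own driver was RandomBatchScanner):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map.Entry;
    import java.util.Random;

    import org.apache.accumulo.core.client.BatchScanner;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.ZooKeeperInstance;
    import org.apache.accumulo.core.client.security.tokens.PasswordToken;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.security.Authorizations;

    public class BloomTimingSketch {
      // Bloom filters only pay off for point lookups, so time a batch of
      // random single-row ranges rather than one full-table scan.
      static long timeScan(Connector c, String table, long seed) throws Exception {
        Random r = new Random(seed);
        List<Range> ranges = new ArrayList<Range>();
        for (int i = 0; i < 500; i++)
          ranges.add(new Range(String.format("row_%010d", r.nextInt(1000000000)))); // row format assumed
        long start = System.currentTimeMillis();
        BatchScanner bs = c.createBatchScanner(table, new Authorizations(), 4);
        try {
          bs.setRanges(ranges);
          for (Entry<Key,Value> e : bs) {
            // drain the scanner; only the elapsed time matters
          }
        } finally {
          bs.close();
        }
        return System.currentTimeMillis() - start;
      }

      public static void main(String[] args) throws Exception {
        Connector c = new ZooKeeperInstance("test", "localhost") // placeholder instance/zookeepers
            .getConnector("root", new PasswordToken("secret"));  // placeholder credentials
        long first = timeScan(c, "bloom_test", 7);
        long second = timeScan(c, "bloom_test", 8);
        if (second >= first)
          throw new AssertionError("expected the second, bloom-backed scan to be faster");
      }
    }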

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/fateStartvation.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/fateStartvation.py b/test/system/auto/simple/fateStartvation.py
deleted file mode 100755
index 6c5151c..0000000
--- a/test/system/auto/simple/fateStartvation.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from JavaTest import JavaTest
-
-import unittest
-
-class FateStarvationTest(JavaTest):
-    "Try to trigger a bug that was found in FATE"
-
-    order = 21
-    testClass="org.apache.accumulo.test.functional.FateStarvationTest"
-
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(FateStarvationTest())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/gc.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/gc.py b/test/system/auto/simple/gc.py
deleted file mode 100755
index f06467f..0000000
--- a/test/system/auto/simple/gc.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import glob
-import logging
-import unittest
-import sleep
-import signal
-
-from TestUtils import ROOT, ROOT_PASSWORD, INSTANCE_NAME, TestUtilsMixin, ACCUMULO_HOME, ACCUMULO_DIR, ZOOKEEPERS, ID
-from simple.readwrite import SunnyDayTest
-
-log = logging.getLogger('test.auto')
-
-class GCTest(SunnyDayTest):
-
-    order = SunnyDayTest.order + 1
-
-    settings = SunnyDayTest.settings.copy()
-    settings.update({
-        'gc.cycle.start': 1,
-        'gc.cycle.delay': 1,
-        'tserver.memory.maps.max':'5K',
-        'tserver.compaction.major.delay': 1,
-        })
-    tableSettings = SunnyDayTest.tableSettings.copy()
-    tableSettings['test_ingest'] = { 
-        'table.split.threshold': '5K',
-        }
-
-    def fileCount(self):
-        handle = self.runOn(self.masterHost(),
-                            ['hadoop', 'fs', '-lsr', ACCUMULO_DIR+"/tables"])
-        out, err = handle.communicate()
-        return len(out.split('\n'))
-
-    def waitForFileCountToStabilize(self):
-        count = self.fileCount()
-        while True:
-            self.sleep(5)
-            update = self.fileCount()
-            if update == count:
-                return count
-            count = update
-
-    def runTest(self):
-        self.stop_gc(self.masterHost())
-        self.waitForStop(self.ingester, 60)
-        self.shell(self.masterHost(), 'flush -t test_ingest -w')
-
-        count = self.waitForFileCountToStabilize()
-        gc = self.runOn(self.masterHost(), [self.accumulo_sh(), 'gc'])
-        self.sleep(10)
-        collected = self.fileCount()
-        self.assert_(count > collected)
-
-        handle = self.runOn(self.masterHost(),
-                            ['grep', '-q', 'root_tablet'] +
-                            glob.glob(os.path.join(ACCUMULO_HOME,'logs',ID,'gc_*')))
-        out, err = handle.communicate()
-        self.assert_(handle.returncode != 0)
-        self.pkill(self.masterHost(), 'app=gc', signal.SIGHUP)
-        self.wait(gc)
-        log.info("Verifying Ingestion")
-        self.waitForStop(self.verify(self.masterHost(), self.options.rows),
-                         10)
-        self.shutdown_accumulo()
-        
-class GCLotsOfCandidatesTest(TestUtilsMixin, unittest.TestCase):
-
-    order = GCTest.order + 1
-    settings = SunnyDayTest.settings.copy()
-    settings.update({
-        'gc.cycle.start': 1,
-        'gc.cycle.delay': 1
-        })
-
-    def runTest(self):
-        self.stop_gc(self.masterHost())
-        log.info("Filling !METADATA table with bogus delete flags")
-        prep = self.runOn(self.masterHost(),
-                        [self.accumulo_sh(), 
-                         'org.apache.accumulo.test.GCLotsOfCandidatesTest',
-                         '-i',INSTANCE_NAME,'-z', ZOOKEEPERS,'-u', ROOT, '-p', ROOT_PASSWORD])
-        out, err = prep.communicate()
-        self.assert_(prep.returncode == 0)
-
-        log.info("Running GC with low memory allotment")
-        gc = self.runOn('localhost',
-                        ['bash', '-c', 'ACCUMULO_GC_OPTS="-Xmx10m " ' + self.accumulo_sh() + ' gc'])
-        self.sleep(10)
-        self.pkill('localhost', 'app=gc', signal.SIGHUP)
-        self.wait(gc)
-
-        log.info("Verifying GC ran out of memory and cycled instead of giving up")
-        grep = self.runOn('localhost',
-                        ['grep', '-q', 'delete candidates has exceeded'] +
-                        glob.glob(os.path.join(ACCUMULO_HOME,'logs', ID, 'gc_*')))
-        out, err = grep.communicate()
-        self.assert_(grep.returncode == 0)
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(GCTest())
-    result.addTest(GCLotsOfCandidatesTest())
-    return result
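
The assertion in GCTest reduces to counting entries under the tables directory
before and after a collection cycle. A hedged sketch of the same fileCount()
idea against the Hadoop FileSystem API instead of shelling out to
"hadoop fs -lsr"; the /accumulo/tables path stands in for ACCUMULO_DIR:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TableFileCounter {
      // Count every file and directory under dir, like "hadoop fs -lsr | wc -l".
      static int count(FileSystem fs, Path dir) throws Exception {
        int n = 0;
        for (FileStatus s : fs.listStatus(dir)) {
          n++;
          if (s.isDir()) // Hadoop 1.x spelling; isDirectory() in later APIs
            n += count(fs, s.getPath());
        }
        return n;
      }

      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path tables = new Path("/accumulo/tables"); // assumed ACCUMULO_DIR + "/tables"
        int before = count(fs, tables);
        // ... trigger a GC cycle here ...
        int after = count(fs, tables);
        System.out.println(before + " -> " + after + "; GC should shrink this");
      }
    }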

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/largeRow.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/largeRow.py b/test/system/auto/simple/largeRow.py
deleted file mode 100755
index b0ca92a..0000000
--- a/test/system/auto/simple/largeRow.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from JavaTest import JavaTest
-
-import unittest
-
-class LargeRowTest(JavaTest):
-    "Test large rows"
-
-    order = 21
-    testClass="org.apache.accumulo.test.functional.LargeRowTest"
-
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(LargeRowTest())
-    return result

http://git-wip-us.apache.org/repos/asf/accumulo/blob/aea43136/test/system/auto/simple/logicalTime.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/logicalTime.py b/test/system/auto/simple/logicalTime.py
deleted file mode 100755
index 406eda6..0000000
--- a/test/system/auto/simple/logicalTime.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from JavaTest import JavaTest
-
-import unittest
-
-class LogicalTimeTest(JavaTest):
-    "Logical Time Test"
-
-    order = 21
-    testClass="org.apache.accumulo.test.functional.LogicalTimeTest"
-
-
-def suite():
-    result = unittest.TestSuite()
-    result.addTest(LogicalTimeTest())
-    return result
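
These one-class wrappers do nothing but point the Python harness at a Java
functional test, which is why the conversion in the commit below can replace
each of them with a small JUnit IT. A sketch of the converted shape, modeled
on the SplitRecoveryIT added later in this message (the class name here is
illustrative, not necessarily what the real conversion used):

    package org.apache.accumulo.test.functional;

    import static org.junit.Assert.assertEquals;

    import org.junit.Test;

    public class LargeRowIT extends MacTest {
      @Test(timeout = 60 * 1000)
      public void test() throws Exception {
        // run the same class the wrapper delegated to, inside the mini
        // cluster, and require a clean exit
        assertEquals(0, cluster.exec(LargeRowTest.class).waitFor());
      }
    }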


[25/50] [abbrv] git commit: ACCUMULO-1537 completed the conversion of functional tests to IT; also converted ShellServerTest to an IT

Posted by ct...@apache.org.
ACCUMULO-1537 completed the conversion of functional tests to IT; also converted ShellServerTest to an IT

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1501497 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/7a1075a4
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/7a1075a4
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/7a1075a4

Branch: refs/heads/ACCUMULO-1496
Commit: 7a1075a47299afd6d27225b17814c11eb62432d2
Parents: 8d815a8
Author: Eric C. Newton <ec...@apache.org>
Authored: Tue Jul 9 19:50:06 2013 +0000
Committer: Eric C. Newton <ec...@apache.org>
Committed: Tue Jul 9 19:50:06 2013 +0000

----------------------------------------------------------------------
 .../accumulo/core/cli/ClientOnDefaultTable.java |   2 +-
 .../minicluster/MiniAccumuloCluster.java        |  96 +-
 .../minicluster/ProcessNotFoundException.java   |  23 +
 .../accumulo/minicluster/ProcessReference.java  |  44 +
 .../server/tabletserver/TabletServer.java       |   3 +
 .../org/apache/accumulo/test/VerifyIngest.java  |   2 +-
 .../test/functional/CacheTestClean.java         |  13 +-
 .../test/functional/MasterFailoverIT.java       |  59 ++
 .../accumulo/test/functional/NativeMapTest.java |   1 +
 .../test/functional/SplitRecoveryIT.java        |  30 +
 test/src/test/c/fake_disk_failure.c             |  51 ++
 .../org/apache/accumulo/test/ShellServerIT.java | 865 +++++++++++++++++++
 .../apache/accumulo/test/ShellServerTest.java   | 805 -----------------
 .../apache/accumulo/test/TableOperationsIT.java |   6 +-
 .../accumulo/test/functional/BinaryIT.java      |   2 +-
 .../accumulo/test/functional/BloomFilterIT.java |  20 +-
 .../apache/accumulo/test/functional/BulkIT.java |  10 +-
 .../functional/BulkSplitOptimizationIT.java     |   3 +-
 .../test/functional/ChaoticBlancerIT.java       |   6 +-
 .../accumulo/test/functional/DeleteIT.java      |   5 +-
 .../test/functional/DynamicThreadPoolsIT.java   |  13 +-
 .../test/functional/HalfDeadTServerIT.java      | 158 ++++
 .../test/functional/LateLastContactIT.java      |  44 +
 .../accumulo/test/functional/MacTest.java       |  10 +-
 .../accumulo/test/functional/MapReduceIT.java   |   2 +-
 .../accumulo/test/functional/MaxOpenIT.java     |   2 +-
 .../test/functional/MetadataSplitIT.java        |  50 ++
 .../accumulo/test/functional/NativeMapIT.java   |  30 +
 .../accumulo/test/functional/PermissionsIT.java |   2 +-
 .../accumulo/test/functional/RestartIT.java     | 144 +++
 .../test/functional/RestartStressIT.java        |  73 ++
 .../test/functional/ServerSideErrorIT.java      |   2 +-
 .../test/functional/SparseColumnFamilyIT.java   |   5 -
 .../accumulo/test/functional/SplitIT.java       |   2 +-
 .../accumulo/test/functional/TableIT.java       |  13 +-
 .../accumulo/test/functional/TimeoutIT.java     |   6 +-
 .../accumulo/test/functional/VisibilityIT.java  |   2 +-
 .../test/functional/WriteAheadLogIT.java        |  69 ++
 .../accumulo/test/functional/ZooCacheIT.java    |  55 ++
 test/system/auto/fake_disk_failure.c            |  55 --
 test/system/auto/simple/masterFailover.py       |  51 --
 test/system/auto/simple/nativeMap.py            |  42 -
 test/system/auto/simple/shell.py                | 474 ----------
 test/system/auto/simple/splitRecovery.py        |  30 -
 test/system/auto/simple/wal.py                  |  68 --
 test/system/auto/simple/zoo.py                  |  54 --
 test/system/auto/simple/zooCacheTest.py         |  51 --
 test/system/auto/stress/halfDead.py             | 101 ---
 test/system/auto/stress/migrations.py           |  82 --
 test/system/auto/stress/msplit.py               |  53 --
 test/system/auto/stress/restart.py              | 199 -----
 test/system/auto/stress/table.py                |  61 --
 test/system/auto/stress/weird.py                |  49 --
 53 files changed, 1839 insertions(+), 2259 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/core/src/main/java/org/apache/accumulo/core/cli/ClientOnDefaultTable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/cli/ClientOnDefaultTable.java b/core/src/main/java/org/apache/accumulo/core/cli/ClientOnDefaultTable.java
index 44aca71..b86638a 100644
--- a/core/src/main/java/org/apache/accumulo/core/cli/ClientOnDefaultTable.java
+++ b/core/src/main/java/org/apache/accumulo/core/cli/ClientOnDefaultTable.java
@@ -31,7 +31,7 @@ public class ClientOnDefaultTable extends ClientOpts {
   }
   
   @Parameter(names = "--table", description = "table to use")
-  String tableName;
+  public String tableName;
   
   public String getTableName() {
     if (tableName == null)
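
The only change here is visibility: tests in other packages can now assign the
table directly instead of routing it through command-line parsing. The new
MasterFailoverIT later in this commit relies on exactly that ("c" and SOPTS
come from that test):

    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
    vopts.tableName = "test_ingest2"; // legal now that the field is public
    VerifyIngest.verifyIngest(c, vopts, SOPTS);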

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
index de85062..ea29b92 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloCluster.java
@@ -25,9 +25,12 @@ import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.Set;
@@ -39,6 +42,7 @@ import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.ZooKeeperInstance;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.master.thrift.MasterGoalState;
+import org.apache.accumulo.core.util.Daemon;
 import org.apache.accumulo.core.util.Pair;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.accumulo.server.master.Master;
@@ -48,6 +52,7 @@ import org.apache.accumulo.server.util.Initialize;
 import org.apache.accumulo.server.util.PortUtils;
 import org.apache.accumulo.server.util.time.SimpleTimer;
 import org.apache.accumulo.start.Main;
+import org.apache.commons.io.FileUtils;
 import org.apache.zookeeper.server.ZooKeeperServerMain;
 
 /**
@@ -58,7 +63,7 @@ import org.apache.zookeeper.server.ZooKeeperServerMain;
  */
 public class MiniAccumuloCluster {
   
-  public static class LogWriter extends Thread {
+  public static class LogWriter extends Daemon {
     private BufferedReader in;
     private BufferedWriter out;
 
@@ -66,7 +71,6 @@ public class MiniAccumuloCluster {
      * @throws IOException
      */
     public LogWriter(InputStream stream, File logFile) throws IOException {
-      this.setDaemon(true);
       this.in = new BufferedReader(new InputStreamReader(stream));
       out = new BufferedWriter(new FileWriter(logFile));
 
@@ -108,9 +112,9 @@ public class MiniAccumuloCluster {
   }
 
   private boolean initialized = false;
-  private Process zooKeeperProcess;
-  private Process masterProcess;
-  private Process[] tabletServerProcesses;
+  private Process zooKeeperProcess = null;
+  private Process masterProcess = null;
+  private List<Process> tabletServerProcesses = new ArrayList<Process>();
 
   private Set<Pair<ServerType,Integer>> debugPorts = new HashSet<Pair<ServerType,Integer>>();
 
@@ -140,6 +144,7 @@ public class MiniAccumuloCluster {
 
     ArrayList<String> argList = new ArrayList<String>();
     argList.addAll(Arrays.asList(javaBin, "-cp", classpath));
+    argList.add("-Djava.library.path=" + config.getLibDir());
     argList.addAll(extraJvmOpts);
     argList.addAll(Arrays.asList("-XX:+UseConcMarkSweepGC", "-XX:CMSInitiatingOccupancyFraction=75", Main.class.getName(), className));
     argList.addAll(Arrays.asList(args));
@@ -236,6 +241,15 @@ public class MiniAccumuloCluster {
     zooCfg.store(fileWriter, null);
 
     fileWriter.close();
+
+    File nativeMap = new File(config.getLibDir().getAbsolutePath() + "/native/map");
+    nativeMap.mkdirs();
+    String testRoot = new File(new File(System.getProperty("user.dir")).getParent() + "/server/src/main/c++/nativeMap").getAbsolutePath();
+    for (String file : new File(testRoot).list()) {
+      File src = new File(testRoot, file);
+      if (src.isFile() && file.startsWith("libNativeMap"))
+        FileUtils.copyFile(src, new File(nativeMap, file));
+    }
   }
 
   /**
@@ -247,8 +261,6 @@ public class MiniAccumuloCluster {
    *           if already started
    */
   public void start() throws IOException, InterruptedException {
-    if (zooKeeperProcess != null)
-      throw new IllegalStateException("Already started");
 
     if (!initialized) {
       
@@ -265,12 +277,13 @@ public class MiniAccumuloCluster {
         }
       });
     }
-      
-    zooKeeperProcess = exec(Main.class, ServerType.ZOOKEEPER, ZooKeeperServerMain.class.getName(), zooCfgFile.getAbsolutePath());
 
-    // sleep a little bit to let zookeeper come up before calling init, seems to work better
-    UtilWaitThread.sleep(250);
-    
+    if (zooKeeperProcess == null) {
+      zooKeeperProcess = exec(Main.class, ServerType.ZOOKEEPER, ZooKeeperServerMain.class.getName(), zooCfgFile.getAbsolutePath());
+      // sleep a little bit to let zookeeper come up before calling init, seems to work better
+      UtilWaitThread.sleep(250);
+    }
+
     if (!initialized) {
       Process initProcess = exec(Initialize.class, "--instance-name", config.getInstanceName(), "--password", config.getRootPassword(), "--username", "root");
       int ret = initProcess.waitFor();
@@ -280,17 +293,17 @@ public class MiniAccumuloCluster {
       initialized = true; 
     }
 
-    tabletServerProcesses = new Process[config.getNumTservers()];
-    for (int i = 0; i < config.getNumTservers(); i++) {
-      tabletServerProcesses[i] = exec(TabletServer.class, ServerType.TABLET_SERVER);
+    for (int i = tabletServerProcesses.size(); i < config.getNumTservers(); i++) {
+      tabletServerProcesses.add(exec(TabletServer.class, ServerType.TABLET_SERVER));
     }
     Process goal = exec(Main.class, SetGoalState.class.getName(), MasterGoalState.NORMAL.toString());
     int ret = goal.waitFor();
     if (ret != 0) {
       throw new RuntimeException("Could not set master goal state, process returned " + ret);
     }
-
-    masterProcess = exec(Master.class, ServerType.MASTER);
+    if (masterProcess == null) {
+      masterProcess = exec(Master.class, ServerType.MASTER);
+    }
   }
 
   private List<String> buildRemoteDebugParams(int port) {
@@ -305,6 +318,53 @@ public class MiniAccumuloCluster {
   public Set<Pair<ServerType,Integer>> getDebugPorts() {
     return debugPorts;
   }
+  
+  List<ProcessReference> references(Process... procs) {
+    List<ProcessReference> result = new ArrayList<ProcessReference>();
+    for (Process proc : procs) {
+      result.add(new ProcessReference(proc));
+    }
+    return result;
+  }
+  
+  public Map<ServerType, Collection<ProcessReference>> getProcesses() {
+    Map<ServerType, Collection<ProcessReference>> result = new HashMap<ServerType, Collection<ProcessReference>>();
+    result.put(ServerType.MASTER, references(masterProcess));
+    result.put(ServerType.TABLET_SERVER, references(tabletServerProcesses.toArray(new Process[0])));
+    result.put(ServerType.ZOOKEEPER, references(zooKeeperProcess));
+    return result;
+  }
+  
+  public void killProcess(ServerType type, ProcessReference proc) throws ProcessNotFoundException {
+    boolean found = false;
+    switch (type) {
+      case MASTER:
+        if (proc.equals(masterProcess)) {
+          masterProcess.destroy();
+          masterProcess = null;
+          found = true;
+        }
+        break;
+      case TABLET_SERVER:
+        for (Process tserver: tabletServerProcesses) {
+          if (proc.equals(tserver)) {
+            tabletServerProcesses.remove(tserver);
+            found = true;
+            break;
+          }
+        }
+        break;
+      case ZOOKEEPER:
+        if (proc.equals(zooKeeperProcess)) {
+          zooKeeperProcess.destroy();
+          zooKeeperProcess = null;
+          found = true;
+        }
+        break;
+    }
+    if (!found)
+      throw new ProcessNotFoundException();
+  }
 
   /**
    * @return Accumulo instance name
@@ -342,7 +402,7 @@ public class MiniAccumuloCluster {
       lw.flush();
     zooKeeperProcess = null;
     masterProcess = null;
-    tabletServerProcesses = null;
+    tabletServerProcesses.clear();
   }
 
   /**
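
Taken together, getProcesses(), killProcess(), and the now re-entrant start()
let an IT kill individual servers and revive only what is missing, which is
what the new restart-style tests build on. One thing worth noting in the hunk
above: the TABLET_SERVER branch drops the process from tracking but, unlike
the MASTER and ZOOKEEPER branches, never calls destroy() on it. A minimal
usage sketch, assuming an already started cluster:

    import org.apache.accumulo.minicluster.MiniAccumuloCluster;
    import org.apache.accumulo.minicluster.ProcessReference;
    import org.apache.accumulo.minicluster.ServerType;

    public class RestartSketch {
      static void bounceTabletServers(MiniAccumuloCluster cluster) throws Exception {
        // kill every tserver the cluster is tracking...
        for (ProcessReference ref : cluster.getProcesses().get(ServerType.TABLET_SERVER))
          cluster.killProcess(ServerType.TABLET_SERVER, ref);
        // ...then start() fills in only the missing processes, since the
        // zookeeper and master fields are null-checked and the tserver
        // list is grown back up to config.getNumTservers()
        cluster.start();
      }
    }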

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/minicluster/src/main/java/org/apache/accumulo/minicluster/ProcessNotFoundException.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/ProcessNotFoundException.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/ProcessNotFoundException.java
new file mode 100644
index 0000000..ed13760
--- /dev/null
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/ProcessNotFoundException.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.minicluster;
+
+public class ProcessNotFoundException extends Exception {
+
+  private static final long serialVersionUID = 1L;
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/minicluster/src/main/java/org/apache/accumulo/minicluster/ProcessReference.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/ProcessReference.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/ProcessReference.java
new file mode 100644
index 0000000..5de99a2
--- /dev/null
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/ProcessReference.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.accumulo.minicluster;
+
+/**
+ * Opaque handle to a process.
+ */
+public class ProcessReference {
+  private Process process;
+
+  ProcessReference(Process process) {
+    this.process = process;
+  }
+  
+  @Override
+  public String toString() {
+    return process.toString();
+  }
+  
+  @Override
+  public int hashCode() {
+    return process.hashCode();
+  }
+  
+  @Override
+  public boolean equals(Object obj) {
+    return process.equals(obj);
+  }
+}
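
One subtlety in equals() above: it hands the argument straight to
Process.equals(), so a ProcessReference never compares equal to another
ProcessReference, only to the raw Process it wraps. That is sufficient for
killProcess() in this commit, which compares references against the tracked
Process objects, but a symmetric version would unwrap the other side first.
A sketch:

    @Override
    public boolean equals(Object obj) {
      if (this == obj)
        return true;
      if (obj instanceof ProcessReference)
        return process.equals(((ProcessReference) obj).process);
      return false;
    }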

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
index fe2c2de..52548bf 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
@@ -204,6 +204,7 @@ import org.apache.accumulo.trace.instrument.Trace;
 import org.apache.accumulo.trace.instrument.thrift.TraceWrap;
 import org.apache.accumulo.trace.thrift.TInfo;
 import org.apache.commons.collections.map.LRUMap;
+import org.apache.hadoop.fs.FSError;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
@@ -1550,6 +1551,8 @@ public class TabletServer extends AbstractMetricsImpl implements org.apache.accu
             break;
           } catch (IOException ex) {
             log.warn("logging mutations failed, retrying");
+          } catch (FSError ex) { // happens when DFS is localFS
+            log.warn("logging mutations failed, retrying");
           } catch (Throwable t) {
             log.error("Unknown exception logging mutations, counts for mutations in flight not decremented!", t);
             throw new RuntimeException(t);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/main/java/org/apache/accumulo/test/VerifyIngest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/VerifyIngest.java b/test/src/main/java/org/apache/accumulo/test/VerifyIngest.java
index c6aa0b1..7d710fa 100644
--- a/test/src/main/java/org/apache/accumulo/test/VerifyIngest.java
+++ b/test/src/main/java/org/apache/accumulo/test/VerifyIngest.java
@@ -150,7 +150,7 @@ public class VerifyIngest {
         
         Key startKey = new Key(new Text("row_" + String.format("%010d", expectedRow)));
         
-        Scanner scanner = connector.createScanner("test_ingest", labelAuths);
+        Scanner scanner = connector.createScanner(opts.getTableName(), labelAuths);
         scanner.setBatchSize(scanOpts.scanBatchSize);
         scanner.setRange(new Range(startKey, endKey));
         for (int j = 0; j < opts.cols; j++) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/main/java/org/apache/accumulo/test/functional/CacheTestClean.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/CacheTestClean.java b/test/src/main/java/org/apache/accumulo/test/functional/CacheTestClean.java
index c522914..3fe94e1 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/CacheTestClean.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CacheTestClean.java
@@ -17,11 +17,11 @@
 package org.apache.accumulo.test.functional;
 
 import java.io.File;
-import java.util.Arrays;
 
 import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
 import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.commons.io.FileUtils;
 
 public class CacheTestClean {
   
@@ -38,14 +38,9 @@ public class CacheTestClean {
       zoo.recursiveDelete(rootDir, NodeMissingPolicy.FAIL);
     }
     
-    if (!reportDir.exists()) {
-      reportDir.mkdir();
-    } else {
-      File[] files = reportDir.listFiles();
-      if (files.length != 0)
-        throw new Exception("dir " + reportDir + " is not empty: " + Arrays.asList(files));
+    if (reportDir.exists()) {
+      FileUtils.deleteDirectory(reportDir);
     }
-    
+    reportDir.mkdirs();
   }
-  
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/main/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/MasterFailoverIT.java b/test/src/main/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
new file mode 100644
index 0000000..9934874
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/MasterFailoverIT.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import java.util.Collections;
+
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.accumulo.minicluster.ProcessReference;
+import org.apache.accumulo.minicluster.ServerType;
+import org.apache.accumulo.server.master.Master;
+import org.apache.accumulo.test.TestIngest;
+import org.apache.accumulo.test.VerifyIngest;
+import org.junit.Test;
+
+public class MasterFailoverIT extends MacTest {
+  
+  @Override
+  public void configure(MiniAccumuloConfig cfg) {
+    cfg.setSiteConfig(Collections.singletonMap(Property.INSTANCE_ZK_TIMEOUT.getKey(), "5s"));
+  }
+
+  @Test(timeout=30*1000)
+  public void test() throws Exception {
+    Connector c = getConnector();
+    c.tableOperations().create("test_ingest");
+    TestIngest.Opts opts = new TestIngest.Opts();
+    TestIngest.ingest(c, opts, BWOPTS);
+    for (ProcessReference master : cluster.getProcesses().get(ServerType.MASTER)) {
+      cluster.killProcess(ServerType.MASTER, master);
+    }
+    // start up a new one
+    Process p = cluster.exec(Master.class);
+    // talk to it
+    c.tableOperations().rename("test_ingest", "test_ingest2");
+    try {
+      VerifyIngest.Opts vopts = new VerifyIngest.Opts();
+      vopts.tableName = "test_ingest2";
+      VerifyIngest.verifyIngest(c, vopts, SOPTS);
+    } finally {
+      p.destroy();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/main/java/org/apache/accumulo/test/functional/NativeMapTest.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/NativeMapTest.java b/test/src/main/java/org/apache/accumulo/test/functional/NativeMapTest.java
index faecba0..0135557 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/NativeMapTest.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/NativeMapTest.java
@@ -86,6 +86,7 @@ public class NativeMapTest {
     nmt.testBinary();
     nmt.testEmpty();
     nmt.testConcurrentIter();
+    System.out.println("Ran to completion");
   }
   
   // END JUnit methods
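
The new stdout marker is there so the NativeMapIT listed in the diffstat can
treat this class as a subprocess and distinguish a complete run from a silent
early exit. A hedged way to check for it, assuming the mini cluster's
convention of redirecting each child's stdout into its own log file (the
directory layout is an assumption):

    import java.io.File;
    import java.io.IOException;

    import org.apache.commons.io.FileUtils;

    public class MarkerCheck {
      // Scan a log directory for the completion marker.
      static boolean ranToCompletion(File logDir) throws IOException {
        File[] files = logDir.listFiles();
        if (files == null)
          return false;
        for (File f : files)
          if (f.isFile() && FileUtils.readFileToString(f).contains("Ran to completion"))
            return true;
        return false;
      }
    }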

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java b/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
new file mode 100644
index 0000000..6ada2c2
--- /dev/null
+++ b/test/src/main/java/org/apache/accumulo/test/functional/SplitRecoveryIT.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test.functional;
+
+import static org.junit.Assert.assertEquals;
+
+import org.junit.Test;
+
+public class SplitRecoveryIT extends MacTest {
+  
+  @Test(timeout=10*1000)
+  public void test() throws Exception {
+    assertEquals(0, cluster.exec(SplitRecoveryTest.class).waitFor());
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/c/fake_disk_failure.c
----------------------------------------------------------------------
diff --git a/test/src/test/c/fake_disk_failure.c b/test/src/test/c/fake_disk_failure.c
new file mode 100644
index 0000000..dbb2e5e
--- /dev/null
+++ b/test/src/test/c/fake_disk_failure.c
@@ -0,0 +1,51 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+#include <unistd.h>
+#include <dlfcn.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+static
+void test_pause() {
+  static char trickFile[1024] = "";
+  static char pid[10] = "";
+  if (trickFile[0] == '\0') {
+    strcpy(trickFile, getenv("TRICK_FILE"));
+  }
+
+  while (access(trickFile, R_OK) == 0) {
+    fprintf(stdout, "sleeping\n");
+    fflush(stdout);
+    sleep(1);
+  }
+}
+
+ssize_t write(int fd, const void *buf, size_t count) {
+  void * real_write = dlsym(RTLD_NEXT, "write");
+  ssize_t (*real_write_t)(int, const void*, size_t) = real_write;
+
+  test_pause();
+  return real_write_t(fd, buf, count);
+}
+
+ssize_t read(int fd, void *buf, size_t count) {
+  void * real_read = dlsym(RTLD_NEXT, "read");
+  ssize_t (*real_read_t)(int, void*, size_t) = real_read;
+  test_pause();
+  return real_read_t(fd, buf, count);
+}
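
The interposer is activated through the dynamic linker: a test builds this
file into a shared object, points LD_PRELOAD at it, and then creates or
removes the TRICK_FILE to stall and resume every read()/write() in the child.
HalfDeadTServerIT in the diffstat above is the consumer; a minimal launch
sketch with ProcessBuilder (the .so and trick-file paths are placeholders):

    import java.io.File;
    import java.io.IOException;

    public class FakeDiskFailureLauncher {
      static Process launch(String mainClass) throws IOException {
        File trick = new File(System.getProperty("java.io.tmpdir"), "trick_file");
        ProcessBuilder pb = new ProcessBuilder("java", mainClass);
        pb.environment().put("LD_PRELOAD", "/tmp/libfake_disk_failure.so"); // placeholder path
        pb.environment().put("TRICK_FILE", trick.getAbsolutePath());
        // while TRICK_FILE exists, the child's I/O spins in test_pause();
        // deleting the file lets reads and writes proceed again
        return pb.start();
      }
    }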

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java b/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java
new file mode 100644
index 0000000..597722b
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/ShellServerIT.java
@@ -0,0 +1,865 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.PrintWriter;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.util.Map.Entry;
+
+import jline.console.ConsoleReader;
+
+import org.apache.accumulo.core.Constants;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.conf.AccumuloConfiguration;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.file.FileOperations;
+import org.apache.accumulo.core.file.FileSKVWriter;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.core.util.shell.Shell;
+import org.apache.accumulo.minicluster.MiniAccumuloCluster;
+import org.apache.accumulo.minicluster.MiniAccumuloConfig;
+import org.apache.accumulo.server.trace.TraceServer;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.tools.DistCp;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+public class ShellServerIT {
+  public static class TestOutputStream extends OutputStream {
+    StringBuilder sb = new StringBuilder();
+    
+    @Override
+    public void write(int b) throws IOException {
+      sb.append((char) (0xff & b));
+    }
+    
+    public String get() {
+      return sb.toString();
+    }
+    
+    public void clear() {
+      sb.setLength(0);
+    }
+  }
+  
+  public static class StringInputStream extends InputStream {
+    private String source = "";
+    private int offset = 0;
+    
+    @Override
+    public int read() throws IOException {
+      if (offset == source.length())
+        return '\n';
+      else
+        return source.charAt(offset++);
+    }
+    
+    public void set(String other) {
+      source = other;
+      offset = 0;
+    }
+  }
+  
+  private static String secret = "superSecret";
+  public static TemporaryFolder folder = new TemporaryFolder();
+  public static MiniAccumuloCluster cluster;
+  public static TestOutputStream output;
+  public static StringInputStream input;
+  public static Shell shell;
+  private static Process traceProcess;
+  
+  static String exec(String cmd) throws IOException {
+    output.clear();
+    shell.execCommand(cmd, true, true);
+    return output.get();
+  }
+  
+  static String exec(String cmd, boolean expectGoodExit) throws IOException {
+    String result = exec(cmd);
+    if (expectGoodExit)
+      assertGoodExit("", true);
+    else
+      assertBadExit("", true);
+    return result;
+  }
+  
+  static String exec(String cmd, boolean expectGoodExit, String expectString) throws IOException {
+    return exec(cmd, expectGoodExit, expectString, true);
+  }
+  
+  static String exec(String cmd, boolean expectGoodExit, String expectString, boolean stringPresent) throws IOException {
+    String result = exec(cmd);
+    if (expectGoodExit)
+      assertGoodExit(expectString, stringPresent);
+    else
+      assertBadExit(expectString, stringPresent);
+    return result;
+  }
+  
+  static void assertGoodExit(String s, boolean stringPresent) {
+    Shell.log.info(output.get());
+    assertEquals(0, shell.getExitCode());
+    
+    if (s.length() > 0)
+      assertEquals(s + " present in " + output.get() + " was not " + stringPresent, stringPresent, output.get().contains(s));
+  }
+  
+  static void assertBadExit(String s, boolean stringPresent) {
+    Shell.log.debug(output.get());
+    assertTrue(shell.getExitCode() > 0);
+    if (s.length() > 0)
+      assertEquals(s + " present in " + output.get() + " was not " + stringPresent, stringPresent, output.get().contains(s));
+    shell.resetExitCode();
+  }
+  
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    folder.create();
+    MiniAccumuloConfig cfg = new MiniAccumuloConfig(folder.newFolder("miniAccumulo"), secret);
+    cluster = new MiniAccumuloCluster(cfg);
+    cluster.start();
+    
+    // history file is updated in $HOME
+    System.setProperty("HOME", folder.getRoot().getAbsolutePath());
+    
+    // start the shell
+    output = new TestOutputStream();
+    input = new StringInputStream();
+    shell = new Shell(new ConsoleReader(input, output));
+    shell.setLogErrorsToConsole();
+    shell.config("-u", "root", "-p", secret, "-z", cluster.getConfig().getInstanceName(), cluster.getConfig().getZooKeepers());
+    exec("quit", true);
+    shell.start();
+    shell.setExit(false);
+    
+    // use reflection to call this method so it does not need to be made public
+    Method method = cluster.getClass().getDeclaredMethod("exec", Class.class, String[].class);
+    method.setAccessible(true);
+    traceProcess = (Process) method.invoke(cluster, TraceServer.class, new String[0]);
+    
+    // give the tracer some time to start
+    UtilWaitThread.sleep(1000);
+  }
+  
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    cluster.stop();
+    traceProcess.destroy();
+    folder.delete();
+  }
+  
+  @After
+  public void tearDown() throws Exception {
+    Connector c = cluster.getConnector("root", secret);
+    for (String table : c.tableOperations().list()) {
+      if (!table.equals(MetadataTable.NAME) && !table.equals(RootTable.NAME))
+        c.tableOperations().delete(table);
+    }
+  }
+  
+  @Test(timeout = 30*1000)
+  public void exporttableImporttable() throws Exception {
+    // exporttable / importtable
+    exec("createtable t -evc", true);
+    make10();
+    exec("addsplits row5", true);
+    exec("config -t t -s table.split.threshold=345M", true);
+    exec("offline t", true);
+    String export = "file://" + folder.newFolder().toString();
+    exec("exporttable -t t " + export, true);
+    DistCp cp = newDistCp();
+    String import_ = "file://" + folder.newFolder().toString();
+    cp.run(new String[] {"-f", export + "/distcp.txt", import_});
+    exec("importtable t2 " + import_, true);
+    exec("config -t t2 -np", true, "345M", true);
+    exec("getsplits -t t2", true, "row5", true);
+    exec("constraint --list -t t2", true, "VisibilityConstraint=2", true);
+    exec("onlinetable t", true);
+    exec("deletetable -f t", true);
+    exec("deletetable -f t2", true);
+  }
+  
+  private DistCp newDistCp() {
+    try {
+      @SuppressWarnings("unchecked")
+      Constructor<DistCp>[] constructors = (Constructor<DistCp>[]) DistCp.class.getConstructors();
+      for (Constructor<DistCp> constructor : constructors) {
+        Class<?>[] parameterTypes = constructor.getParameterTypes();
+        if (parameterTypes.length > 0 && parameterTypes[0].equals(Configuration.class)) {
+          if (parameterTypes.length == 1) {
+            return constructor.newInstance(new Configuration());
+          } else if (parameterTypes.length == 2) {
+            return constructor.newInstance(new Configuration(), null);
+          }
+        }
+      }
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+    throw new RuntimeException("Unexpected constructors for DistCp");
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void setscaniterDeletescaniter() throws Exception {
+    // setscaniter, deletescaniter
+    exec("createtable t");
+    exec("insert a cf cq 1");
+    exec("insert a cf cq 1");
+    exec("insert a cf cq 1");
+    input.set("true\n\n\nSTRING");
+    exec("setscaniter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -n name", true);
+    exec("scan", true, "3", true);
+    exec("deletescaniter -n name", true);
+    exec("scan", true, "1", true);
+    exec("deletetable -f t");
+    
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void execfile() throws Exception {
+    // execfile
+    File file = folder.newFile();
+    PrintWriter writer = new PrintWriter(file.getAbsolutePath());
+    writer.println("about");
+    writer.close();
+    exec("execfile " + file.getAbsolutePath(), true, Constants.VERSION, true);
+    
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void egrep() throws Exception {
+    // egrep
+    exec("createtable t");
+    make10();
+    String lines = exec("egrep row[123]", true);
+    assertTrue(lines.split("\n").length - 1 == 3);
+    exec("deletetable -f t");
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void du() throws Exception {
+    // du
+    exec("createtable t");
+    make10();
+    exec("flush -t t -w");
+    exec("du t", true, " [t]", true);
+    output.clear();
+    shell.execCommand("du -h", false, false);
+    String o = output.get();
+    assertTrue(o.matches(".*26[0-9]\\s\\[t\\]\\n")); // for some reason, there's 1-2 bytes of fluctuation
+    exec("deletetable -f t");
+  }
+  
+  @Test(timeout = 1000)
+  public void debug() throws Exception {
+    exec("debug", true, "off", true);
+    exec("debug on", true);
+    exec("debug", true, "on", true);
+    exec("debug off", true);
+    exec("debug", true, "off", true);
+    exec("debug debug", false);
+    exec("debug debug debug", false);
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void user() throws Exception {
+    // createuser, deleteuser, user, users, droptable, grant, revoke
+    input.set("secret\nsecret\n");
+    exec("createuser xyzzy", true);
+    exec("users", true, "xyzzy", true);
+    String perms = exec("userpermissions -u xyzzy", true);
+    assertTrue(perms.contains("Table permissions (!METADATA): Table.READ"));
+    exec("grant -u xyzzy -s System.CREATE_TABLE", true);
+    perms = exec("userpermissions -u xyzzy", true);
+    assertTrue(perms.contains(""));
+    exec("grant -u root -t !METADATA Table.WRITE", true);
+    exec("grant -u root -t !METADATA Table.GOOFY", false);
+    exec("grant -u root -s foo", false);
+    exec("grant -u xyzzy -t !METADATA foo", false);
+    input.set("secret\nsecret\n");
+    exec("user xyzzy", true);
+    exec("createtable t", true, "xyzzy@", true);
+    exec("insert row1 cf cq 1", true);
+    exec("scan", true, "row1", true);
+    exec("droptable -f t", true);
+    exec("deleteuser xyzzy", false, "delete yourself", true);
+    input.set(secret + "\n" + secret + "\n");
+    exec("user root", true);
+    exec("revoke -u xyzzy -s System.CREATE_TABLE", true);
+    exec("revoke -u xyzzy -s System.GOOFY", false);
+    exec("revoke -u xyzzy -s foo", false);
+    exec("revoke -u xyzzy -t !METADATA Table.WRITE", true);
+    exec("revoke -u xyzzy -t !METADATA Table.GOOFY", false);
+    exec("revoke -u xyzzy -t !METADATA foo", false);
+    exec("deleteuser xyzzy", true);
+    exec("users", true, "xyzzy", false);
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void iter() throws Exception {
+    // setshelliter, listshelliter, deleteshelliter
+    exec("createtable t");
+    exec("insert a cf cq 1");
+    exec("insert a cf cq 1");
+    exec("insert a cf cq 1");
+    input.set("true\n\n\nSTRING\n");
+    exec("setshelliter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -pn sum -n name", true);
+    exec("setshelliter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 11 -pn sum -n name", false);
+    exec("setshelliter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -pn sum -n other", false);
+    input.set("true\n\n\nSTRING\n");
+    exec("setshelliter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 11 -pn sum -n xyzzy", true);
+    exec("scan -pn sum", true, "3", true);
+    exec("listshelliter", true, "Iterator name", true);
+    exec("listshelliter", true, "Iterator xyzzy", true);
+    exec("listshelliter", true, "Profile : sum", true);
+    exec("deleteshelliter -pn sum -n name", true);
+    exec("listshelliter", true, "Iterator name", false);
+    exec("listshelliter", true, "Iterator xyzzy", true);
+    exec("deleteshelliter -pn sum -a", true);
+    exec("listshelliter", true, "Iterator xyzzy", false);
+    exec("listshelliter", true, "Profile : sum", false);
+    exec("deletetable -f t");
+    // list iter
+    exec("createtable t");
+    exec("insert a cf cq 1");
+    exec("insert a cf cq 1");
+    exec("insert a cf cq 1");
+    input.set("true\n\n\nSTRING\n");
+    exec("setiter -scan -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -n name", true);
+    exec("setiter -scan -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 11 -n name", false);
+    exec("setiter -scan -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -n other", false);
+    input.set("true\n\n\nSTRING\n");
+    exec("setiter -scan -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 11 -n xyzzy", true);
+    exec("scan", true, "3", true);
+    exec("listiter -scan", true, "Iterator name", true);
+    exec("listiter -scan", true, "Iterator xyzzy", true);
+    exec("listiter -minc", true, "Iterator name", false);
+    exec("listiter -minc", true, "Iterator xyzzy", false);
+    exec("deleteiter -scan -n name", true);
+    exec("listiter -scan", true, "Iterator name", false);
+    exec("listiter -scan", true, "Iterator xyzzy", true);
+    exec("deletetable -f t");
+    
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void notable() throws Exception {
+    // notable
+    exec("createtable xyzzy", true);
+    exec("scan", true, " xyzzy>", true);
+    assertTrue(output.get().contains(" xyzzy>"));
+    exec("notable", true);
+    exec("scan", false, "Not in a table context.", true);
+    assertFalse(output.get().contains(" xyzzy>"));
+    exec("deletetable -f xyzzy");
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void sleep() throws Exception {
+    // sleep
+    long now = System.currentTimeMillis();
+    exec("sleep 0.2", true);
+    long diff = System.currentTimeMillis() - now;
+    assertTrue(diff >= 200);
+    assertTrue(diff < 400);
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void addauths() throws Exception {
+    // addauths
+    exec("createtable xyzzy -evc");
+    exec("insert a b c d -l foo", false, "does not have authorization", true);
+    exec("addauths -s foo,bar", true);
+    exec("getauths", true, "foo", true);
+    exec("getauths", true, "bar", true);
+    exec("insert a b c d -l foo");
+    exec("scan", true, "[foo]");
+    exec("scan -s bar", true, "[foo]", false);
+    exec("deletetable -f xyzzy");
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void byeQuitExit() throws Exception {
+    // bye, quit, exit
+    for (String cmd : "bye quit exit".split(" ")) {
+      assertFalse(shell.getExit());
+      exec(cmd);
+      assertTrue(shell.getExit());
+      shell.setExit(false);
+    }
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void classpath() throws Exception {
+    // classpath
+    exec("classpath", true, "Level 2: Java Classloader (loads everything defined by java classpath) URL classpath items are", true);
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void clearCls() throws Exception {
+    // clear/cls
+    if (shell.getReader().getTerminal().isAnsiSupported()) {
+      exec("cls", true, "[1;1H");
+      exec("clear", true, "[2J");
+    } else {
+      exec("cls", false, "does not support");
+      exec("clear", false, "does not support");
+    }
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void clonetable() throws Exception {
+    // clonetable
+    exec("createtable orig -evc");
+    exec("config -t orig -s table.split.threshold=123M", true);
+    exec("addsplits -t orig a b c", true);
+    exec("insert a b c value");
+    exec("scan", true, "value", true);
+    exec("clonetable orig clone");
+    // verify constraint, config, and splits were cloned
+    exec("constraint --list -t clone", true, "VisibilityConstraint=2", true);
+    exec("config -t clone -np", true, "123M", true);
+    exec("getsplits -t clone", true, "a\nb\nc\n");
+    // compact
+    exec("createtable c");
+    // make two files
+    exec("insert a b c d");
+    exec("flush -w");
+    exec("insert x y z v");
+    exec("flush -w");
+    int oldCount = countFiles();
+    // merge two files into one
+    exec("compact -t c -w");
+    assertTrue(countFiles() < oldCount);
+    exec("addsplits -t c f");
+    // make two more files:
+    exec("insert m 1 2 3");
+    exec("flush -w");
+    exec("insert n 1 2 3");
+    exec("flush -w");
+    oldCount = countFiles();
+    // at this point there are 3 files in the default tablet
+    // compact some data:
+    exec("compact -b g -e z -w");
+    assertEquals(oldCount - 2, countFiles());
+    exec("compact -w");
+    assertEquals(oldCount - 2, countFiles());
+    exec("merge --all -t c");
+    exec("compact -w");
+    assertEquals(oldCount - 3, countFiles());
+    exec("deletetable -f orig");
+    exec("deletetable -f clone");
+    exec("deletetable -f c");
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void constraint() throws Exception {
+    // constraint
+    exec("constraint -l -t !METADATA", true, "MetadataConstraints=1", true);
+    exec("createtable c -evc");
+    exec("constraint -l -t c", true, "VisibilityConstraint=2", true);
+    exec("constraint -t c -d 2", true, "Removed constraint 2 from table c");
+    exec("constraint -l -t c", true, "VisibilityConstraint=2", false);
+    exec("deletetable -f c");
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void deletemany() throws Exception {
+    // deletemany
+    exec("createtable t");
+    make10();
+    assertEquals(10, countkeys("t"));
+    exec("deletemany -f -b row8");
+    assertEquals(8, countkeys("t"));
+    exec("scan -t t -np", true, "row8", false);
+    make10();
+    exec("deletemany -f -b row4 -e row5");
+    assertEquals(8, countkeys("t"));
+    make10();
+    exec("deletemany -f -c cf:col4,cf:col5");
+    assertEquals(8, countkeys("t"));
+    make10();
+    exec("deletemany -f -r row3");
+    assertEquals(9, countkeys("t"));
+    make10();
+    exec("deletemany -f -r row3");
+    assertEquals(9, countkeys("t"));
+    make10();
+    exec("deletemany -f -b row3 -be -e row5 -ee");
+    assertEquals(9, countkeys("t"));
+    exec("deletetable -f t");
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void deleterows() throws Exception {
+    // deleterows
+    int base = countFiles();
+    exec("createtable t");
+    exec("addsplits row5 row7");
+    make10();
+    exec("flush -w -t t");
+    assertEquals(base + 3, countFiles());
+    exec("deleterows -t t -b row5 -e row7", true);
+    assertEquals(base + 2, countFiles());
+    exec("deletetable -f t");
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void groups() throws Exception {
+    exec("createtable t");
+    exec("setgroups -t t alpha=a,b,c num=3,2,1");
+    exec("getgroups -t t", true, "alpha=a,b,c", true);
+    exec("getgroups -t t", true, "num=1,2,3", true);
+    exec("deletetable -f t");
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void grep() throws Exception {
+    exec("createtable t", true);
+    make10();
+    exec("grep row[123]", true, "row1", false);
+    exec("grep row5", true, "row5", true);
+    exec("deletetable -f t", true);
+  }
+  
+  @Test
+  // (timeout = 30 * 1000)
+  public void help() throws Exception {
+    exec("help -np", true, "Help Commands", true);
+    exec("?", true, "Help Commands", true);
+    for (String c : ("bye exit quit " + "about help info ? " + "deleteiter deletescaniter listiter setiter setscaniter "
+        + "grant revoke systempermissions tablepermissions userpermissions " + "execfile history " + "authenticate cls clear notable sleep table user whoami "
+        + "clonetable config createtable deletetable droptable du exporttable importtable offline online renametable tables "
+        + "addsplits compact constraint flush getgropus getsplits merge setgroups " + "addauths createuser deleteuser dropuser getauths passwd setauths users "
+        + "delete deletemany deleterows egrep formatter interpreter grep importdirectory insert maxrow scan").split(" ")) {
+      exec("help " + c, true);
+    }
+  }
+  
+  // @Test(timeout = 30 * 1000)
+  public void history() throws Exception {
+    exec("history -c", true);
+    exec("createtable unusualstring");
+    exec("deletetable -f unusualstring");
+    exec("history", true, "unusualstring", true);
+    exec("history", true, "history", true);
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void importDirectory() throws Exception {
+    Configuration conf = new Configuration();
+    FileSystem fs = FileSystem.get(conf);
+    File importDir = folder.newFolder("import");
+    String even = new File(importDir, "even.rf").toString();
+    String odd = new File(importDir, "odd.rf").toString();
+    File errorsDir = folder.newFolder("errors");
+    fs.mkdirs(new Path(errorsDir.toString()));
+    AccumuloConfiguration aconf = AccumuloConfiguration.getDefaultConfiguration();
+    FileSKVWriter evenWriter = FileOperations.getInstance().openWriter(even, fs, conf, aconf);
+    evenWriter.startDefaultLocalityGroup();
+    FileSKVWriter oddWriter = FileOperations.getInstance().openWriter(odd, fs, conf, aconf);
+    oddWriter.startDefaultLocalityGroup();
+    long ts = System.currentTimeMillis();
+    Text cf = new Text("cf");
+    Text cq = new Text("cq");
+    Value value = new Value("value".getBytes());
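+    // write 100 rows total, evens into one rfile and odds into the other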
+    for (int i = 0; i < 100; i += 2) {
+      Key key = new Key(new Text(String.format("%8d", i)), cf, cq, ts);
+      evenWriter.append(key, value);
+      key = new Key(new Text(String.format("%8d", i + 1)), cf, cq, ts);
+      oddWriter.append(key, value);
+    }
+    evenWriter.close();
+    oddWriter.close();
+    assertEquals(0, shell.getExitCode());
+    exec("createtable t", true);
+    exec("importdirectory " + importDir + " " + errorsDir + " true", true);
+    exec("scan -r 00000000", true, "00000000", true);
+    exec("scan -r 00000099", true, "00000099", true);
+    exec("deletetable -f t");
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void info() throws Exception {
+    exec("info", true, Constants.VERSION, true);
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void interpreter() throws Exception {
+    exec("createtable t", true);
+    exec("interpreter -l", true, "HexScan", false);
+    exec("insert \\x02 cf cq value", true);
+    exec("scan -b 02", true, "value", false);
+    exec("interpreter -i org.apache.accumulo.core.util.interpret.HexScanInterpreter", true);
+    exec("interpreter -l", true, "HexScan", true);
+    exec("scan -b 02", true, "value", true);
+    exec("deletetable -f t", true);
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void listcompactions() throws Exception {
+    exec("createtable t", true);
+    exec("config -t t -s table.iterator.minc.slow=30,org.apache.accumulo.test.functional.SlowIterator", true);
+    exec("config -t t -s table.iterator.minc.slow.opt.sleepTime=100", true);
+    exec("insert a cf cq value", true);
+    exec("insert b cf cq value", true);
+    exec("insert c cf cq value", true);
+    exec("insert d cf cq value", true);
+    exec("flush -t t", true);
+    exec("sleep 0.2", true);
+    exec("listcompactions", true, "default_tablet");
+    String[] lines = output.get().split("\n");
+    String last = lines[lines.length - 1];
+    String[] parts = last.split("\\|");
+    assertEquals(12, parts.length);
+    exec("deletetable -f t", true);
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void maxrow() throws Exception {
+    exec("createtable t", true);
+    exec("insert a cf cq value", true);
+    exec("insert b cf cq value", true);
+    exec("insert ccc cf cq value", true);
+    exec("insert zzz cf cq value", true);
+    exec("maxrow", true, "zzz", true);
+    exec("delete zzz cf cq", true);
+    exec("maxrow", true, "ccc", true);
+    exec("deletetable -f t", true);
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void merge() throws Exception {
+    exec("createtable t");
+    exec("addsplits a m z");
+    exec("getsplits", true, "z", true);
+    exec("merge --all", true);
+    exec("getsplits", true, "z", false);
+    exec("deletetable -f t");
+    exec("getsplits -t !METADATA", true);
+    assertEquals(2, output.get().split("\n").length);
+    exec("getsplits -t !!ROOT", true);
+    assertEquals(1, output.get().split("\n").length);
+    exec("merge --all -t !METADATA");
+    exec("getsplits -t !METADATA", true);
+    assertEquals(1, output.get().split("\n").length);
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void ping() throws Exception {
+    for (int i = 0; i < 10; i++) {
+      exec("ping", true, "OK", true);
+      // wait for both tservers to start up
+      if (output.get().split("\n").length == 3)
+        break;
+      UtilWaitThread.sleep(1000);
+      
+    }
+    assertEquals(3, output.get().split("\n").length);
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void renametable() throws Exception {
+    exec("createtable aaaa");
+    exec("insert this is a value");
+    exec("renametable aaaa xyzzy");
+    exec("tables", true, "xyzzy", true);
+    exec("tables", true, "aaaa", false);
+    exec("scan -t xyzzy", true, "value", true);
+    exec("deletetable -f xyzzy", true);
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void systempermission() throws Exception {
+    exec("systempermissions");
+    assertEquals(8, output.get().split("\n").length - 1);
+    exec("tablepermissions", true);
+    assertEquals(6, output.get().split("\n").length - 1);
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void listscans() throws Exception {
+    exec("createtable t", true);
+    exec("config -t t -s table.iterator.scan.slow=30,org.apache.accumulo.test.functional.SlowIterator", true);
+    exec("config -t t -s table.iterator.scan.slow.opt.sleepTime=100", true);
+    exec("insert a cf cq value", true);
+    exec("insert b cf cq value", true);
+    exec("insert c cf cq value", true);
+    exec("insert d cf cq value", true);
+    Thread thread = new Thread() {
+      @Override
+      public void run() {
+        try {
+          Connector connector = cluster.getConnector("root", secret);
+          Scanner s = connector.createScanner("t", Authorizations.EMPTY);
+          for (@SuppressWarnings("unused")
+          Entry<Key,Value> kv : s)
+            ;
+        } catch (Exception ex) {
+          throw new RuntimeException(ex);
+        }
+      }
+    };
+    thread.start();
+    exec("sleep 0.1", true);
+    String scans = exec("listscans", true);
+    String lines[] = scans.split("\n");
+    String last = lines[lines.length - 1];
+    assertTrue(last.contains("RUNNING"));
+    String parts[] = last.split("\\|");
+    assertEquals(13, parts.length);
+    thread.join();
+    exec("deletetable -f t", true);
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void testPertableClasspath() throws Exception {
+    File fooFilterJar = File.createTempFile("FooFilter", ".jar");
+    FileUtils.copyURLToFile(this.getClass().getResource("/FooFilter.jar"), fooFilterJar);
+    fooFilterJar.deleteOnExit();
+    
+    File fooConstraintJar = File.createTempFile("FooConstraint", ".jar");
+    FileUtils.copyURLToFile(this.getClass().getResource("/FooConstraint.jar"), fooConstraintJar);
+    fooConstraintJar.deleteOnExit();
+    
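+    // register both jars under a new per-table classloader context named cx1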
+    exec(
+        "config -s " + Property.VFS_CONTEXT_CLASSPATH_PROPERTY.getKey() + "cx1=" + fooFilterJar.toURI().toString() + "," + fooConstraintJar.toURI().toString(),
+        true);
+    
+    exec("createtable ptc", true);
+    exec("config -t ptc -s " + Property.TABLE_CLASSPATH.getKey() + "=cx1", true);
+    
+    UtilWaitThread.sleep(200);
+    
+    exec("setiter -scan -class org.apache.accumulo.test.FooFilter -p 10 -n foo", true);
+    
+    exec("insert foo f q v", true);
+    
+    UtilWaitThread.sleep(100);
+    
+    exec("scan -np", true, "foo", false);
+    
+    exec("constraint -a FooConstraint", true);
+    
+    exec("offline ptc");
+    UtilWaitThread.sleep(500);
+    exec("online ptc");
+    
+    exec("table ptc", true);
+    exec("insert foo f q v", false);
+    exec("insert ok foo q v", true);
+    
+    exec("deletetable ptc", true);
+    exec("config -d " + Property.VFS_CONTEXT_CLASSPATH_PROPERTY.getKey() + "cx1");
+    
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void trace() throws Exception {
+    exec("trace on", true);
+    exec("createtable t", true);
+    exec("insert a b c value", true);
+    exec("scan -np", true, "value", true);
+    exec("deletetable -f t");
+    exec("sleep 1");
+    String trace = exec("trace off");
+    System.out.println(trace);
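+    // the collected trace should contain spans from the write, scan, and table-delete operations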
+    assertTrue(trace.contains("sendMutations"));
+    assertTrue(trace.contains("startScan"));
+    assertTrue(trace.contains("DeleteTable"));
+  }
+  
+  @Test(timeout=30 * 1000)
+  public void badLogin() throws Exception {
+    input.set(secret + "\n");
+    String err = exec("user NoSuchUser", false);
+    assertTrue(err.contains("BAD_CREDENTIALS for user NoSuchUser"));
+  }
+  
+  private int countkeys(String table) throws IOException {
+    exec("scan -np -t " + table);
+    return output.get().split("\n").length - 1;
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void scans() throws Exception {
+    exec("createtable t");
+    make10();
+    String result = exec("scan -np -b row1 -e row1");
+    assertEquals(2, result.split("\n").length);
+    result = exec("scan -np -b row3 -e row5");
+    assertEquals(4, result.split("\n").length);
+    result = exec("scan -np -r row3");
+    assertEquals(2, result.split("\n").length);
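+    // ':' sorts immediately after '9' in ASCII, so "row:" lies just past the last row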
+    result = exec("scan -np -b row:");
+    assertEquals(1, result.split("\n").length);
+    result = exec("scan -np -b row");
+    assertEquals(11, result.split("\n").length);
+    result = exec("scan -np -e row:");
+    assertEquals(11, result.split("\n").length);
+    exec("deletetable -f t");
+  }
+  
+  @Test(timeout = 30 * 1000)
+  public void whoami() throws Exception {
+    assertTrue(exec("whoami", true).contains("root"));
+    input.set("secret\nsecret\n");
+    exec("createuser test_user");
+    exec("setauths -u test_user -s 12,3,4");
+    String auths = exec("getauths -u test_user");
+    assertTrue(auths.contains("3") && auths.contains("12") && auths.contains("4"));
+    input.set("secret\n");
+    exec("user test_user", true);
+    assertTrue(exec("whoami", true).contains("test_user"));
+    input.set(secret + "\n");
+    exec("user root", true);
+  }
+  
+  private void make10() throws IOException {
+    for (int i = 0; i < 10; i++) {
+      exec(String.format("insert row%d cf col%d value", i, i));
+    }
+  }
+  
+  private int countFiles() throws IOException {
+    exec("scan -t !METADATA -np -c file");
+    return output.get().split("\n").length - 1;
+  }
+  
+}

http://git-wip-us.apache.org/repos/asf/accumulo/blob/7a1075a4/test/src/test/java/org/apache/accumulo/test/ShellServerTest.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/ShellServerTest.java b/test/src/test/java/org/apache/accumulo/test/ShellServerTest.java
deleted file mode 100644
index f2f4ebe..0000000
--- a/test/src/test/java/org/apache/accumulo/test/ShellServerTest.java
+++ /dev/null
@@ -1,805 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.test;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.PrintWriter;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.Method;
-import java.util.Map.Entry;
-
-import jline.console.ConsoleReader;
-
-import org.apache.accumulo.core.Constants;
-import org.apache.accumulo.core.client.Connector;
-import org.apache.accumulo.core.client.Scanner;
-import org.apache.accumulo.core.conf.AccumuloConfiguration;
-import org.apache.accumulo.core.conf.Property;
-import org.apache.accumulo.core.data.Key;
-import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.file.FileOperations;
-import org.apache.accumulo.core.file.FileSKVWriter;
-import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.core.util.UtilWaitThread;
-import org.apache.accumulo.core.util.shell.Shell;
-import org.apache.accumulo.minicluster.MiniAccumuloCluster;
-import org.apache.accumulo.minicluster.MiniAccumuloConfig;
-import org.apache.accumulo.server.trace.TraceServer;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.tools.DistCp;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-public class ShellServerTest {
-  public static class TestOutputStream extends OutputStream {
-    StringBuilder sb = new StringBuilder();
-    
-    @Override
-    public void write(int b) throws IOException {
-      sb.append((char) (0xff & b));
-    }
-    
-    public String get() {
-      return sb.toString();
-    }
-    
-    public void clear() {
-      sb.setLength(0);
-    }
-  }
-  
-  public static class StringInputStream extends InputStream {
-    private String source = "";
-    private int offset = 0;
-    
-    @Override
-    public int read() throws IOException {
-      if (offset == source.length())
-        return '\n';
-      else
-        return source.charAt(offset++);
-    }
-    
-    public void set(String other) {
-      source = other;
-      offset = 0;
-    }
-  }
-  
-  private static String secret = "superSecret";
-  public static TemporaryFolder folder = new TemporaryFolder();
-  public static MiniAccumuloCluster cluster;
-  public static TestOutputStream output;
-  public static StringInputStream input;
-  public static Shell shell;
-  private static Process traceProcess;
-  
-  static String exec(String cmd) throws IOException {
-    output.clear();
-    shell.execCommand(cmd, true, true);
-    return output.get();
-  }
-  
-  static String exec(String cmd, boolean expectGoodExit) throws IOException {
-    String result = exec(cmd);
-    if (expectGoodExit)
-      assertGoodExit("", true);
-    else
-      assertBadExit("", true);
-    return result;
-  }
-  
-  static String exec(String cmd, boolean expectGoodExit, String expectString) throws IOException {
-    return exec(cmd, expectGoodExit, expectString, true);
-  }
-  
-  static String exec(String cmd, boolean expectGoodExit, String expectString, boolean stringPresent) throws IOException {
-    String result = exec(cmd);
-    if (expectGoodExit)
-      assertGoodExit(expectString, stringPresent);
-    else
-      assertBadExit(expectString, stringPresent);
-    return result;
-  }
-  
-  static void assertGoodExit(String s, boolean stringPresent) {
-    Shell.log.info(output.get());
-    assertEquals(0, shell.getExitCode());
-    
-    if (s.length() > 0)
-      assertEquals(s + " present in " + output.get() + " was not " + stringPresent, stringPresent, output.get().contains(s));
-  }
-  
-  static void assertBadExit(String s, boolean stringPresent) {
-    Shell.log.debug(output.get());
-    assertTrue(shell.getExitCode() > 0);
-    if (s.length() > 0)
-      assertEquals(s + " present in " + output.get() + " was not " + stringPresent, stringPresent, output.get().contains(s));
-    shell.resetExitCode();
-  }
-  
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    folder.create();
-    MiniAccumuloConfig cfg = new MiniAccumuloConfig(folder.newFolder("miniAccumulo"), secret);
-    cluster = new MiniAccumuloCluster(cfg);
-    cluster.start();
-    
-    System.setProperty("HOME", folder.getRoot().getAbsolutePath());
-    
-    // start the shell
-    output = new TestOutputStream();
-    input = new StringInputStream();
-    shell = new Shell(new ConsoleReader(input, output));
-    shell.setLogErrorsToConsole();
-    shell.config("-u", "root", "-p", secret, "-z", cluster.getConfig().getInstanceName(), cluster.getConfig().getZooKeepers());
-    exec("quit", true);
-    shell.start();
-    shell.setExit(false);
-    
-    // use reflection to call this method so it does not need to be made public
-    Method method = cluster.getClass().getDeclaredMethod("exec", Class.class, String[].class);
-    method.setAccessible(true);
-    traceProcess = (Process) method.invoke(cluster, TraceServer.class, new String[0]);
-    
-    // give the tracer some time to start
-    UtilWaitThread.sleep(1000);
-  }
-  
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    cluster.stop();
-    traceProcess.destroy();
-    folder.delete();
-  }
-  
-  @Test(timeout = 30000)
-  public void exporttableImporttable() throws Exception {
-    // exporttable / importtable
-    exec("createtable t -evc", true);
-    make10();
-    exec("addsplits row5", true);
-    exec("config -t t -s table.split.threshold=345M", true);
-    exec("offline t", true);
-    String export = "file://" + folder.newFolder().toString();
-    exec("exporttable -t t " + export, true);
-    DistCp cp = newDistCp();
-    String import_ = "file://" + folder.newFolder().toString();
-    cp.run(new String[] {"-f", export + "/distcp.txt", import_});
-    exec("importtable t2 " + import_, true);
-    exec("config -t t2 -np", true, "345M", true);
-    exec("getsplits -t t2", true, "row5", true);
-    exec("constraint --list -t t2", true, "VisibilityConstraint=2", true);
-    exec("onlinetable t", true);
-    exec("deletetable -f t", true);
-    exec("deletetable -f t2", true);
-  }
-  
-  private DistCp newDistCp() {
-    try {
-      @SuppressWarnings("unchecked")
-      Constructor<DistCp>[] constructors = (Constructor<DistCp>[]) DistCp.class.getConstructors();
-      for (Constructor<DistCp> constructor : constructors) {
-        Class<?>[] parameterTypes = constructor.getParameterTypes();
-        if (parameterTypes.length > 0 && parameterTypes[0].equals(Configuration.class)) {
-          if (parameterTypes.length == 1) {
-            return constructor.newInstance(new Configuration());
-          } else if (parameterTypes.length == 2) {
-            return constructor.newInstance(new Configuration(), null);
-          }
-        }
-      }
-    } catch (Exception e) {
-      throw new RuntimeException(e);
-    }
-    throw new RuntimeException("Unexpected constructors for DistCp");
-  }
-  
-  @Test(timeout = 30000)
-  public void setscaniterDeletescaniter() throws Exception {
-    // setscaniter, deletescaniter
-    exec("createtable t");
-    exec("insert a cf cq 1");
-    exec("insert a cf cq 1");
-    exec("insert a cf cq 1");
-    input.set("true\n\n\nSTRING");
-    exec("setscaniter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -n name", true);
-    exec("scan", true, "3", true);
-    exec("deletescaniter -n name", true);
-    exec("scan", true, "1", true);
-    exec("deletetable -f t");
-    
-  }
-  
-  @Test(timeout = 30000)
-  public void execfile() throws Exception {
-    // execfile
-    File file = folder.newFile();
-    PrintWriter writer = new PrintWriter(file.getAbsolutePath());
-    writer.println("about");
-    writer.close();
-    exec("execfile " + file.getAbsolutePath(), true, Constants.VERSION, true);
-    
-  }
-  
-  @Test(timeout = 30000)
-  public void egrep() throws Exception {
-    // egrep
-    exec("createtable t");
-    make10();
-    String lines = exec("egrep row[123]", true);
-    assertTrue(lines.split("\n").length - 1 == 3);
-    exec("deletetable -f t");
-  }
-  
-  @Test(timeout = 30000)
-  public void du() throws Exception {
-    // du
-    exec("createtable t");
-    make10();
-    exec("flush -t t -w");
-    exec("du t", true, " [t]", true);
-    output.clear();
-    shell.execCommand("du -h", false, false);
-    String o = output.get();
-    assertTrue(o.matches(".*26[0-9]\\s\\[t\\]\\n")); // for some reason, there's 1-2 bytes of fluctuation
-    exec("deletetable -f t");
-  }
-  
-  @Test(timeout = 1000)
-  public void debug() throws Exception {
-    exec("debug", true, "off", true);
-    exec("debug on", true);
-    exec("debug", true, "on", true);
-    exec("debug off", true);
-    exec("debug", true, "off", true);
-    exec("debug debug", false);
-    exec("debug debug debug", false);
-  }
-  
-  @Test(timeout = 30000)
-  public void user() throws Exception {
-    // createuser, deleteuser, user, users, droptable, grant, revoke
-    input.set("secret\nsecret\n");
-    exec("createuser xyzzy", true);
-    exec("users", true, "xyzzy", true);
-    String perms = exec("userpermissions -u xyzzy", true);
-    assertTrue(perms.contains("Table permissions (!METADATA): Table.READ"));
-    exec("grant -u xyzzy -s System.CREATE_TABLE", true);
-    perms = exec("userpermissions -u xyzzy", true);
-    assertTrue(perms.contains(""));
-    exec("grant -u root -t !METADATA Table.WRITE", true);
-    exec("grant -u root -t !METADATA Table.GOOFY", false);
-    exec("grant -u root -s foo", false);
-    exec("grant -u xyzzy -t !METADATA foo", false);
-    input.set("secret\nsecret\n");
-    exec("user xyzzy", true);
-    exec("createtable t", true, "xyzzy@", true);
-    exec("insert row1 cf cq 1", true);
-    exec("scan", true, "row1", true);
-    exec("droptable -f t", true);
-    exec("deleteuser xyzzy", false, "delete yourself", true);
-    input.set(secret + "\n" + secret + "\n");
-    exec("user root", true);
-    exec("revoke -u xyzzy -s System.CREATE_TABLE", true);
-    exec("revoke -u xyzzy -s System.GOOFY", false);
-    exec("revoke -u xyzzy -s foo", false);
-    exec("revoke -u xyzzy -t !METADATA Table.WRITE", true);
-    exec("revoke -u xyzzy -t !METADATA Table.GOOFY", false);
-    exec("revoke -u xyzzy -t !METADATA foo", false);
-    exec("deleteuser xyzzy", true);
-    exec("users", true, "xyzzy", false);
-  }
-  
-  @Test(timeout = 30000)
-  public void iter() throws Exception {
-    // setshelliter, listshelliter, deleteshelliter
-    exec("createtable t");
-    exec("insert a cf cq 1");
-    exec("insert a cf cq 1");
-    exec("insert a cf cq 1");
-    input.set("true\n\n\nSTRING\n");
-    exec("setshelliter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -pn sum -n name", true);
-    input.set("true\n\n\nSTRING\n");
-    exec("setshelliter -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 11 -pn sum -n xyzzy", true);
-    exec("scan -pn sum", true, "3", true);
-    exec("listshelliter", true, "Iterator name", true);
-    exec("listshelliter", true, "Iterator xyzzy", true);
-    exec("listshelliter", true, "Profile : sum", true);
-    exec("deleteshelliter -pn sum -n name", true);
-    exec("listshelliter", true, "Iterator name", false);
-    exec("listshelliter", true, "Iterator xyzzy", true);
-    exec("deleteshelliter -pn sum -a", true);
-    exec("listshelliter", true, "Iterator xyzzy", false);
-    exec("listshelliter", true, "Profile : sum", false);
-    exec("deletetable -f t");
-    // list iter
-    exec("createtable t");
-    exec("insert a cf cq 1");
-    exec("insert a cf cq 1");
-    exec("insert a cf cq 1");
-    input.set("true\n\n\nSTRING\n");
-    exec("setiter -scan -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 10 -n name", true);
-    input.set("true\n\n\nSTRING\n");
-    exec("setiter -scan -class org.apache.accumulo.core.iterators.user.SummingCombiner -p 11 -n xyzzy", true);
-    exec("scan", true, "3", true);
-    exec("listiter -scan", true, "Iterator name", true);
-    exec("listiter -scan", true, "Iterator xyzzy", true);
-    exec("listiter -minc", true, "Iterator name", false);
-    exec("listiter -minc", true, "Iterator xyzzy", false);
-    exec("deleteiter -scan -n name", true);
-    exec("listiter -scan", true, "Iterator name", false);
-    exec("listiter -scan", true, "Iterator xyzzy", true);
-    exec("deletetable -f t");
-    
-  }
-  
-  @Test(timeout = 30000)
-  public void notable() throws Exception {
-    // notable
-    exec("createtable xyzzy", true);
-    exec("scan", true, " xyzzy>", true);
-    assertTrue(output.get().contains(" xyzzy>"));
-    exec("notable", true);
-    exec("scan", false, "Not in a table context.", true);
-    assertFalse(output.get().contains(" xyzzy>"));
-    exec("deletetable -f xyzzy");
-  }
-  
-  @Test(timeout = 30000)
-  public void sleep() throws Exception {
-    // sleep
-    long now = System.currentTimeMillis();
-    exec("sleep 0.2", true);
-    long diff = System.currentTimeMillis() - now;
-    assertTrue(diff >= 200);
-    assertTrue(diff < 400);
-  }
-  
-  @Test(timeout = 30000)
-  public void addauths() throws Exception {
-    // addauths
-    exec("createtable xyzzy -evc");
-    exec("insert a b c d -l foo", false, "does not have authorization", true);
-    exec("addauths -s foo,bar", true);
-    exec("getauths", true, "foo", true);
-    exec("getauths", true, "bar", true);
-    exec("insert a b c d -l foo");
-    exec("scan", true, "[foo]");
-    exec("scan -s bar", true, "[foo]", false);
-    exec("deletetable -f xyzzy");
-  }
-  
-  @Test(timeout = 30000)
-  public void byeQuitExit() throws Exception {
-    // bye, quit, exit
-    for (String cmd : "bye quit exit".split(" ")) {
-      assertFalse(shell.getExit());
-      exec(cmd);
-      assertTrue(shell.getExit());
-      shell.setExit(false);
-    }
-  }
-  
-  @Test(timeout = 30000)
-  public void classpath() throws Exception {
-    // classpath
-    exec("classpath", true, "Level 2: Java Classloader (loads everything defined by java classpath) URL classpath items are", true);
-  }
-  
-  @Test(timeout = 30000)
-  public void clearCls() throws Exception {
-    // clear/cls
-    if (shell.getReader().getTerminal().isAnsiSupported()) {
-      exec("cls", true, "[1;1H");
-      exec("clear", true, "[2J");
-    } else {
-      exec("cls", false, "does not support");
-      exec("clear", false, "does not support");
-    }
-  }
-  
-  @Test(timeout = 30000)
-  public void clonetable() throws Exception {
-    // clonetable
-    exec("createtable orig -evc");
-    exec("config -t orig -s table.split.threshold=123M", true);
-    exec("addsplits -t orig a b c", true);
-    exec("insert a b c value");
-    exec("scan", true, "value", true);
-    exec("clonetable orig clone");
-    // verify constraint, config, and splits were cloned
-    exec("constraint --list -t clone", true, "VisibilityConstraint=2", true);
-    exec("config -t clone -np", true, "123M", true);
-    exec("getsplits -t clone", true, "a\nb\nc\n");
-    // compact
-    exec("createtable c");
-    // make two files
-    exec("insert a b c d");
-    exec("flush -w");
-    exec("insert x y z v");
-    exec("flush -w");
-    int oldCount = countFiles();
-    // merge two files into one
-    exec("compact -t c -w");
-    assertTrue(countFiles() < oldCount);
-    exec("addsplits -t c f");
-    // make two more files:
-    exec("insert m 1 2 3");
-    exec("flush -w");
-    exec("insert n 1 2 3");
-    exec("flush -w");
-    oldCount = countFiles();
-    // at this point there are 3 files in the default tablet
-    // compact some data:
-    exec("compact -b g -e z -w");
-    assertTrue(countFiles() == oldCount - 2);
-    exec("compact -w");
-    assertTrue(countFiles() == oldCount - 2);
-    exec("merge --all -t c");
-    exec("compact -w");
-    assertTrue(countFiles() == oldCount - 3);
-    exec("deletetable -f orig");
-    exec("deletetable -f clone");
-    exec("deletetable -f c");
-  }
-  
-  @Test(timeout = 30000)
-  public void constraint() throws Exception {
-    // constraint
-    exec("constraint -l -t !METADATA", true, "MetadataConstraints=1", true);
-    exec("createtable c -evc");
-    exec("constraint -l -t c", true, "VisibilityConstraint=2", true);
-    exec("constraint -t c -d 2", true, "Removed constraint 2 from table c");
-    exec("constraint -l -t c", true, "VisibilityConstraint=2", false);
-    exec("deletetable -f c");
-  }
-  
-  @Test(timeout = 30000)
-  public void deletemany() throws Exception {
-    // deletemany
-    exec("createtable t");
-    make10();
-    assertEquals(10, countkeys("t"));
-    exec("deletemany -f -b row8");
-    assertEquals(8, countkeys("t"));
-    exec("scan -t t -np", true, "row8", false);
-    make10();
-    exec("deletemany -f -b row4 -e row5");
-    assertEquals(8, countkeys("t"));
-    make10();
-    exec("deletemany -f -c cf:col4,cf:col5");
-    assertEquals(8, countkeys("t"));
-    make10();
-    exec("deletemany -f -r row3");
-    assertEquals(9, countkeys("t"));
-    make10();
-    exec("deletemany -f -r row3");
-    assertEquals(9, countkeys("t"));
-    make10();
-    exec("deletemany -f -b row3 -be -e row5 -ee");
-    assertEquals(9, countkeys("t"));
-    exec("deletetable -f t");
-  }
-  
-  @Test(timeout = 30000)
-  public void deleterows() throws Exception {
-    // deleterows
-    int base = countFiles();
-    exec("createtable t");
-    exec("addsplits row5 row7");
-    make10();
-    exec("flush -w -t t");
-    assertTrue(base + 3 == countFiles());
-    exec("deleterows -t t -b row5 -e row7", true);
-    assertTrue(base + 2 == countFiles());
-    exec("deletetable -f t");
-  }
-  
-  @Test(timeout = 30000)
-  public void groups() throws Exception {
-    exec("createtable t");
-    exec("setgroups -t t alpha=a,b,c num=3,2,1");
-    exec("getgroups -t t", true, "alpha=a,b,c", true);
-    exec("getgroups -t t", true, "num=1,2,3", true);
-    exec("deletetable -f t");
-  }
-  
-  @Test(timeout = 30000)
-  public void grep() throws Exception {
-    exec("createtable t", true);
-    make10();
-    exec("grep row[123]", true, "row1", false);
-    exec("grep row5", true, "row5", true);
-    exec("deletetable -f t", true);
-  }
-  
-  @Test
-  // (timeout = 30000)
-  public void help() throws Exception {
-    exec("help -np", true, "Help Commands", true);
-    exec("?", true, "Help Commands", true);
-    for (String c : ("bye exit quit " + "about help info ? " + "deleteiter deletescaniter listiter setiter setscaniter "
-        + "grant revoke systempermissions tablepermissions userpermissions " + "execfile history " + "authenticate cls clear notable sleep table user whoami "
-        + "clonetable config createtable deletetable droptable du exporttable importtable offline online renametable tables "
-        + "addsplits compact constraint flush getgropus getsplits merge setgroups " + "addauths createuser deleteuser dropuser getauths passwd setauths users "
-        + "delete deletemany deleterows egrep formatter interpreter grep importdirectory insert maxrow scan").split(" ")) {
-      exec("help " + c, true);
-    }
-  }
-  
-  // @Test(timeout = 30000)
-  public void history() throws Exception {
-    exec("history -c", true);
-    exec("createtable unusualstring");
-    exec("deletetable -f unusualstring");
-    exec("history", true, "unusualstring", true);
-    exec("history", true, "history", true);
-  }
-  
-  @Test(timeout = 30000)
-  public void importDirectory() throws Exception {
-    Configuration conf = new Configuration();
-    FileSystem fs = FileSystem.get(conf);
-    File importDir = folder.newFolder("import");
-    String even = new File(importDir, "even.rf").toString();
-    String odd = new File(importDir, "odd.rf").toString();
-    File errorsDir = folder.newFolder("errors");
-    fs.mkdirs(new Path(errorsDir.toString()));
-    AccumuloConfiguration aconf = AccumuloConfiguration.getDefaultConfiguration();
-    FileSKVWriter evenWriter = FileOperations.getInstance().openWriter(even, fs, conf, aconf);
-    evenWriter.startDefaultLocalityGroup();
-    FileSKVWriter oddWriter = FileOperations.getInstance().openWriter(odd, fs, conf, aconf);
-    oddWriter.startDefaultLocalityGroup();
-    long ts = System.currentTimeMillis();
-    Text cf = new Text("cf");
-    Text cq = new Text("cq");
-    Value value = new Value("value".getBytes());
-    for (int i = 0; i < 100; i += 2) {
-      Key key = new Key(new Text(String.format("%8d", i)), cf, cq, ts);
-      evenWriter.append(key, value);
-      key = new Key(new Text(String.format("%8d", i + 1)), cf, cq, ts);
-      oddWriter.append(key, value);
-    }
-    evenWriter.close();
-    oddWriter.close();
-    exec("createtable t", true);
-    exec("importdirectory " + importDir + " " + errorsDir + " true", true);
-    exec("scan -r 00000000", true, "00000000", true);
-    exec("scan -r 00000099", true, "00000099", true);
-    exec("deletetable -f t");
-  }
-  
-  @Test(timeout = 30000)
-  public void info() throws Exception {
-    exec("info", true, Constants.VERSION, true);
-  }
-  
-  @Test(timeout = 30000)
-  public void interpreter() throws Exception {
-    exec("createtable t", true);
-    exec("interpreter -l", true, "HexScan", false);
-    exec("insert \\x02 cf cq value", true);
-    exec("scan -b 02", true, "value", false);
-    exec("interpreter -i org.apache.accumulo.core.util.interpret.HexScanInterpreter", true);
-    exec("interpreter -l", true, "HexScan", true);
-    exec("scan -b 02", true, "value", true);
-    exec("deletetable -f t", true);
-  }
-  
-  @Test(timeout = 30000)
-  public void listcompactions() throws Exception {
-    exec("createtable t", true);
-    exec("config -t t -s table.iterator.minc.slow=30,org.apache.accumulo.test.functional.SlowIterator", true);
-    exec("config -t t -s table.iterator.minc.slow.opt.sleepTime=100", true);
-    exec("insert a cf cq value", true);
-    exec("insert b cf cq value", true);
-    exec("insert c cf cq value", true);
-    exec("insert d cf cq value", true);
-    exec("flush -t t", true);
-    exec("sleep 0.2", true);
-    exec("listcompactions", true, "default_tablet");
-    String[] lines = output.get().split("\n");
-    String last = lines[lines.length - 1];
-    String[] parts = last.split("\\|");
-    assertEquals(12, parts.length);
-    exec("deletetable -f t", true);
-  }
-  
-  @Test(timeout = 30000)
-  public void maxrow() throws Exception {
-    exec("createtable t", true);
-    exec("insert a cf cq value", true);
-    exec("insert b cf cq value", true);
-    exec("insert ccc cf cq value", true);
-    exec("insert zzz cf cq value", true);
-    exec("maxrow", true, "zzz", true);
-    exec("delete zzz cf cq", true);
-    exec("maxrow", true, "ccc", true);
-    exec("deletetable -f t", true);
-  }
-  
-  @Test(timeout = 30000)
-  public void merge() throws Exception {
-    exec("createtable t");
-    exec("addsplits a m z");
-    exec("getsplits", true, "z", true);
-    exec("merge --all", true);
-    exec("getsplits", true, "z", false);
-    exec("deletetable -f t");
-    exec("getsplits -t !METADATA", true);
-    assertEquals(2, output.get().split("\n").length);
-    exec("getsplits -t !!ROOT", true);
-    assertEquals(1, output.get().split("\n").length);
-    exec("merge --all -t !METADATA");
-    exec("getsplits -t !METADATA", true);
-    assertEquals(1, output.get().split("\n").length);
-  }
-  
-  @Test(timeout = 30000)
-  public void ping() throws Exception {
-    for (int i = 0; i < 10; i++) {
-      exec("ping", true, "OK", true);
-      // wait for both tservers to start up
-      if (output.get().split("\n").length == 3)
-        break;
-      UtilWaitThread.sleep(1000);
-      
-    }
-    assertEquals(3, output.get().split("\n").length);
-  }
-  
-  @Test(timeout = 30000)
-  public void renametable() throws Exception {
-    exec("createtable aaaa");
-    exec("insert this is a value");
-    exec("renametable aaaa xyzzy");
-    exec("tables", true, "xyzzy", true);
-    exec("tables", true, "aaaa", false);
-    exec("scan -t xyzzy", true, "value", true);
-    exec("deletetable -f xyzzy", true);
-  }
-  
-  @Test(timeout = 30000)
-  public void systempermission() throws Exception {
-    exec("systempermissions");
-    assertEquals(8, output.get().split("\n").length - 1);
-    exec("tablepermissions", true);
-    assertEquals(6, output.get().split("\n").length - 1);
-  }
-  
-  @Test(timeout = 30000)
-  public void listscans() throws Exception {
-    exec("createtable t", true);
-    exec("config -t t -s table.iterator.scan.slow=30,org.apache.accumulo.test.functional.SlowIterator", true);
-    exec("config -t t -s table.iterator.scan.slow.opt.sleepTime=100", true);
-    exec("insert a cf cq value", true);
-    exec("insert b cf cq value", true);
-    exec("insert c cf cq value", true);
-    exec("insert d cf cq value", true);
-    Thread thread = new Thread() {
-      @Override
-      public void run() {
-        try {
-          Connector connector = cluster.getConnector("root", secret);
-          Scanner s = connector.createScanner("t", Authorizations.EMPTY);
-          for (@SuppressWarnings("unused")
-          Entry<Key,Value> kv : s)
-            ;
-        } catch (Exception ex) {
-          throw new RuntimeException(ex);
-        }
-      }
-    };
-    thread.start();
-    exec("sleep 0.1", true);
-    String scans = exec("listscans", true);
-    String lines[] = scans.split("\n");
-    String last = lines[lines.length - 1];
-    assertTrue(last.contains("RUNNING"));
-    String parts[] = last.split("\\|");
-    assertEquals(13, parts.length);
-    thread.join();
-    exec("deletetable -f t", true);
-  }
-  
-  @Test(timeout = 30000)
-  public void testPertableClasspath() throws Exception {
-    File fooFilterJar = File.createTempFile("FooFilter", ".jar");
-    FileUtils.copyURLToFile(this.getClass().getResource("/FooFilter.jar"), fooFilterJar);
-    fooFilterJar.deleteOnExit();
-    
-    File fooConstraintJar = File.createTempFile("FooConstraint", ".jar");
-    FileUtils.copyURLToFile(this.getClass().getResource("/FooConstraint.jar"), fooConstraintJar);
-    fooConstraintJar.deleteOnExit();
-    
-    exec(
-        "config -s " + Property.VFS_CONTEXT_CLASSPATH_PROPERTY.getKey() + "cx1=" + fooFilterJar.toURI().toString() + "," + fooConstraintJar.toURI().toString(),
-        true);
-    
-    exec("createtable ptc", true);
-    exec("config -t ptc -s " + Property.TABLE_CLASSPATH.getKey() + "=cx1", true);
-    
-    UtilWaitThread.sleep(200);
-    
-    exec("setiter -scan -class org.apache.accumulo.test.FooFilter -p 10 -n foo", true);
-    
-    exec("insert foo f q v", true);
-    
-    UtilWaitThread.sleep(100);
-    
-    exec("scan -np", true, "foo", false);
-    
-    exec("constraint -a FooConstraint", true);
-    
-    exec("offline ptc");
-    UtilWaitThread.sleep(500);
-    exec("online ptc");
-    
-    exec("table ptc", true);
-    exec("insert foo f q v", false);
-    exec("insert ok foo q v", true);
-    
-    exec("deletetable ptc", true);
-    exec("config -d " + Property.VFS_CONTEXT_CLASSPATH_PROPERTY.getKey() + "cx1");
-    
-  }
-  
-  @Test(timeout = 30000)
-  public void trace() throws Exception {
-    exec("trace on", true);
-    exec("createtable t", true);
-    exec("insert a b c value", true);
-    exec("scan -np", true, "value", true);
-    exec("deletetable -f t");
-    exec("sleep 1");
-    String trace = exec("trace off");
-    assertTrue(trace.contains("binMutations"));
-    assertTrue(trace.contains("update"));
-    assertTrue(trace.contains("DeleteTable"));
-  }
-  
-  private int countkeys(String table) throws IOException {
-    exec("scan -np -t " + table);
-    return output.get().split("\n").length - 1;
-  }
-  
-  private void make10() throws IOException {
-    for (int i = 0; i < 10; i++) {
-      exec(String.format("insert row%d cf col%d value", i, i));
-    }
-  }
-  
-  private int countFiles() throws IOException {
-    exec("scan -t !METADATA -np -c file");
-    return output.get().split("\n").length - 1;
-  }
-  
-}


[29/50] [abbrv] git commit: ACCUMULO-1537 - Adding apache header

Posted by ct...@apache.org.
ACCUMULO-1537 - Adding apache header



git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1501951 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/c72a194a
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/c72a194a
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/c72a194a

Branch: refs/heads/ACCUMULO-1496
Commit: c72a194a164361a5eee04cbd270bcee8004327f6
Parents: 0b6b734
Author: John Vines <vi...@apache.org>
Authored: Wed Jul 10 20:08:11 2013 +0000
Committer: John Vines <vi...@apache.org>
Committed: Wed Jul 10 20:08:11 2013 +0000

----------------------------------------------------------------------
 .../accumulo/test/functional/HalfDeadTServerIT.java | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/c72a194a/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java b/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
index 2064627..ee08012 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/HalfDeadTServerIT.java
@@ -1,3 +1,19 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.accumulo.test.functional;
 
 import static org.junit.Assert.assertEquals;


[36/50] [abbrv] git commit: ACCUMULO-1537 fix static cluster/folder in MacTest

Posted by ct...@apache.org.
ACCUMULO-1537 fix static cluster/folder in MacTest

git-svn-id: https://svn.apache.org/repos/asf/accumulo/trunk@1502343 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/720e27a5
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/720e27a5
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/720e27a5

Branch: refs/heads/ACCUMULO-1496
Commit: 720e27a59a3a12ca70ee20d85eaf7c6afab83429
Parents: f43930d
Author: Eric C. Newton <ec...@apache.org>
Authored: Thu Jul 11 20:07:34 2013 +0000
Committer: Eric C. Newton <ec...@apache.org>
Committed: Thu Jul 11 20:07:34 2013 +0000

----------------------------------------------------------------------
 .../test/java/org/apache/accumulo/test/functional/DeleteIT.java | 5 +++--
 .../test/java/org/apache/accumulo/test/functional/MacTest.java  | 4 ++--
 .../test/java/org/apache/accumulo/test/functional/SplitIT.java  | 2 +-
 3 files changed, 6 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/720e27a5/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java b/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
index 36c852e..08feb8a 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/DeleteIT.java
@@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals;
 
 import org.apache.accumulo.core.cli.ScannerOpts;
 import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.minicluster.MiniAccumuloCluster;
 import org.apache.accumulo.server.util.Admin;
 import org.apache.accumulo.test.TestIngest;
 import org.apache.accumulo.test.TestRandomDeletes;
@@ -32,11 +33,11 @@ public class DeleteIT extends MacTest {
   public void test() throws Exception {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");
-    deleteTest(c);
+    deleteTest(c, cluster);
     assertEquals(0, cluster.exec(Admin.class, "stopAll").waitFor());
   }
 
-  public static void deleteTest(Connector c) throws Exception {
+  public static void deleteTest(Connector c, MiniAccumuloCluster cluster) throws Exception {
     VerifyIngest.Opts vopts = new VerifyIngest.Opts();
     TestIngest.Opts opts = new TestIngest.Opts();
     vopts.rows = opts.rows = 1000;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/720e27a5/test/src/test/java/org/apache/accumulo/test/functional/MacTest.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/MacTest.java b/test/src/test/java/org/apache/accumulo/test/functional/MacTest.java
index 0a288c5..a52d629 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/MacTest.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/MacTest.java
@@ -31,11 +31,11 @@ import org.junit.rules.TemporaryFolder;
 
 public class MacTest {
   public static final Logger log = Logger.getLogger(MacTest.class);
-  public static TemporaryFolder folder = new TemporaryFolder();
-  public static MiniAccumuloCluster cluster;
   public static final String PASSWORD = "secret";
   static final ScannerOpts SOPTS = new ScannerOpts();
   static final BatchWriterOpts BWOPTS = new BatchWriterOpts();
+  public TemporaryFolder folder = new TemporaryFolder();
+  public MiniAccumuloCluster cluster;
   
   public Connector getConnector() throws AccumuloException, AccumuloSecurityException {
     return cluster.getConnector("root", PASSWORD);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/720e27a5/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java b/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
index e8a9d80..1be04b1 100644
--- a/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
+++ b/test/src/test/java/org/apache/accumulo/test/functional/SplitIT.java
@@ -101,7 +101,7 @@ public class SplitIT extends MacTest {
     Connector c = getConnector();
     c.tableOperations().create("test_ingest");
     c.tableOperations().setProperty("test_ingest", Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
-    DeleteIT.deleteTest(c);
+    DeleteIT.deleteTest(c, cluster);
     c.tableOperations().flush("test_ingest", null, null, true);
     UtilWaitThread.sleep(10*1000);
     assertTrue(c.tableOperations().listSplits("test_ingest").size() > 30);
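
For context, the change above stops sharing one MiniAccumuloCluster and TemporaryFolder
across all test classes through static fields. A minimal sketch of the per-test lifecycle
this enables (JUnit 4 @Before/@After assumed; the class name and wiring here are
illustrative, not the actual MacTest code):

import org.apache.accumulo.minicluster.MiniAccumuloCluster;
import org.junit.After;
import org.junit.Before;
import org.junit.rules.TemporaryFolder;

public class PerTestClusterSketch {
  private TemporaryFolder folder = new TemporaryFolder();
  private MiniAccumuloCluster cluster;

  @Before
  public void setUp() throws Exception {
    folder.create();                       // fresh directory per test
    cluster = new MiniAccumuloCluster(folder.getRoot(), "secret");
    cluster.start();                       // isolated cluster per test
  }

  @After
  public void tearDown() throws Exception {
    cluster.stop();                        // shut the cluster down
    folder.delete();                       // no state leaks between tests
  }
}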


[10/50] [abbrv] ACCUMULO-1481 : Add tests for splitting/merging root table; refactor to consolidate metadata constants and structures in an organized way; begin consolidating metadata ops into a servicer interface to abstract the code that actually does

Posted by ct...@apache.org.
http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java b/server/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
new file mode 100644
index 0000000..816df8b
--- /dev/null
+++ b/server/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
@@ -0,0 +1,1313 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.server.util;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Instance;
+import org.apache.accumulo.core.client.IsolatedScanner;
+import org.apache.accumulo.core.client.MutationsRejectedException;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.impl.BatchWriterImpl;
+import org.apache.accumulo.core.client.impl.ScannerImpl;
+import org.apache.accumulo.core.client.impl.Writer;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.KeyExtent;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.PartialKey;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.DataFileValue;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ChoppedColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ClonedColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ScanFileColumnFamily;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.core.security.CredentialHelper;
+import org.apache.accumulo.core.security.thrift.TCredentials;
+import org.apache.accumulo.core.tabletserver.thrift.ConstraintViolationException;
+import org.apache.accumulo.core.util.ColumnFQ;
+import org.apache.accumulo.core.util.FastFormat;
+import org.apache.accumulo.core.util.Pair;
+import org.apache.accumulo.core.util.StringUtil;
+import org.apache.accumulo.core.util.UtilWaitThread;
+import org.apache.accumulo.core.zookeeper.ZooUtil;
+import org.apache.accumulo.fate.zookeeper.IZooReaderWriter;
+import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeExistsPolicy;
+import org.apache.accumulo.fate.zookeeper.ZooUtil.NodeMissingPolicy;
+import org.apache.accumulo.server.ServerConstants;
+import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.fs.FileRef;
+import org.apache.accumulo.server.fs.VolumeManager;
+import org.apache.accumulo.server.fs.VolumeManagerImpl;
+import org.apache.accumulo.server.master.state.TServerInstance;
+import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.server.zookeeper.ZooLock;
+import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.Text;
+import org.apache.log4j.Logger;
+import org.apache.zookeeper.KeeperException;
+
+/**
+ * Provides a reference to the metadata table for updates by tablet servers.
+ */
+public class MetadataTableUtil {
+  
+  private static final Text EMPTY_TEXT = new Text();
+  private static Map<TCredentials,Writer> root_tables = new HashMap<TCredentials,Writer>();
+  private static Map<TCredentials,Writer> metadata_tables = new HashMap<TCredentials,Writer>();
+  private static final Logger log = Logger.getLogger(MetadataTableUtil.class);
+  
+  private static final int SAVE_ROOT_TABLET_RETRIES = 3;
+  
+  private MetadataTableUtil() {}
+  
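+  // one Writer is cached per credential set; the synchronized accessors below guard the shared maps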
+  public synchronized static Writer getMetadataTable(TCredentials credentials) {
+    Writer metadataTable = metadata_tables.get(credentials);
+    if (metadataTable == null) {
+      metadataTable = new Writer(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID);
+      metadata_tables.put(credentials, metadataTable);
+    }
+    return metadataTable;
+  }
+  
+  public synchronized static Writer getRootTable(TCredentials credentials) {
+    Writer rootTable = root_tables.get(credentials);
+    if (rootTable == null) {
+      rootTable = new Writer(HdfsZooInstance.getInstance(), credentials, RootTable.ID);
+      root_tables.put(credentials, rootTable);
+    }
+    return rootTable;
+  }
+  
+  public static void putLockID(ZooLock zooLock, Mutation m) {
+    TabletsSection.ServerColumnFamily.LOCK_COLUMN.put(m, new Value(zooLock.getLockID().serialize(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + "/")
+        .getBytes()));
+  }
+  
+  public static void update(TCredentials credentials, Mutation m, KeyExtent extent) {
+    update(credentials, null, m, extent);
+  }
+  
+  public static void update(TCredentials credentials, ZooLock zooLock, Mutation m, KeyExtent extent) {
+    Writer t = extent.isMeta() ? getRootTable(credentials) : getMetadataTable(credentials);
+    if (zooLock != null)
+      putLockID(zooLock, m);
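+    // retry until the mutation is accepted; metadata updates must not be silently dropped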
+    while (true) {
+      try {
+        t.update(m);
+        return;
+      } catch (AccumuloException e) {
+        log.error(e, e);
+      } catch (AccumuloSecurityException e) {
+        log.error(e, e);
+      } catch (ConstraintViolationException e) {
+        log.error(e, e);
+      } catch (TableNotFoundException e) {
+        log.error(e, e);
+      }
+      UtilWaitThread.sleep(1000);
+    }
+    
+  }
+  
+  /**
+   * Adds one new data file to a tablet's list of data files.
+   * 
+   * @param path
+   *          should be relative to the table directory
+   */
+  public static void updateTabletDataFile(KeyExtent extent, FileRef path, FileRef mergeFile, DataFileValue dfv, String time, TCredentials credentials,
+      Set<FileRef> filesInUseByScans, String address, ZooLock zooLock, Set<String> unusedWalLogs, TServerInstance lastLocation, long flushId) {
+    if (extent.equals(RootTable.EXTENT)) {
+      if (unusedWalLogs != null) {
+        IZooReaderWriter zk = ZooReaderWriter.getInstance();
+        // unusedWalLogs will contain the location/name of each log in a log set
+        // the log set is stored under one of the log names, but not both
+        // find the entry under one of the names and delete it.
+        String root = getZookeeperLogLocation();
+        boolean foundEntry = false;
+        for (String entry : unusedWalLogs) {
+          String[] parts = entry.split("/");
+          String zpath = root + "/" + parts[parts.length - 1];
+          while (true) {
+            try {
+              if (zk.exists(zpath)) {
+                zk.recursiveDelete(zpath, NodeMissingPolicy.SKIP);
+                foundEntry = true;
+              }
+              break;
+            } catch (KeeperException e) {
+              log.error(e, e);
+            } catch (InterruptedException e) {
+              log.error(e, e);
+            }
+            UtilWaitThread.sleep(1000);
+          }
+        }
+        if (unusedWalLogs.size() > 0 && !foundEntry)
+          log.warn("WALog entry for root tablet did not exist " + unusedWalLogs);
+      }
+      return;
+    }
+    
+    Mutation m = new Mutation(extent.getMetadataEntry());
+    
+    if (dfv.getNumEntries() > 0) {
+      m.put(DataFileColumnFamily.NAME, path.meta(), new Value(dfv.encode()));
+      TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(time.getBytes()));
+      // record this server as the tablet's last location
+      TServerInstance self = getTServerInstance(address, zooLock);
+      self.putLastLocation(m);
+      // erase the old location
+      if (lastLocation != null && !lastLocation.equals(self))
+        lastLocation.clearLastLocation(m);
+    }
+    if (unusedWalLogs != null) {
+      for (String entry : unusedWalLogs) {
+        m.putDelete(LogColumnFamily.NAME, new Text(entry));
+      }
+    }
+    
+    for (FileRef scanFile : filesInUseByScans)
+      m.put(ScanFileColumnFamily.NAME, scanFile.meta(), new Value("".getBytes()));
+    
+    if (mergeFile != null)
+      m.putDelete(DataFileColumnFamily.NAME, mergeFile.meta());
+    
+    TabletsSection.ServerColumnFamily.FLUSH_COLUMN.put(m, new Value((flushId + "").getBytes()));
+    
+    update(credentials, zooLock, m, extent);
+    
+  }
+  
+  private static TServerInstance getTServerInstance(String address, ZooLock zooLock) {
+    while (true) {
+      try {
+        return new TServerInstance(address, zooLock.getSessionId());
+      } catch (KeeperException e) {
+        log.error(e, e);
+      } catch (InterruptedException e) {
+        log.error(e, e);
+      }
+      UtilWaitThread.sleep(1000);
+    }
+  }
+  
+  public static void updateTabletFlushID(KeyExtent extent, long flushID, TCredentials credentials, ZooLock zooLock) {
+    if (!extent.isRootTablet()) {
+      Mutation m = new Mutation(extent.getMetadataEntry());
+      TabletsSection.ServerColumnFamily.FLUSH_COLUMN.put(m, new Value((flushID + "").getBytes()));
+      update(credentials, zooLock, m, extent);
+    }
+  }
+  
+  public static void updateTabletCompactID(KeyExtent extent, long compactID, TCredentials credentials, ZooLock zooLock) {
+    if (!extent.isRootTablet()) {
+      Mutation m = new Mutation(extent.getMetadataEntry());
+      TabletsSection.ServerColumnFamily.COMPACT_COLUMN.put(m, new Value((compactID + "").getBytes()));
+      update(credentials, zooLock, m, extent);
+    }
+  }
+  
+  public static void updateTabletDataFile(long tid, KeyExtent extent, Map<FileRef,DataFileValue> estSizes, String time, TCredentials credentials,
+      ZooLock zooLock) {
+    Mutation m = new Mutation(extent.getMetadataEntry());
+    byte[] tidBytes = Long.toString(tid).getBytes();
+    
+    for (Entry<FileRef,DataFileValue> entry : estSizes.entrySet()) {
+      Text file = entry.getKey().meta();
+      m.put(DataFileColumnFamily.NAME, file, new Value(entry.getValue().encode()));
+      m.put(TabletsSection.BulkFileColumnFamily.NAME, file, new Value(tidBytes));
+    }
+    TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(time.getBytes()));
+    update(credentials, zooLock, m, extent);
+  }
+  
+  public static void addTablet(KeyExtent extent, String path, TCredentials credentials, char timeType, ZooLock lock) {
+    Mutation m = extent.getPrevRowUpdateMutation();
+    
+    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(path.getBytes()));
+    TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value((timeType + "0").getBytes()));
+    
+    update(credentials, lock, m, extent);
+  }
+  
+  public static void updateTabletPrevEndRow(KeyExtent extent, TCredentials credentials) {
+    Mutation m = extent.getPrevRowUpdateMutation();
+    update(credentials, m, extent);
+  }
+  
+  /**
+   * Convenience method for reading tablet directory entries from the metadata table.
+   */
+  public static SortedMap<KeyExtent,Text> getMetadataDirectoryEntries(SortedMap<Key,Value> entries) {
+    Key key;
+    Value val;
+    Text datafile = null;
+    Value prevRow = null;
+    KeyExtent ke;
+    
+    SortedMap<KeyExtent,Text> results = new TreeMap<KeyExtent,Text>();
+    
+    Text lastRowFromKey = new Text();
+    
+    // the Text objects below are reused in the loop for efficiency
+    Text colf = new Text();
+    Text colq = new Text();
+    
+    for (Entry<Key,Value> entry : entries.entrySet()) {
+      key = entry.getKey();
+      val = entry.getValue();
+      
+      if (key.compareRow(lastRowFromKey) != 0) {
+        prevRow = null;
+        datafile = null;
+        key.getRow(lastRowFromKey);
+      }
+      
+      colf = key.getColumnFamily(colf);
+      colq = key.getColumnQualifier(colq);
+      
+      // interpret the row id as a key extent
+      if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.equals(colf, colq))
+        datafile = new Text(val.toString());
+      
+      else if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.equals(colf, colq))
+        prevRow = new Value(val);
+      
+      if (datafile != null && prevRow != null) {
+        ke = new KeyExtent(key.getRow(), prevRow);
+        results.put(ke, datafile);
+        
+        datafile = null;
+        prevRow = null;
+      }
+    }
+    return results;
+  }
+  
+  public static boolean recordRootTabletLocation(String address) {
+    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
+    for (int i = 0; i < SAVE_ROOT_TABLET_RETRIES; i++) {
+      try {
+        log.info("trying to write root tablet location to ZooKeeper as " + address);
+        String zRootLocPath = ZooUtil.getRoot(HdfsZooInstance.getInstance()) + RootTable.ZROOT_TABLET_LOCATION;
+        zoo.putPersistentData(zRootLocPath, address.getBytes(), NodeExistsPolicy.OVERWRITE);
+        return true;
+      } catch (Exception e) {
+        log.error("Master: unable to save root tablet location in zookeeper. exception: " + e, e);
+      }
+    }
+    log.error("Giving up after " + SAVE_ROOT_TABLET_RETRIES + " retries");
+    return false;
+  }
+  
+  public static SortedMap<FileRef,DataFileValue> getDataFileSizes(KeyExtent extent, TCredentials credentials) throws IOException {
+    TreeMap<FileRef,DataFileValue> sizes = new TreeMap<FileRef,DataFileValue>();
+    
+    Scanner mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
+    mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+    Text row = extent.getMetadataEntry();
+    VolumeManager fs = VolumeManagerImpl.get();
+    
+    Key endKey = new Key(row, DataFileColumnFamily.NAME, new Text(""));
+    endKey = endKey.followingKey(PartialKey.ROW_COLFAM);
+    
+    mdScanner.setRange(new Range(new Key(row), endKey));
+    for (Entry<Key,Value> entry : mdScanner) {
+      
+      if (!entry.getKey().getRow().equals(row))
+        break;
+      DataFileValue dfv = new DataFileValue(entry.getValue().get());
+      sizes.put(new FileRef(fs, entry.getKey()), dfv);
+    }
+    
+    return sizes;
+  }
+  
+  public static void addNewTablet(KeyExtent extent, String path, TServerInstance location, Map<FileRef,DataFileValue> datafileSizes,
+      Map<FileRef,Long> bulkLoadedFiles, TCredentials credentials, String time, long lastFlushID, long lastCompactID, ZooLock zooLock) {
+    Mutation m = extent.getPrevRowUpdateMutation();
+    
+    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(path.getBytes()));
+    TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(time.getBytes()));
+    if (lastFlushID > 0)
+      TabletsSection.ServerColumnFamily.FLUSH_COLUMN.put(m, new Value(("" + lastFlushID).getBytes()));
+    if (lastCompactID > 0)
+      TabletsSection.ServerColumnFamily.COMPACT_COLUMN.put(m, new Value(("" + lastCompactID).getBytes()));
+    
+    if (location != null) {
+      m.put(TabletsSection.CurrentLocationColumnFamily.NAME, location.asColumnQualifier(), location.asMutationValue());
+      m.putDelete(TabletsSection.FutureLocationColumnFamily.NAME, location.asColumnQualifier());
+    }
+    
+    for (Entry<FileRef,DataFileValue> entry : datafileSizes.entrySet()) {
+      m.put(DataFileColumnFamily.NAME, entry.getKey().meta(), new Value(entry.getValue().encode()));
+    }
+    
+    for (Entry<FileRef,Long> entry : bulkLoadedFiles.entrySet()) {
+      byte[] tidBytes = Long.toString(entry.getValue()).getBytes();
+      m.put(TabletsSection.BulkFileColumnFamily.NAME, entry.getKey().meta(), new Value(tidBytes));
+    }
+    
+    update(credentials, zooLock, m, extent);
+  }
+  
+  public static void rollBackSplit(Text metadataEntry, Text oldPrevEndRow, TCredentials credentials, ZooLock zooLock) {
+    KeyExtent ke = new KeyExtent(metadataEntry, oldPrevEndRow);
+    Mutation m = ke.getPrevRowUpdateMutation();
+    TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.putDelete(m);
+    TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.putDelete(m);
+    update(credentials, zooLock, m, new KeyExtent(metadataEntry, (Text) null));
+  }
+  
+  public static void splitTablet(KeyExtent extent, Text oldPrevEndRow, double splitRatio, TCredentials credentials, ZooLock zooLock) {
+    Mutation m = extent.getPrevRowUpdateMutation();
+    
+    TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value(Double.toString(splitRatio).getBytes()));
+    
+    TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(oldPrevEndRow));
+    ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);
+    update(credentials, zooLock, m, extent);
+  }
+  
+  public static void finishSplit(Text metadataEntry, Map<FileRef,DataFileValue> datafileSizes, List<FileRef> highDatafilesToRemove, TCredentials credentials,
+      ZooLock zooLock) {
+    Mutation m = new Mutation(metadataEntry);
+    TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.putDelete(m);
+    TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.putDelete(m);
+    ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);
+    
+    for (Entry<FileRef,DataFileValue> entry : datafileSizes.entrySet()) {
+      m.put(DataFileColumnFamily.NAME, entry.getKey().meta(), new Value(entry.getValue().encode()));
+    }
+    
+    for (FileRef pathToRemove : highDatafilesToRemove) {
+      m.putDelete(DataFileColumnFamily.NAME, pathToRemove.meta());
+    }
+    
+    update(credentials, zooLock, m, new KeyExtent(metadataEntry, (Text) null));
+  }
+  
+  public static void finishSplit(KeyExtent extent, Map<FileRef,DataFileValue> datafileSizes, List<FileRef> highDatafilesToRemove, TCredentials credentials,
+      ZooLock zooLock) {
+    finishSplit(extent.getMetadataEntry(), datafileSizes, highDatafilesToRemove, credentials, zooLock);
+  }
+  
+  public static void replaceDatafiles(KeyExtent extent, Set<FileRef> datafilesToDelete, Set<FileRef> scanFiles, FileRef path, Long compactionId,
+      DataFileValue size, TCredentials credentials, String address, TServerInstance lastLocation, ZooLock zooLock) throws IOException {
+    replaceDatafiles(extent, datafilesToDelete, scanFiles, path, compactionId, size, credentials, address, lastLocation, zooLock, true);
+  }
+  
+  public static void replaceDatafiles(KeyExtent extent, Set<FileRef> datafilesToDelete, Set<FileRef> scanFiles, FileRef path, Long compactionId,
+      DataFileValue size, TCredentials credentials, String address, TServerInstance lastLocation, ZooLock zooLock, boolean insertDeleteFlags)
+      throws IOException {
+    
+    if (insertDeleteFlags) {
+      // add delete flags for those paths before the data file reference is removed
+      addDeleteEntries(extent, datafilesToDelete, credentials);
+    }
+    
+    // replace data file references to old mapfiles with the new mapfiles
+    Mutation m = new Mutation(extent.getMetadataEntry());
+    
+    for (FileRef pathToRemove : datafilesToDelete)
+      m.putDelete(DataFileColumnFamily.NAME, pathToRemove.meta());
+    
+    for (FileRef scanFile : scanFiles)
+      m.put(ScanFileColumnFamily.NAME, scanFile.meta(), new Value("".getBytes()));
+    
+    if (size.getNumEntries() > 0)
+      m.put(DataFileColumnFamily.NAME, path.meta(), new Value(size.encode()));
+    
+    if (compactionId != null)
+      TabletsSection.ServerColumnFamily.COMPACT_COLUMN.put(m, new Value(("" + compactionId).getBytes()));
+    
+    TServerInstance self = getTServerInstance(address, zooLock);
+    self.putLastLocation(m);
+    
+    // remove the old location
+    if (lastLocation != null && !lastLocation.equals(self))
+      lastLocation.clearLastLocation(m);
+    
+    update(credentials, zooLock, m, extent);
+  }
+  
+  public static void addDeleteEntries(KeyExtent extent, Set<FileRef> datafilesToDelete, TCredentials credentials) throws IOException {
+    
+    String tableId = extent.getTableId().toString();
+    
+    // TODO could use a batch writer, but would need to handle failure and retry like update does - ACCUMULO-1294
+    for (FileRef pathToRemove : datafilesToDelete) {
+      update(credentials, createDeleteMutation(tableId, pathToRemove.path().toString()), extent);
+    }
+  }
+  
+  public static void addDeleteEntry(String tableId, String path) throws IOException {
+    update(SecurityConstants.getSystemCredentials(), createDeleteMutation(tableId, path), new KeyExtent(new Text(tableId), null, null));
+  }
+  
+  public static Mutation createDeleteMutation(String tableId, String pathToRemove) throws IOException {
+    if (!pathToRemove.contains(":")) {
+      if (pathToRemove.startsWith("../"))
+        pathToRemove = pathToRemove.substring(2);
+      else
+        pathToRemove = "/" + tableId + "/" + pathToRemove;
+    }
+    
+    Path path = VolumeManagerImpl.get().getFullPath(ServerConstants.getTablesDirs(), pathToRemove);
+    Mutation delFlag = new Mutation(new Text(MetadataSchema.DeletesSection.getRowPrefix() + path.toString()));
+    delFlag.put(EMPTY_TEXT, EMPTY_TEXT, new Value(new byte[] {}));
+    return delFlag;
+  }
+  
+  public static void removeScanFiles(KeyExtent extent, Set<FileRef> scanFiles, TCredentials credentials, ZooLock zooLock) {
+    Mutation m = new Mutation(extent.getMetadataEntry());
+    
+    for (FileRef pathToRemove : scanFiles)
+      m.putDelete(ScanFileColumnFamily.NAME, pathToRemove.meta());
+    
+    update(credentials, zooLock, m, extent);
+  }
+  
+  private static KeyExtent fixSplit(Text table, Text metadataEntry, Text metadataPrevEndRow, Value oper, double splitRatio, TServerInstance tserver,
+      TCredentials credentials, String time, long initFlushID, long initCompactID, ZooLock lock) throws AccumuloException, IOException {
+    if (metadataPrevEndRow == null)
+      // something is wrong, this should not happen... if a tablet is split, it will always have a
+      // prev end row....
+      throw new AccumuloException("Split tablet does not have prev end row, something is amiss, extent = " + metadataEntry);
+    
+    // check to see if prev tablet exist in metadata tablet
+    Key prevRowKey = new Key(new Text(KeyExtent.getMetadataEntry(table, metadataPrevEndRow)));
+    
+    ScannerImpl scanner2 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
+    scanner2.setRange(new Range(prevRowKey, prevRowKey.followingKey(PartialKey.ROW)));
+    
+    VolumeManager fs = VolumeManagerImpl.get();
+    if (!scanner2.iterator().hasNext()) {
+      log.info("Rolling back incomplete split " + metadataEntry + " " + metadataPrevEndRow);
+      rollBackSplit(metadataEntry, KeyExtent.decodePrevEndRow(oper), credentials, lock);
+      return new KeyExtent(metadataEntry, KeyExtent.decodePrevEndRow(oper));
+    } else {
+      log.info("Finishing incomplete split " + metadataEntry + " " + metadataPrevEndRow);
+      
+      List<FileRef> highDatafilesToRemove = new ArrayList<FileRef>();
+      
+      Scanner scanner3 = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
+      Key rowKey = new Key(metadataEntry);
+      
+      SortedMap<FileRef,DataFileValue> origDatafileSizes = new TreeMap<FileRef,DataFileValue>();
+      SortedMap<FileRef,DataFileValue> highDatafileSizes = new TreeMap<FileRef,DataFileValue>();
+      SortedMap<FileRef,DataFileValue> lowDatafileSizes = new TreeMap<FileRef,DataFileValue>();
+      scanner3.fetchColumnFamily(DataFileColumnFamily.NAME);
+      scanner3.setRange(new Range(rowKey, rowKey.followingKey(PartialKey.ROW)));
+      
+      for (Entry<Key,Value> entry : scanner3) {
+        if (entry.getKey().compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
+          origDatafileSizes.put(new FileRef(fs, entry.getKey()), new DataFileValue(entry.getValue().get()));
+        }
+      }
+      
+      splitDatafiles(table, metadataPrevEndRow, splitRatio, new HashMap<FileRef,FileUtil.FileInfo>(), origDatafileSizes, lowDatafileSizes, highDatafileSizes,
+          highDatafilesToRemove);
+      
+      MetadataTableUtil.finishSplit(metadataEntry, highDatafileSizes, highDatafilesToRemove, credentials, lock);
+      
+      return new KeyExtent(metadataEntry, KeyExtent.encodePrevEndRow(metadataPrevEndRow));
+    }
+    
+  }
+  
+  public static void splitDatafiles(Text table, Text midRow, double splitRatio, Map<FileRef,FileUtil.FileInfo> firstAndLastRows,
+      SortedMap<FileRef,DataFileValue> datafiles, SortedMap<FileRef,DataFileValue> lowDatafileSizes, SortedMap<FileRef,DataFileValue> highDatafileSizes,
+      List<FileRef> highDatafilesToRemove) {
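+    // Partition each source file between the low and high tablets: files known to lie
+    // entirely on one side keep their full size; otherwise sizes are estimated via splitRatio.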
+    
+    for (Entry<FileRef,DataFileValue> entry : datafiles.entrySet()) {
+      
+      Text firstRow = null;
+      Text lastRow = null;
+      
+      boolean rowsKnown = false;
+      
+      FileUtil.FileInfo mfi = firstAndLastRows.get(entry.getKey());
+      
+      if (mfi != null) {
+        firstRow = mfi.getFirstRow();
+        lastRow = mfi.getLastRow();
+        rowsKnown = true;
+      }
+      
+      if (rowsKnown && firstRow.compareTo(midRow) > 0) {
+        // only in high
+        long highSize = entry.getValue().getSize();
+        long highEntries = entry.getValue().getNumEntries();
+        highDatafileSizes.put(entry.getKey(), new DataFileValue(highSize, highEntries, entry.getValue().getTime()));
+      } else if (rowsKnown && lastRow.compareTo(midRow) <= 0) {
+        // only in low
+        long lowSize = entry.getValue().getSize();
+        long lowEntries = entry.getValue().getNumEntries();
+        lowDatafileSizes.put(entry.getKey(), new DataFileValue(lowSize, lowEntries, entry.getValue().getTime()));
+        
+        highDatafilesToRemove.add(entry.getKey());
+      } else {
+        long lowSize = (long) Math.floor((entry.getValue().getSize() * splitRatio));
+        long lowEntries = (long) Math.floor((entry.getValue().getNumEntries() * splitRatio));
+        lowDatafileSizes.put(entry.getKey(), new DataFileValue(lowSize, lowEntries, entry.getValue().getTime()));
+        
+        long highSize = (long) Math.ceil((entry.getValue().getSize() * (1.0 - splitRatio)));
+        long highEntries = (long) Math.ceil((entry.getValue().getNumEntries() * (1.0 - splitRatio)));
+        highDatafileSizes.put(entry.getKey(), new DataFileValue(highSize, highEntries, entry.getValue().getTime()));
+      }
+    }
+  }
+  
+  public static KeyExtent fixSplit(Text metadataEntry, SortedMap<ColumnFQ,Value> columns, TServerInstance tserver, TCredentials credentials, ZooLock lock)
+      throws AccumuloException, IOException {
+    log.info("Incomplete split " + metadataEntry + " attempting to fix");
+    
+    Value oper = columns.get(TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN);
+    
+    if (columns.get(TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN) == null) {
+      throw new IllegalArgumentException("Metadata entry does not have split ratio (" + metadataEntry + ")");
+    }
+    
+    double splitRatio = Double.parseDouble(new String(columns.get(TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN).get()));
+    
+    Value prevEndRowIBW = columns.get(TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN);
+    
+    if (prevEndRowIBW == null) {
+      throw new IllegalArgumentException("Metadata entry does not have prev row (" + metadataEntry + ")");
+    }
+    
+    Value time = columns.get(TabletsSection.ServerColumnFamily.TIME_COLUMN);
+    
+    if (time == null) {
+      throw new IllegalArgumentException("Metadata entry does not have time (" + metadataEntry + ")");
+    }
+    
+    Value flushID = columns.get(TabletsSection.ServerColumnFamily.FLUSH_COLUMN);
+    long initFlushID = -1;
+    if (flushID != null)
+      initFlushID = Long.parseLong(flushID.toString());
+    
+    Value compactID = columns.get(TabletsSection.ServerColumnFamily.COMPACT_COLUMN);
+    long initCompactID = -1;
+    if (compactID != null)
+      initCompactID = Long.parseLong(compactID.toString());
+    
+    Text metadataPrevEndRow = KeyExtent.decodePrevEndRow(prevEndRowIBW);
+    
+    Text table = (new KeyExtent(metadataEntry, (Text) null)).getTableId();
+    
+    return fixSplit(table, metadataEntry, metadataPrevEndRow, oper, splitRatio, tserver, credentials, time.toString(), initFlushID, initCompactID, lock);
+  }
+  
+  public static void deleteTable(String tableId, boolean insertDeletes, TCredentials credentials, ZooLock lock) throws AccumuloException, IOException {
+    Scanner ms = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, Authorizations.EMPTY);
+    Text tableIdText = new Text(tableId);
+    BatchWriter bw = new BatchWriterImpl(HdfsZooInstance.getInstance(), credentials, MetadataTable.ID, new BatchWriterConfig().setMaxMemory(1000000)
+        .setMaxLatency(120000l, TimeUnit.MILLISECONDS).setMaxWriteThreads(2));
+    
+    // scan metadata for our table and delete everything we find
+    Mutation m = null;
+    ms.setRange(new KeyExtent(tableIdText, null, null).toMetadataRange());
+    
+    // insert deletes before deleting data from !METADATA... this makes the code fault tolerant
+    if (insertDeletes) {
+      
+      ms.fetchColumnFamily(DataFileColumnFamily.NAME);
+      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(ms);
+      
+      for (Entry<Key,Value> cell : ms) {
+        Key key = cell.getKey();
+        
+        if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
+          FileRef ref = new FileRef(VolumeManagerImpl.get(), key);
+          bw.addMutation(createDeleteMutation(tableId, ref.meta().toString()));
+        }
+        
+        if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
+          bw.addMutation(createDeleteMutation(tableId, cell.getValue().toString()));
+        }
+      }
+      
+      bw.flush();
+      
+      ms.clearColumns();
+    }
+    
+    for (Entry<Key,Value> cell : ms) {
+      Key key = cell.getKey();
+      
+      if (m == null) {
+        m = new Mutation(key.getRow());
+        if (lock != null)
+          putLockID(lock, m);
+      }
+      
+      if (key.getRow().compareTo(m.getRow(), 0, m.getRow().length) != 0) {
+        bw.addMutation(m);
+        m = new Mutation(key.getRow());
+        if (lock != null)
+          putLockID(lock, m);
+      }
+      m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
+    }
+    
+    if (m != null)
+      bw.addMutation(m);
+    
+    bw.close();
+  }
+  
+  public static class LogEntry {
+    public KeyExtent extent;
+    public long timestamp;
+    public String server;
+    public String filename;
+    public int tabletId;
+    public Collection<String> logSet;
+    
+    @Override
+    public String toString() {
+      return extent.toString() + " " + filename + " (" + tabletId + ")";
+    }
+    
+    public String getName() {
+      return server + "/" + filename;
+    }
+    
+    public byte[] toBytes() throws IOException {
+      DataOutputBuffer out = new DataOutputBuffer();
+      extent.write(out);
+      out.writeLong(timestamp);
+      out.writeUTF(server);
+      out.writeUTF(filename);
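+      // note: tabletId and the log set size are each written as a single byte; values above 255 would be truncated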
+      out.write(tabletId);
+      out.write(logSet.size());
+      for (String s : logSet) {
+        out.writeUTF(s);
+      }
+      return Arrays.copyOf(out.getData(), out.getLength());
+    }
+    
+    public void fromBytes(byte bytes[]) throws IOException {
+      DataInputBuffer inp = new DataInputBuffer();
+      inp.reset(bytes, bytes.length);
+      extent = new KeyExtent();
+      extent.readFields(inp);
+      timestamp = inp.readLong();
+      server = inp.readUTF();
+      filename = inp.readUTF();
+      tabletId = inp.read();
+      int count = inp.read();
+      ArrayList<String> logSet = new ArrayList<String>(count);
+      for (int i = 0; i < count; i++)
+        logSet.add(inp.readUTF());
+      this.logSet = logSet;
+    }
+    
+  }
+  
+  private static String getZookeeperLogLocation() {
+    return ZooUtil.getRoot(HdfsZooInstance.getInstance()) + RootTable.ZROOT_TABLET_WALOGS;
+  }
+  
+  public static void addLogEntry(TCredentials credentials, LogEntry entry, ZooLock zooLock) {
+    if (entry.extent.isRootTablet()) {
+      String root = getZookeeperLogLocation();
+      while (true) {
+        try {
+          IZooReaderWriter zoo = ZooReaderWriter.getInstance();
+          if (zoo.isLockHeld(zooLock.getLockID())) {
+            String[] parts = entry.filename.split("/");
+            String uniqueId = parts[parts.length - 1];
+            zoo.putPersistentData(root + "/" + uniqueId, entry.toBytes(), NodeExistsPolicy.OVERWRITE);
+          }
+          break;
+        } catch (KeeperException e) {
+          log.error(e, e);
+        } catch (InterruptedException e) {
+          log.error(e, e);
+        } catch (IOException e) {
+          log.error(e, e);
+        }
+        UtilWaitThread.sleep(1000);
+      }
+    } else {
+      String value = StringUtil.join(entry.logSet, ";") + "|" + entry.tabletId;
+      Mutation m = new Mutation(entry.extent.getMetadataEntry());
+      m.put(LogColumnFamily.NAME, new Text(entry.server + "/" + entry.filename), new Value(value.getBytes()));
+      update(credentials, zooLock, m, entry.extent);
+    }
+  }
+  
+  public static LogEntry entryFromKeyValue(Key key, Value value) {
+    MetadataTableUtil.LogEntry e = new MetadataTableUtil.LogEntry();
+    e.extent = new KeyExtent(key.getRow(), EMPTY_TEXT);
+    String[] parts = key.getColumnQualifier().toString().split("/", 2);
+    e.server = parts[0];
+    e.filename = parts[1];
+    parts = value.toString().split("\\|");
+    e.tabletId = Integer.parseInt(parts[1]);
+    e.logSet = Arrays.asList(parts[0].split(";"));
+    e.timestamp = key.getTimestamp();
+    return e;
+  }
+  
+  public static Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>> getFileAndLogEntries(TCredentials credentials, KeyExtent extent) throws KeeperException,
+      InterruptedException, IOException {
+    ArrayList<LogEntry> result = new ArrayList<LogEntry>();
+    TreeMap<FileRef,DataFileValue> sizes = new TreeMap<FileRef,DataFileValue>();
+    
+    VolumeManager fs = VolumeManagerImpl.get();
+    if (extent.isRootTablet()) {
+      getRootLogEntries(result);
+      Path rootDir = new Path(ServerConstants.getRootTabletDir());
+      rootDir = rootDir.makeQualified(fs.getDefaultVolume());
+      FileStatus[] files = fs.listStatus(rootDir);
+      for (FileStatus fileStatus : files) {
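+        // skip temporary files left by in-progress writes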
+        if (fileStatus.getPath().toString().endsWith("_tmp")) {
+          continue;
+        }
+        DataFileValue dfv = new DataFileValue(0, 0);
+        sizes.put(new FileRef(fileStatus.getPath().toString(), fileStatus.getPath()), dfv);
+      }
+      
+    } else {
+      String systemTableToCheck = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
+      Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, systemTableToCheck, Authorizations.EMPTY);
+      scanner.fetchColumnFamily(LogColumnFamily.NAME);
+      scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+      scanner.setRange(extent.toMetadataRange());
+      
+      for (Entry<Key,Value> entry : scanner) {
+        if (!entry.getKey().getRow().equals(extent.getMetadataEntry())) {
+          throw new RuntimeException("Unexpected row " + entry.getKey().getRow() + " expected " + extent.getMetadataEntry());
+        }
+        
+        if (entry.getKey().getColumnFamily().equals(LogColumnFamily.NAME)) {
+          result.add(entryFromKeyValue(entry.getKey(), entry.getValue()));
+        } else if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
+          DataFileValue dfv = new DataFileValue(entry.getValue().get());
+          sizes.put(new FileRef(fs, entry.getKey()), dfv);
+        } else {
+          throw new RuntimeException("Unexpected col fam " + entry.getKey().getColumnFamily());
+        }
+      }
+    }
+    
+    return new Pair<List<LogEntry>,SortedMap<FileRef,DataFileValue>>(result, sizes);
+  }
+  
+  public static List<LogEntry> getLogEntries(TCredentials credentials, KeyExtent extent) throws IOException, KeeperException, InterruptedException {
+    log.info("Scanning logging entries for " + extent);
+    ArrayList<LogEntry> result = new ArrayList<LogEntry>();
+    if (extent.equals(RootTable.EXTENT)) {
+      log.info("Getting logs for root tablet from zookeeper");
+      getRootLogEntries(result);
+    } else {
+      log.info("Scanning metadata for logs used for tablet " + extent);
+      Scanner scanner = getTabletLogScanner(credentials, extent);
+      Text pattern = extent.getMetadataEntry();
+      for (Entry<Key,Value> entry : scanner) {
+        Text row = entry.getKey().getRow();
+        if (entry.getKey().getColumnFamily().equals(LogColumnFamily.NAME)) {
+          if (row.equals(pattern)) {
+            result.add(entryFromKeyValue(entry.getKey(), entry.getValue()));
+          }
+        }
+      }
+    }
+    
+    Collections.sort(result, new Comparator<LogEntry>() {
+      @Override
+      public int compare(LogEntry o1, LogEntry o2) {
+        long diff = o1.timestamp - o2.timestamp;
+        if (diff < 0)
+          return -1;
+        if (diff > 0)
+          return 1;
+        return 0;
+      }
+    });
+    log.info("Returning logs " + result + " for extent " + extent);
+    return result;
+  }
+  
+  private static void getRootLogEntries(ArrayList<LogEntry> result) throws KeeperException, InterruptedException, IOException {
+    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
+    String root = getZookeeperLogLocation();
+    // there's a little race between getting the children and fetching
+    // the data. The log can be removed in between.
+    while (true) {
+      result.clear();
+      for (String child : zoo.getChildren(root)) {
+        LogEntry e = new LogEntry();
+        try {
+          e.fromBytes(zoo.getData(root + "/" + child, null));
+          result.add(e);
+        } catch (KeeperException.NoNodeException ex) {
+          continue;
+        }
+      }
+      break;
+    }
+  }
+  
+  private static Scanner getTabletLogScanner(TCredentials credentials, KeyExtent extent) {
+    String tableId = MetadataTable.ID;
+    if (extent.isMeta())
+      tableId = RootTable.ID;
+    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, tableId, Authorizations.EMPTY);
+    scanner.fetchColumnFamily(LogColumnFamily.NAME);
+    Text start = extent.getMetadataEntry();
+    Key endKey = new Key(start, LogColumnFamily.NAME);
+    endKey = endKey.followingKey(PartialKey.ROW_COLFAM);
+    scanner.setRange(new Range(new Key(start), endKey));
+    return scanner;
+  }
+  
+  static class LogEntryIterator implements Iterator<LogEntry> {
+    
+    Iterator<LogEntry> rootTabletEntries = null;
+    Iterator<Entry<Key,Value>> metadataEntries = null;
+    
+    LogEntryIterator(TCredentials creds) throws IOException, KeeperException, InterruptedException {
+      rootTabletEntries = getLogEntries(creds, RootTable.EXTENT).iterator();
+      try {
+        Scanner scanner = HdfsZooInstance.getInstance().getConnector(creds.getPrincipal(), CredentialHelper.extractToken(creds))
+            .createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+        log.info("Setting range to " + MetadataSchema.TabletsSection.getRange());
+        scanner.setRange(MetadataSchema.TabletsSection.getRange());
+        scanner.fetchColumnFamily(LogColumnFamily.NAME);
+        metadataEntries = scanner.iterator();
+      } catch (Exception ex) {
+        throw new IOException(ex);
+      }
+    }
+    
+    @Override
+    public boolean hasNext() {
+      return rootTabletEntries.hasNext() || metadataEntries.hasNext();
+    }
+    
+    @Override
+    public LogEntry next() {
+      if (rootTabletEntries.hasNext()) {
+        return rootTabletEntries.next();
+      }
+      Entry<Key,Value> entry = metadataEntries.next();
+      return entryFromKeyValue(entry.getKey(), entry.getValue());
+    }
+    
+    @Override
+    public void remove() {
+      throw new UnsupportedOperationException();
+    }
+  }
+  
+  public static Iterator<LogEntry> getLogEntries(TCredentials creds) throws IOException, KeeperException, InterruptedException {
+    return new LogEntryIterator(creds);
+  }
+  
+  public static void removeUnusedWALEntries(KeyExtent extent, List<LogEntry> logEntries, ZooLock zooLock) {
+    for (LogEntry entry : logEntries) {
+      if (entry.extent.isRootTablet()) {
+        String root = getZookeeperLogLocation();
+        while (true) {
+          try {
+            IZooReaderWriter zoo = ZooReaderWriter.getInstance();
+            if (zoo.isLockHeld(zooLock.getLockID()))
+              zoo.recursiveDelete(root + "/" + entry.filename, NodeMissingPolicy.SKIP);
+            break;
+          } catch (Exception e) {
+            log.error(e, e);
+          }
+          UtilWaitThread.sleep(1000);
+        }
+      } else {
+        Mutation m = new Mutation(entry.extent.getMetadataEntry());
+        m.putDelete(LogColumnFamily.NAME, new Text(entry.server + "/" + entry.filename));
+        update(SecurityConstants.getSystemCredentials(), zooLock, m, entry.extent);
+      }
+    }
+  }
+  
+  private static void getFiles(Set<String> files, Map<Key,Value> tablet, String srcTableId) {
+    for (Entry<Key,Value> entry : tablet.entrySet()) {
+      if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
+        String cf = entry.getKey().getColumnQualifier().toString();
+        if (srcTableId != null && !cf.startsWith("../") && !cf.contains(":")) {
+          cf = "../" + srcTableId + entry.getKey().getColumnQualifier();
+        }
+        files.add(cf);
+      }
+    }
+  }
+  
+  private static Mutation createCloneMutation(String srcTableId, String tableId, Map<Key,Value> tablet) {
+    
+    KeyExtent ke = new KeyExtent(tablet.keySet().iterator().next().getRow(), (Text) null);
+    Mutation m = new Mutation(KeyExtent.getMetadataEntry(new Text(tableId), ke.getEndRow()));
+    
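+    // rewrite relative file references to reach back into the source table's directory ("../<srcTableId>/...")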
+    for (Entry<Key,Value> entry : tablet.entrySet()) {
+      if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
+        String cf = entry.getKey().getColumnQualifier().toString();
+        if (!cf.startsWith("../") && !cf.contains(":"))
+          cf = "../" + srcTableId + entry.getKey().getColumnQualifier();
+        m.put(entry.getKey().getColumnFamily(), new Text(cf), entry.getValue());
+      } else if (entry.getKey().getColumnFamily().equals(TabletsSection.CurrentLocationColumnFamily.NAME)) {
+        m.put(TabletsSection.LastLocationColumnFamily.NAME, entry.getKey().getColumnQualifier(), entry.getValue());
+      } else if (entry.getKey().getColumnFamily().equals(TabletsSection.LastLocationColumnFamily.NAME)) {
+        // skip
+      } else {
+        m.put(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier(), entry.getValue());
+      }
+    }
+    return m;
+  }
+  
+  private static Scanner createCloneScanner(String tableId, Connector conn) throws TableNotFoundException {
+    Scanner mscanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
+    mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
+    mscanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+    mscanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
+    mscanner.fetchColumnFamily(TabletsSection.LastLocationColumnFamily.NAME);
+    mscanner.fetchColumnFamily(ClonedColumnFamily.NAME);
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(mscanner);
+    TabletsSection.ServerColumnFamily.TIME_COLUMN.fetch(mscanner);
+    return mscanner;
+  }
+  
+  static void initializeClone(String srcTableId, String tableId, Connector conn, BatchWriter bw) throws TableNotFoundException, MutationsRejectedException {
+    TabletIterator ti = new TabletIterator(createCloneScanner(srcTableId, conn), new KeyExtent(new Text(srcTableId), null, null).toMetadataRange(), true, true);
+    
+    if (!ti.hasNext())
+      throw new RuntimeException(" table deleted during clone?  srcTableId = " + srcTableId);
+    
+    while (ti.hasNext())
+      bw.addMutation(createCloneMutation(srcTableId, tableId, ti.next()));
+    
+    bw.flush();
+  }
+  
+  static int compareEndRows(Text endRow1, Text endRow2) {
+    return new KeyExtent(new Text("0"), endRow1, null).compareTo(new KeyExtent(new Text("0"), endRow2, null));
+  }
+  
+  static int checkClone(String srcTableId, String tableId, Connector conn, BatchWriter bw) throws TableNotFoundException, MutationsRejectedException {
+    TabletIterator srcIter = new TabletIterator(createCloneScanner(srcTableId, conn), new KeyExtent(new Text(srcTableId), null, null).toMetadataRange(), true,
+        true);
+    TabletIterator cloneIter = new TabletIterator(createCloneScanner(tableId, conn), new KeyExtent(new Text(tableId), null, null).toMetadataRange(), true, true);
+    
+    if (!cloneIter.hasNext() || !srcIter.hasNext())
+      throw new RuntimeException(" table deleted during clone?  srcTableId = " + srcTableId + " tableId=" + tableId);
+    
+    int rewrites = 0;
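+    // walk clone and source tablets in lockstep, re-copying any clone tablet whose file set no longer matches the source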
+    
+    while (cloneIter.hasNext()) {
+      Map<Key,Value> cloneTablet = cloneIter.next();
+      Text cloneEndRow = new KeyExtent(cloneTablet.keySet().iterator().next().getRow(), (Text) null).getEndRow();
+      HashSet<String> cloneFiles = new HashSet<String>();
+      
+      boolean cloneSuccessful = false;
+      for (Entry<Key,Value> entry : cloneTablet.entrySet()) {
+        if (entry.getKey().getColumnFamily().equals(ClonedColumnFamily.NAME)) {
+          cloneSuccessful = true;
+          break;
+        }
+      }
+      
+      if (!cloneSuccessful)
+        getFiles(cloneFiles, cloneTablet, null);
+      
+      List<Map<Key,Value>> srcTablets = new ArrayList<Map<Key,Value>>();
+      Map<Key,Value> srcTablet = srcIter.next();
+      srcTablets.add(srcTablet);
+      
+      Text srcEndRow = new KeyExtent(srcTablet.keySet().iterator().next().getRow(), (Text) null).getEndRow();
+      
+      int cmp = compareEndRows(cloneEndRow, srcEndRow);
+      if (cmp < 0)
+        throw new TabletIterator.TabletDeletedException("Tablets deleted from src during clone : " + cloneEndRow + " " + srcEndRow);
+      
+      HashSet<String> srcFiles = new HashSet<String>();
+      if (!cloneSuccessful)
+        getFiles(srcFiles, srcTablet, srcTableId);
+      
+      while (cmp > 0) {
+        srcTablet = srcIter.next();
+        srcTablets.add(srcTablet);
+        srcEndRow = new KeyExtent(srcTablet.keySet().iterator().next().getRow(), (Text) null).getEndRow();
+        cmp = compareEndRows(cloneEndRow, srcEndRow);
+        if (cmp < 0)
+          throw new TabletIterator.TabletDeletedException("Tablets deleted from src during clone : " + cloneEndRow + " " + srcEndRow);
+        
+        if (!cloneSuccessful)
+          getFiles(srcFiles, srcTablet, srcTableId);
+      }
+      
+      if (cloneSuccessful)
+        continue;
+      
+      if (!srcFiles.containsAll(cloneFiles)) {
+        // delete existing cloned tablet entry
+        Mutation m = new Mutation(cloneTablet.keySet().iterator().next().getRow());
+        
+        for (Entry<Key,Value> entry : cloneTablet.entrySet()) {
+          Key k = entry.getKey();
+          m.putDelete(k.getColumnFamily(), k.getColumnQualifier(), k.getTimestamp());
+        }
+        
+        bw.addMutation(m);
+        
+        for (Map<Key,Value> st : srcTablets)
+          bw.addMutation(createCloneMutation(srcTableId, tableId, st));
+        
+        rewrites++;
+      } else {
+        // write out marker that this tablet was successfully cloned
+        Mutation m = new Mutation(cloneTablet.keySet().iterator().next().getRow());
+        m.put(ClonedColumnFamily.NAME, new Text(""), new Value("OK".getBytes()));
+        bw.addMutation(m);
+      }
+    }
+    
+    bw.flush();
+    return rewrites;
+  }
+  
+  public static void cloneTable(Instance instance, String srcTableId, String tableId) throws Exception {
+    
+    Connector conn = instance.getConnector(SecurityConstants.SYSTEM_PRINCIPAL, SecurityConstants.getSystemToken());
+    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+    
+    while (true) {
+      
+      try {
+        initializeClone(srcTableId, tableId, conn, bw);
+        
+        // the following loop looks for file changes that occurred during the copy; if files were dereferenced then they could have been GCed
+        
+        while (true) {
+          int rewrites = checkClone(srcTableId, tableId, conn, bw);
+          
+          if (rewrites == 0)
+            break;
+        }
+        
+        bw.flush();
+        break;
+        
+      } catch (TabletIterator.TabletDeletedException tde) {
+        // tablets were merged in the src table
+        bw.flush();
+        
+        // delete what we have cloned and try again
+        deleteTable(tableId, false, SecurityConstants.getSystemCredentials(), null);
+        
+        log.debug("Tablets merged in table " + srcTableId + " while attempting to clone, trying again");
+        
+        UtilWaitThread.sleep(100);
+      }
+    }
+    
+    // delete the clone markers and create directory entries
+    Scanner mscanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+    mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
+    mscanner.fetchColumnFamily(ClonedColumnFamily.NAME);
+    
+    int dirCount = 0;
+    
+    for (Entry<Key,Value> entry : mscanner) {
+      Key k = entry.getKey();
+      Mutation m = new Mutation(k.getRow());
+      m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
+      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes())));
+      bw.addMutation(m);
+    }
+    
+    bw.close();
+    
+  }
+  
+  public static void chopped(KeyExtent extent, ZooLock zooLock) {
+    Mutation m = new Mutation(extent.getMetadataEntry());
+    ChoppedColumnFamily.CHOPPED_COLUMN.put(m, new Value("chopped".getBytes()));
+    update(SecurityConstants.getSystemCredentials(), zooLock, m, extent);
+  }
+  
+  public static void removeBulkLoadEntries(Connector conn, String tableId, long tid) throws Exception {
+    Scanner mscanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY));
+    mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
+    mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
+    BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
+    for (Entry<Key,Value> entry : mscanner) {
+      log.debug("Looking at entry " + entry + " with tid " + tid);
+      if (Long.parseLong(entry.getValue().toString()) == tid) {
+        log.debug("deleting entry " + entry);
+        Mutation m = new Mutation(entry.getKey().getRow());
+        m.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
+        bw.addMutation(m);
+      }
+    }
+    bw.close();
+  }
+  
+  public static List<FileRef> getBulkFilesLoaded(Connector conn, KeyExtent extent, long tid) throws IOException {
+    List<FileRef> result = new ArrayList<FileRef>();
+    try {
+      VolumeManager fs = VolumeManagerImpl.get();
+      Scanner mscanner = new IsolatedScanner(conn.createScanner(extent.isMeta() ? RootTable.NAME : MetadataTable.NAME, Authorizations.EMPTY));
+      mscanner.setRange(extent.toMetadataRange());
+      mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
+      for (Entry<Key,Value> entry : mscanner) {
+        if (Long.parseLong(entry.getValue().toString()) == tid) {
+          result.add(new FileRef(fs, entry.getKey()));
+        }
+      }
+      return result;
+    } catch (TableNotFoundException ex) {
+      // unlikely
+      throw new RuntimeException("Onos! teh metadata table has vanished!!");
+    }
+  }
+  
+  public static Map<FileRef,Long> getBulkFilesLoaded(TCredentials credentials, KeyExtent extent) throws IOException {
+    Text metadataRow = extent.getMetadataEntry();
+    Map<FileRef,Long> ret = new HashMap<FileRef,Long>();
+    
+    VolumeManager fs = VolumeManagerImpl.get();
+    Scanner scanner = new ScannerImpl(HdfsZooInstance.getInstance(), credentials, extent.isMeta() ? RootTable.ID : MetadataTable.ID, Authorizations.EMPTY);
+    scanner.setRange(new Range(metadataRow));
+    scanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
+    for (Entry<Key,Value> entry : scanner) {
+      Long tid = Long.parseLong(entry.getValue().toString());
+      ret.put(new FileRef(fs, entry.getKey()), tid);
+    }
+    return ret;
+  }
+  
+  public static void addBulkLoadInProgressFlag(String path) {
+    
+    Mutation m = new Mutation(MetadataSchema.BlipSection.getRowPrefix() + path);
+    m.put(EMPTY_TEXT, EMPTY_TEXT, new Value(new byte[] {}));
+    
+    // the throwaway KeyExtent below forces the update to go to the metadata table rather than
+    // the root table, because bulk loads to the metadata table are not supported
+    update(SecurityConstants.getSystemCredentials(), m, new KeyExtent(new Text("anythingNotMetadata"), null, null));
+  }
+  
+  public static void removeBulkLoadInProgressFlag(String path) {
+    
+    Mutation m = new Mutation(MetadataSchema.BlipSection.getRowPrefix() + path);
+    m.putDelete(EMPTY_TEXT, EMPTY_TEXT);
+    
+    // the throwaway KeyExtent below forces the update to go to the metadata table rather than
+    // the root table, because bulk loads to the metadata table are not supported
+    update(SecurityConstants.getSystemCredentials(), m, new KeyExtent(new Text("anythingNotMetadata"), null, null));
+  }
+  
+  public static void moveMetaDeleteMarkers(Instance instance, TCredentials creds) {
+    // move old delete markers to new location, to standardize table schema between all metadata tables
+    byte[] EMPTY_BYTES = new byte[0];
+    Scanner scanner = new ScannerImpl(instance, creds, RootTable.ID, Authorizations.EMPTY);
+    String oldDeletesPrefix = "!!~del";
+    Range oldDeletesRange = new Range(oldDeletesPrefix, true, "!!~dem", false);
+    scanner.setRange(oldDeletesRange);
+    for (Entry<Key,Value> entry : scanner) {
+      String row = entry.getKey().getRow().toString();
+      if (row.startsWith(oldDeletesPrefix)) {
+        String filename = row.substring(oldDeletesPrefix.length());
+        // add the new entry first
+        log.info("Moving " + filename + " marker in " + RootTable.NAME);
+        Mutation m = new Mutation(MetadataSchema.DeletesSection.getRowPrefix() + filename);
+        m.put(EMPTY_BYTES, EMPTY_BYTES, EMPTY_BYTES);
+        update(creds, m, null);
+        // remove the old entry
+        m = new Mutation(entry.getKey().getRow());
+        m.putDelete(EMPTY_BYTES, EMPTY_BYTES);
+        update(creds, m, null);
+      } else {
+        break;
+      }
+    }
+    
+  }
+  
+  public static SortedMap<Text,SortedMap<ColumnFQ,Value>> getTabletEntries(SortedMap<Key,Value> tabletKeyValues, List<ColumnFQ> columns) {
+    TreeMap<Text,SortedMap<ColumnFQ,Value>> tabletEntries = new TreeMap<Text,SortedMap<ColumnFQ,Value>>();
+    
+    HashSet<ColumnFQ> colSet = null;
+    if (columns != null) {
+      colSet = new HashSet<ColumnFQ>(columns);
+    }
+    
+    for (Entry<Key,Value> entry : tabletKeyValues.entrySet()) {
+      
+      if (columns != null && !colSet.contains(new ColumnFQ(entry.getKey()))) {
+        continue;
+      }
+      
+      Text row = entry.getKey().getRow();
+      
+      SortedMap<ColumnFQ,Value> colVals = tabletEntries.get(row);
+      if (colVals == null) {
+        colVals = new TreeMap<ColumnFQ,Value>();
+        tabletEntries.put(row, colVals);
+      }
+      
+      colVals.put(new ColumnFQ(entry.getKey()), entry.getValue());
+    }
+    
+    return tabletEntries;
+  }
+  
+}
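
The three bulk-load helpers above cooperate: addBulkLoadInProgressFlag writes a ~blip marker before any files are imported, per-file entries under TabletsSection.BulkFileColumnFamily record which transaction loaded each file, and removeBulkLoadEntries plus removeBulkLoadInProgressFlag clean both up afterwards. Below is a minimal sketch of that lifecycle built only from the methods in this diff; conn, tableId, bulkDir, and tid are illustrative placeholders, and the ordering is a plausible reading of the code rather than the tablet server's actual call sequence.

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.server.util.MetadataTableUtil;

class BulkLoadLifecycleSketch {
  // conn, tableId, bulkDir, and tid are hypothetical placeholders
  static void runBulkLoad(Connector conn, String tableId, String bulkDir, long tid) throws Exception {
    // 1. write the ~blip row; the throwaway extent routes the mutation to the
    //    metadata table, never to the root table
    MetadataTableUtil.addBulkLoadInProgressFlag(bulkDir);
    try {
      // 2. ... files under bulkDir are imported, each tablet recording a
      //    BulkFileColumnFamily entry whose value is the transaction id ...
    } finally {
      // 3. delete every load entry written under this transaction id
      MetadataTableUtil.removeBulkLoadEntries(conn, tableId, tid);
      // 4. clear the in-progress marker last, so a crash before this point
      //    still leaves the ~blip row for recovery to find
      MetadataTableUtil.removeBulkLoadInProgressFlag(bulkDir);
    }
  }
}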

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java b/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java
index c8fa771..5e82ada 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/OfflineMetadataScanner.java
@@ -44,16 +44,19 @@ import org.apache.accumulo.core.iterators.system.DeletingIterator;
 import org.apache.accumulo.core.iterators.system.MultiIterator;
 import org.apache.accumulo.core.iterators.system.VisibilityFilter;
 import org.apache.accumulo.core.iterators.user.VersioningIterator;
+import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.LocalityGroupUtil;
-import org.apache.accumulo.core.util.RootTable;
 import org.apache.accumulo.core.util.TextUtil;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.conf.ServerConfiguration;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
-import org.apache.accumulo.server.util.MetadataTable.LogEntry;
+import org.apache.accumulo.server.util.MetadataTableUtil.LogEntry;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -126,7 +129,7 @@ public class OfflineMetadataScanner extends ScannerOptions implements Scanner {
     this.conf = conf;
     List<LogEntry> rwal;
     try {
-      rwal = MetadataTable.getLogEntries(null, RootTable.EXTENT);
+      rwal = MetadataTableUtil.getLogEntries(null, RootTable.EXTENT);
     } catch (Exception e) {
       throw new RuntimeException("Failed to check if root tablet has write ahead log entries", e);
     }
@@ -144,15 +147,15 @@ public class OfflineMetadataScanner extends ScannerOptions implements Scanner {
     List<SortedKeyValueIterator<Key,Value>> readers = openMapFiles(allFiles, fs, conf);
     
     HashSet<Column> columns = new HashSet<Column>();
-    columns.add(new Column(TextUtil.getBytes(MetadataTable.DATAFILE_COLUMN_FAMILY), null, null));
-    columns.add(new Column(TextUtil.getBytes(MetadataTable.LOG_COLUMN_FAMILY), null, null));
+    columns.add(new Column(TextUtil.getBytes(DataFileColumnFamily.NAME), null, null));
+    columns.add(new Column(TextUtil.getBytes(LogColumnFamily.NAME), null, null));
     
     SortedKeyValueIterator<Key,Value> ssi = createSystemIter(new Range(), readers, columns);
     
     int walogs = 0;
     
     while (ssi.hasTop()) {
-      if (ssi.getTopKey().compareColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY) == 0) {
+      if (ssi.getTopKey().compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
         allFiles.add(fs.getFullPath(ssi.getTopKey()).toString());
       } else {
         walogs++;
@@ -261,7 +264,7 @@ public class OfflineMetadataScanner extends ScannerOptions implements Scanner {
     ServerConfiguration conf = new ServerConfiguration(HdfsZooInstance.getInstance());
     VolumeManager fs = VolumeManagerImpl.get();
     OfflineMetadataScanner scanner = new OfflineMetadataScanner(conf.getConfiguration(), fs);
-    scanner.setRange(MetadataTable.KEYSPACE);
+    scanner.setRange(MetadataSchema.TabletsSection.getRange());
     for (Entry<Key,Value> entry : scanner)
       System.out.println(entry.getKey() + " " + entry.getValue());
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java b/server/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java
index dad3b64..c1795e3 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java
@@ -26,7 +26,9 @@ import org.apache.accumulo.core.client.Scanner;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
@@ -43,7 +45,7 @@ public class RemoveEntriesForMissingFiles {
   private static Logger log = Logger.getLogger(RemoveEntriesForMissingFiles.class);
   
   static class Opts extends ClientOpts {
-    @Parameter(names="--fix")
+    @Parameter(names = "--fix")
     boolean fix = false;
   }
   
@@ -56,11 +58,11 @@ public class RemoveEntriesForMissingFiles {
     Connector connector = opts.getConnector();
     Scanner metadata = connector.createScanner(MetadataTable.NAME, opts.auths);
     metadata.setBatchSize(scanOpts.scanBatchSize);
-    metadata.setRange(MetadataTable.KEYSPACE);
-    metadata.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
+    metadata.setRange(MetadataSchema.TabletsSection.getRange());
+    metadata.fetchColumnFamily(DataFileColumnFamily.NAME);
     int count = 0;
     int missing = 0;
-    BatchWriter writer = null; 
+    BatchWriter writer = null;
     if (opts.fix)
       writer = connector.createBatchWriter(MetadataTable.NAME, bwOpts.getBatchWriterConfig());
     for (Entry<Key,Value> entry : metadata) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java b/server/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java
index ff79921..377542c 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/TableDiskUsage.java
@@ -39,6 +39,8 @@ import org.apache.accumulo.core.conf.DefaultConfiguration;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.util.NumUtil;
 import org.apache.accumulo.server.ServerConstants;
@@ -54,7 +56,6 @@ import com.beust.jcommander.Parameter;
 
 public class TableDiskUsage {
   
-  
  private static final Logger log = Logger.getLogger(TableDiskUsage.class);
   private int nextInternalId = 0;
   private Map<String,Integer> internalIds = new HashMap<String,Integer>();
@@ -91,7 +92,7 @@ public class TableDiskUsage {
   }
   
   Map<List<String>,Long> calculateUsage() {
-
+    
     Map<List<Integer>,Long> usage = new HashMap<List<Integer>,Long>();
     
     for (Entry<String,Integer[]> entry : tableFiles.entrySet()) {
@@ -138,8 +139,7 @@ public class TableDiskUsage {
     }, humanReadable);
   }
   
-  public static Map<TreeSet<String>,Long> getDiskUsage(AccumuloConfiguration acuConf, Set<String> tableIds, FileSystem fs, Connector conn)
-      throws IOException {
+  public static Map<TreeSet<String>,Long> getDiskUsage(AccumuloConfiguration acuConf, Set<String> tableIds, FileSystem fs, Connector conn) throws IOException {
     TableDiskUsage tdu = new TableDiskUsage();
     
     for (String tableId : tableIds)
@@ -155,7 +155,7 @@ public class TableDiskUsage {
       } catch (TableNotFoundException e) {
         throw new RuntimeException(e);
       }
-      mdScanner.fetchColumnFamily(MetadataTable.DATAFILE_COLUMN_FAMILY);
+      mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
       mdScanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
       
       if (!mdScanner.iterator().hasNext()) {
@@ -264,13 +264,12 @@ public class TableDiskUsage {
       printer.print(String.format(valueFormat + " %s", value, entry.getKey()));
     }
   }
-
   
   static class Opts extends ClientOpts {
-    @Parameter(description=" <table> { <table> ... } ")
+    @Parameter(description = " <table> { <table> ... } ")
     List<String> tables = new ArrayList<String>();
   }
-    
+  
   /**
    * @param args
    */
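
Aside from the formatting cleanup, the reflowed getDiskUsage signature above remains the entry point for the shared-usage computation: the returned map is keyed by the set of tables that share a group of files, with the bytes attributable to that group as the value. A hedged usage sketch follows; conn, fs, and tableIds are illustrative placeholders, and DefaultConfiguration stands in for whatever configuration a caller would really pass.

import java.io.IOException;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.server.util.TableDiskUsage;
import org.apache.hadoop.fs.FileSystem;

class DiskUsageSketch {
  // conn, fs, and tableIds are hypothetical placeholders
  static void report(Connector conn, FileSystem fs, Set<String> tableIds) throws IOException {
    Map<TreeSet<String>,Long> usage = TableDiskUsage.getDiskUsage(DefaultConfiguration.getInstance(), tableIds, fs, conn);
    for (Map.Entry<TreeSet<String>,Long> e : usage.entrySet())
      // each key is a group of tables sharing files; the value is their shared bytes
      System.out.println(e.getKey() + " : " + e.getValue() + " bytes");
  }
}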

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/util/TabletIterator.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/TabletIterator.java b/server/src/main/java/org/apache/accumulo/server/util/TabletIterator.java
index e5d60df..8dd414b 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/TabletIterator.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/TabletIterator.java
@@ -30,7 +30,8 @@ import org.apache.accumulo.core.data.KeyExtent;
 import org.apache.accumulo.core.data.PartialKey;
 import org.apache.accumulo.core.data.Range;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 import org.apache.accumulo.core.util.UtilWaitThread;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
@@ -93,8 +94,8 @@ public class TabletIterator implements Iterator<Map<Key,Value>> {
     this.scanner = s;
     this.range = range;
     this.scanner.setRange(range);
-    MetadataTable.PREV_ROW_COLUMN.fetch(scanner);
-    MetadataTable.DIRECTORY_COLUMN.fetch(scanner);
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.fetch(scanner);
+    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(scanner);
     this.iter = s.iterator();
     this.returnPrevEndRow = returnPrevEndRow;
     this.returnDir = returnDir;
@@ -112,7 +113,7 @@ public class TabletIterator implements Iterator<Map<Key,Value>> {
       Key prevEndRowKey = currentTabletKeys.lastKey();
       Value prevEndRowValue = currentTabletKeys.get(prevEndRowKey);
       
-      if (!MetadataTable.PREV_ROW_COLUMN.hasColumns(prevEndRowKey)) {
+      if (!TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(prevEndRowKey)) {
         log.debug(currentTabletKeys);
         throw new RuntimeException("Unexpected key " + prevEndRowKey);
       }
@@ -176,11 +177,11 @@ public class TabletIterator implements Iterator<Map<Key,Value>> {
     
     while (esIter.hasNext()) {
       Map.Entry<Key,Value> entry = esIter.next();
-      if (!returnPrevEndRow && MetadataTable.PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
+      if (!returnPrevEndRow && TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
         esIter.remove();
       }
       
-      if (!returnDir && MetadataTable.DIRECTORY_COLUMN.hasColumns(entry.getKey())) {
+      if (!returnDir && TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(entry.getKey())) {
         esIter.remove();
       }
     }
@@ -216,7 +217,7 @@ public class TabletIterator implements Iterator<Map<Key,Value>> {
         
         tm.put(entry.getKey(), entry.getValue());
         
-        if (MetadataTable.PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
+        if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(entry.getKey())) {
           sawPrevEndRow = true;
           break;
         }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java b/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
index def12c6..67220b5 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/VerifyTabletAssignments.java
@@ -22,9 +22,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.SortedSet;
 import java.util.TreeMap;
-import java.util.TreeSet;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
@@ -34,6 +32,7 @@ import org.apache.accumulo.core.client.AccumuloSecurityException;
 import org.apache.accumulo.core.client.Connector;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.client.impl.Tables;
 import org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.data.KeyExtent;
@@ -44,6 +43,7 @@ import org.apache.accumulo.core.data.thrift.MultiScanResult;
 import org.apache.accumulo.core.data.thrift.TColumn;
 import org.apache.accumulo.core.data.thrift.TKeyExtent;
 import org.apache.accumulo.core.data.thrift.TRange;
+import org.apache.accumulo.core.metadata.MetadataServicer;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
@@ -85,32 +85,36 @@ public class VerifyTabletAssignments {
     else
       System.out.println("Checking table " + tableName + " again, failures " + check.size());
     
-    Map<KeyExtent,String> locations = new TreeMap<KeyExtent,String>();
-    SortedSet<KeyExtent> tablets = new TreeSet<KeyExtent>();
+    TreeMap<KeyExtent,String> tabletLocations = new TreeMap<KeyExtent,String>();
     
     Connector conn = opts.getConnector();
     Instance inst = conn.getInstance();
-    MetadataTable.getEntries(conn.getInstance(), CredentialHelper.create(opts.principal, opts.getToken(), opts.instance), tableName, false, locations, tablets);
+    String tableId = Tables.getNameToIdMap(inst).get(tableName);
+    TCredentials credentials = CredentialHelper.create(opts.principal, opts.getToken(), opts.instance);
+    MetadataServicer.forTableId(conn.getInstance(), credentials, tableId).getTabletLocations(tabletLocations);
     
     final HashSet<KeyExtent> failures = new HashSet<KeyExtent>();
     
-    for (KeyExtent keyExtent : tablets)
-      if (!locations.containsKey(keyExtent))
-        System.out.println(" Tablet " + keyExtent + " has no location");
-      else if (opts.verbose)
-        System.out.println(" Tablet " + keyExtent + " is located at " + locations.get(keyExtent));
-    
     Map<String,List<KeyExtent>> extentsPerServer = new TreeMap<String,List<KeyExtent>>();
     
-    for (Entry<KeyExtent,String> entry : locations.entrySet()) {
-      List<KeyExtent> extentList = extentsPerServer.get(entry.getValue());
-      if (extentList == null) {
-        extentList = new ArrayList<KeyExtent>();
-        extentsPerServer.put(entry.getValue(), extentList);
-      }
+    for (Entry<KeyExtent,String> entry : tabletLocations.entrySet()) {
+      KeyExtent keyExtent = entry.getKey();
+      String loc = entry.getValue();
+      if (loc == null)
+        System.out.println(" Tablet " + keyExtent + " has no location");
+      else if (opts.verbose)
+        System.out.println(" Tablet " + keyExtent + " is located at " + loc);
       
-      if (check == null || check.contains(entry.getKey()))
-        extentList.add(entry.getKey());
+      if (loc != null) {
+        List<KeyExtent> extentList = extentsPerServer.get(loc);
+        if (extentList == null) {
+          extentList = new ArrayList<KeyExtent>();
+          extentsPerServer.put(loc, extentList);
+        }
+        
+        if (check == null || check.contains(keyExtent))
+          extentList.add(keyExtent);
+      }
     }
     
     ExecutorService tp = Executors.newFixedThreadPool(20);
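
The rewritten loop above folds the old locations/tablets pair into a single map from MetadataServicer, with a null value now standing in for the former "tablet has no location" case. A minimal sketch of the same lookup in isolation, matching the hunk; inst, credentials, and tableName are illustrative placeholders.

import java.util.Map;
import java.util.TreeMap;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.impl.Tables;
import org.apache.accumulo.core.data.KeyExtent;
import org.apache.accumulo.core.metadata.MetadataServicer;
import org.apache.accumulo.core.security.thrift.TCredentials;

class TabletLocationSketch {
  // inst, credentials, and tableName are hypothetical placeholders
  static void printLocations(Instance inst, TCredentials credentials, String tableName) throws Exception {
    String tableId = Tables.getNameToIdMap(inst).get(tableName);
    TreeMap<KeyExtent,String> locations = new TreeMap<KeyExtent,String>();
    MetadataServicer.forTableId(inst, credentials, tableId).getTabletLocations(locations);
    for (Map.Entry<KeyExtent,String> e : locations.entrySet())
      // a null value means the tablet currently has no assignment
      System.out.println(" Tablet " + e.getKey() + (e.getValue() == null ? " has no location" : " is located at " + e.getValue()));
  }
}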

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
----------------------------------------------------------------------
diff --git a/server/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java b/server/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
index 7ed3c2d..45d71c5 100644
--- a/server/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
+++ b/server/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
@@ -25,7 +25,8 @@ import java.util.List;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-import org.apache.accumulo.core.util.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Level;
@@ -53,12 +54,12 @@ public class MetadataConstraintsTest {
       };
     }
   }
-
+  
   @Test
   public void testCheck() {
     Logger.getLogger(AccumuloConfiguration.class).setLevel(Level.ERROR);
     Mutation m = new Mutation(new Text("0;foo"));
-    MetadataTable.PREV_ROW_COLUMN.put(m, new Value("1foo".getBytes()));
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("1foo".getBytes()));
     
     MetadataConstraints mc = new MetadataConstraints();
     
@@ -69,7 +70,7 @@ public class MetadataConstraintsTest {
     assertEquals(Short.valueOf((short) 3), violations.get(0));
     
     m = new Mutation(new Text("0:foo"));
-    MetadataTable.PREV_ROW_COLUMN.put(m, new Value("1poo".getBytes()));
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("1poo".getBytes()));
     
     violations = mc.check(null, m);
     
@@ -87,7 +88,7 @@ public class MetadataConstraintsTest {
     assertEquals(Short.valueOf((short) 2), violations.get(0));
     
     m = new Mutation(new Text("!!<"));
-    MetadataTable.PREV_ROW_COLUMN.put(m, new Value("1poo".getBytes()));
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("1poo".getBytes()));
     
     violations = mc.check(null, m);
     
@@ -97,7 +98,7 @@ public class MetadataConstraintsTest {
     assertEquals(Short.valueOf((short) 5), violations.get(1));
     
     m = new Mutation(new Text("0;foo"));
-    MetadataTable.PREV_ROW_COLUMN.put(m, new Value("".getBytes()));
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("".getBytes()));
     
     violations = mc.check(null, m);
     
@@ -106,28 +107,28 @@ public class MetadataConstraintsTest {
     assertEquals(Short.valueOf((short) 6), violations.get(0));
     
     m = new Mutation(new Text("0;foo"));
-    MetadataTable.PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
     
     violations = mc.check(null, m);
     
     assertEquals(null, violations);
     
     m = new Mutation(new Text("!0<"));
-    MetadataTable.PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
     
     violations = mc.check(null, m);
     
     assertEquals(null, violations);
     
     m = new Mutation(new Text("!1<"));
-    MetadataTable.PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
+    TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m, new Value("bar".getBytes()));
     
     violations = mc.check(null, m);
     
     assertNotNull(violations);
     assertEquals(1, violations.size());
     assertEquals(Short.valueOf((short) 4), violations.get(0));
-
+    
   }
   
   @Test
@@ -135,20 +136,20 @@ public class MetadataConstraintsTest {
     MetadataConstraints mc = new TestMetadataConstraints();
     Mutation m;
     List<Short> violations;
-
+    
     // inactive txid
     m = new Mutation(new Text("0;foo"));
-    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("12345".getBytes()));
-    m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
+    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("12345".getBytes()));
+    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
     violations = mc.check(null, m);
     assertNotNull(violations);
     assertEquals(1, violations.size());
-    assertEquals(Short.valueOf((short)8), violations.get(0));
+    assertEquals(Short.valueOf((short) 8), violations.get(0));
     
     // txid that throws exception
     m = new Mutation(new Text("0;foo"));
-    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("9".getBytes()));
-    m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
+    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("9".getBytes()));
+    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
     violations = mc.check(null, m);
     assertNotNull(violations);
     assertEquals(1, violations.size());
@@ -156,14 +157,14 @@ public class MetadataConstraintsTest {
     
     // active txid w/ file
     m = new Mutation(new Text("0;foo"));
-    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
-    m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
+    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
+    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
     violations = mc.check(null, m);
     assertNull(violations);
     
     // active txid w/o file
     m = new Mutation(new Text("0;foo"));
-    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
+    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
     violations = mc.check(null, m);
     assertNotNull(violations);
     assertEquals(1, violations.size());
@@ -171,69 +172,68 @@ public class MetadataConstraintsTest {
     
     // two active txids w/ files
     m = new Mutation(new Text("0;foo"));
-    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
-    m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
-    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile2"), new Value("7".getBytes()));
-    m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile2"), new Value("1,1".getBytes()));
+    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
+    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
+    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile2"), new Value("7".getBytes()));
+    m.put(DataFileColumnFamily.NAME, new Text("/someFile2"), new Value("1,1".getBytes()));
     violations = mc.check(null, m);
     assertNotNull(violations);
     assertEquals(1, violations.size());
     assertEquals(Short.valueOf((short) 8), violations.get(0));
-
+    
     // two files w/ one active txid
     m = new Mutation(new Text("0;foo"));
-    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
-    m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
-    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile2"), new Value("5".getBytes()));
-    m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile2"), new Value("1,1".getBytes()));
+    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
+    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
+    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile2"), new Value("5".getBytes()));
+    m.put(DataFileColumnFamily.NAME, new Text("/someFile2"), new Value("1,1".getBytes()));
     violations = mc.check(null, m);
     assertNull(violations);
-
+    
     // two loaded w/ one active txid and one file
     m = new Mutation(new Text("0;foo"));
-    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
-    m.put(MetadataTable.DATAFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("1,1".getBytes()));
-    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile2"), new Value("5".getBytes()));
+    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
+    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
+    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile2"), new Value("5".getBytes()));
     violations = mc.check(null, m);
     assertNotNull(violations);
     assertEquals(1, violations.size());
     assertEquals(Short.valueOf((short) 8), violations.get(0));
-
+    
     // active txid, mutation that looks like split
     m = new Mutation(new Text("0;foo"));
-    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
-    MetadataTable.DIRECTORY_COLUMN.put(m, new Value("/t1".getBytes()));
+    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
+    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t1".getBytes()));
     violations = mc.check(null, m);
     assertNull(violations);
     
     // inactive txid, mutation that looks like split
     m = new Mutation(new Text("0;foo"));
-    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("12345".getBytes()));
-    MetadataTable.DIRECTORY_COLUMN.put(m, new Value("/t1".getBytes()));
+    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("12345".getBytes()));
+    TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t1".getBytes()));
     violations = mc.check(null, m);
     assertNull(violations);
     
     // active txid, mutation that looks like a load
     m = new Mutation(new Text("0;foo"));
-    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("5".getBytes()));
-    m.put(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, new Text("789"), new Value("127.0.0.1:9997".getBytes()));
+    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
+    m.put(TabletsSection.CurrentLocationColumnFamily.NAME, new Text("789"), new Value("127.0.0.1:9997".getBytes()));
     violations = mc.check(null, m);
     assertNull(violations);
     
     // inactive txid, mutation that looks like a load
     m = new Mutation(new Text("0;foo"));
-    m.put(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"), new Value("12345".getBytes()));
-    m.put(MetadataTable.CURRENT_LOCATION_COLUMN_FAMILY, new Text("789"), new Value("127.0.0.1:9997".getBytes()));
+    m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("12345".getBytes()));
+    m.put(TabletsSection.CurrentLocationColumnFamily.NAME, new Text("789"), new Value("127.0.0.1:9997".getBytes()));
     violations = mc.check(null, m);
     assertNull(violations);
     
     // deleting a load flag
     m = new Mutation(new Text("0;foo"));
-    m.putDelete(MetadataTable.BULKFILE_COLUMN_FAMILY, new Text("/someFile"));
+    m.putDelete(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"));
     violations = mc.check(null, m);
     assertNull(violations);
-
-
+    
   }
   
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/446a37a9/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java
----------------------------------------------------------------------
diff --git a/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java b/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java
index 368da86..9ac0b50 100644
--- a/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java
+++ b/server/src/test/java/org/apache/accumulo/server/gc/TestConfirmDeletes.java
@@ -31,10 +31,10 @@ import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.data.Key;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
 import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.CredentialHelper;
 import org.apache.accumulo.core.security.thrift.TCredentials;
-import org.apache.accumulo.core.util.MetadataTable;
 import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.hadoop.io.Text;


[20/50] [abbrv] git commit: ACCUMULO-1557 applying Jonathan Hsieh's patch

Posted by ct...@apache.org.
ACCUMULO-1557 applying Jonathan Hsieh's patch

git-svn-id: https://svn.apache.org/repos/asf/accumulo/branches/1.5@1500878 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/d7a7fbcd
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/d7a7fbcd
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/d7a7fbcd

Branch: refs/heads/ACCUMULO-1496
Commit: d7a7fbcddce486aef1b59146dc530c0cf8bf2465
Parents: 86f1a22
Author: Eric C. Newton <ec...@apache.org>
Authored: Mon Jul 8 18:48:45 2013 +0000
Committer: Eric C. Newton <ec...@apache.org>
Committed: Mon Jul 8 18:48:45 2013 +0000

----------------------------------------------------------------------
 test/system/auto/simple/zooCacheTest.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/d7a7fbcd/test/system/auto/simple/zooCacheTest.py
----------------------------------------------------------------------
diff --git a/test/system/auto/simple/zooCacheTest.py b/test/system/auto/simple/zooCacheTest.py
index e1e4bce..fcbb3ab 100755
--- a/test/system/auto/simple/zooCacheTest.py
+++ b/test/system/auto/simple/zooCacheTest.py
@@ -14,7 +14,7 @@
 # limitations under the License.
 
 import os
-
+import shutil
 import unittest
 import time
 
@@ -30,6 +30,7 @@ class ZooCacheTest(TestUtilsMixin, unittest.TestCase):
         self.create_config_file(self.settings.copy())
         
     def runTest(self):
+        shutil.rmtree('/tmp/zcTest-42', ignore_errors=True)  # tolerate a missing directory on the first run
         handleCC = self.runClassOn('localhost', 'org.apache.accumulo.test.functional.CacheTestClean', ['/zcTest-42','/tmp/zcTest-42'])
         self.waitForStop(handleCC, 10)
         handleR1 = self.runClassOn('localhost', 'org.apache.accumulo.test.functional.CacheTestReader', ['/zcTest-42','/tmp/zcTest-42', ZOOKEEPERS])


[44/50] [abbrv] git commit: ACCUMULO-1496 slightly more optimized version using scannotation

Posted by ct...@apache.org.
ACCUMULO-1496 slightly more optimized version using scannotation

git-svn-id: https://svn.apache.org/repos/asf/accumulo/branches/ACCUMULO-1496@1491920 13f79535-47bb-0310-9956-ffa450edef68


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/424caaa1
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/424caaa1
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/424caaa1

Branch: refs/heads/ACCUMULO-1496
Commit: 424caaa1356dc47be8e8c63b875b3121a6e6bc89
Parents: dd0b97e
Author: Christopher Tubbs <ct...@apache.org>
Authored: Tue Jun 11 19:33:53 2013 +0000
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Tue Jul 16 15:02:12 2013 -0400

----------------------------------------------------------------------
 api/pom.xml                                     | 27 --------
 .../api/annotations/AccumuloService.java        | 33 ----------
 assemble/pom.xml                                |  4 --
 bin/accumulo                                    |  6 +-
 .../accumulo/core/file/rfile/PrintInfo.java     |  2 +-
 .../apache/accumulo/core/util/CreateToken.java  |  2 +-
 .../apache/accumulo/core/util/shell/Shell.java  |  2 +-
 .../minicluster/MiniAccumuloRunner.java         |  2 +-
 pom.xml                                         |  6 --
 .../java/org/apache/accumulo/proxy/Proxy.java   |  2 +-
 .../server/gc/SimpleGarbageCollector.java       |  2 +-
 .../apache/accumulo/server/master/Master.java   |  2 +-
 .../apache/accumulo/server/monitor/Monitor.java |  2 +-
 .../server/tabletserver/TabletServer.java       |  2 +-
 .../accumulo/server/trace/TraceServer.java      |  2 +-
 .../org/apache/accumulo/server/util/Admin.java  |  2 +-
 .../apache/accumulo/server/util/Initialize.java |  2 +-
 .../accumulo/server/util/ZooKeeperMain.java     |  3 +-
 start/pom.xml                                   |  4 --
 .../java/org/apache/accumulo/start/Main.java    | 47 +++++---------
 .../start/annotations/AccumuloService.java      | 33 ++++++++++
 .../classloader/vfs/AccumuloVFSClassLoader.java | 68 ++++++++++++++++++--
 22 files changed, 128 insertions(+), 127 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/api/pom.xml
----------------------------------------------------------------------
diff --git a/api/pom.xml b/api/pom.xml
deleted file mode 100644
index 9fe5f60..0000000
--- a/api/pom.xml
+++ /dev/null
@@ -1,27 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.accumulo</groupId>
-    <artifactId>accumulo-project</artifactId>
-    <version>1.6.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>accumulo-api</artifactId>
-  <name>API</name>
-</project>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/api/src/main/java/org/apache/accumulo/api/annotations/AccumuloService.java
----------------------------------------------------------------------
diff --git a/api/src/main/java/org/apache/accumulo/api/annotations/AccumuloService.java b/api/src/main/java/org/apache/accumulo/api/annotations/AccumuloService.java
deleted file mode 100644
index 31fbc67..0000000
--- a/api/src/main/java/org/apache/accumulo/api/annotations/AccumuloService.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.accumulo.api.annotations;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-/**
- * Annotation to describe a class that can be launched by Accumulo
- * 
- * @since 1.6.0
- */
-@Retention(RetentionPolicy.RUNTIME)
-@Target(ElementType.TYPE)
-public @interface AccumuloService {
-  String value();
-}
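
The annotation deleted here is recreated under org.apache.accumulo.start.annotations (see the file summary above), where the launcher can discover tagged classes by scanning jars. A rough sketch of such a lookup using scannotation's AnnotationDB, the library this commit wires in; the jar URL is an illustrative placeholder, and this is not the exact logic of Main.java, which this mail does not show.

import java.io.IOException;
import java.net.URL;
import java.util.Map;
import java.util.Set;
import org.scannotation.AnnotationDB;

class ServiceLookupSketch {
  // jarUrl is a hypothetical placeholder
  static Set<String> classesTaggedAsServices(URL jarUrl) throws IOException {
    AnnotationDB db = new AnnotationDB();
    db.scanArchives(jarUrl); // index every annotation occurrence in the jar
    Map<String,Set<String>> index = db.getAnnotationIndex();
    // names of classes carrying @AccumuloService
    return index.get("org.apache.accumulo.start.annotations.AccumuloService");
  }
}

Each returned class name would then be loaded so its value() keyword can be compared against the requested service, which is the dispatch the commit summary describes.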

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/assemble/pom.xml
----------------------------------------------------------------------
diff --git a/assemble/pom.xml b/assemble/pom.xml
index 7584510..96a1094 100644
--- a/assemble/pom.xml
+++ b/assemble/pom.xml
@@ -52,10 +52,6 @@
     </dependency>
     <dependency>
       <groupId>org.apache.accumulo</groupId>
-      <artifactId>accumulo-api</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.accumulo</groupId>
       <artifactId>accumulo-core</artifactId>
     </dependency>
     <dependency>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/bin/accumulo
----------------------------------------------------------------------
diff --git a/bin/accumulo b/bin/accumulo
index a3b0a21..a55e579 100755
--- a/bin/accumulo
+++ b/bin/accumulo
@@ -29,9 +29,7 @@ script=$( basename "$SOURCE" )
 . "$bin"/config.sh
 
 START_JAR=$ACCUMULO_HOME/lib/accumulo-start.jar
-API_JAR=$ACCUMULO_HOME/lib/accumulo-api.jar
-SCANNOTATION_JAR=$ACCUMULO_HOME/lib/scannotation.jar
-JAVASSIST_JAR=$ACCUMULO_HOME/lib/javassist.jar
+SCANNOTATION_JARS=$ACCUMULO_HOME/lib/scannotation.jar:$ACCUMULO_HOME/lib/javassist.jar
 
 #
 # Resolve a program to its installation directory
@@ -84,7 +82,7 @@ esac
 
 XML_FILES=${ACCUMULO_HOME}/conf
 LOG4J_JAR=$(find $HADOOP_PREFIX/lib $HADOOP_PREFIX/share/hadoop/common/lib -name 'log4j*.jar' -print 2>/dev/null | head -1)
-CLASSPATH=${XML_FILES}:${API_JAR}:${JAVASSIST_JAR}:${SCANNOTATION_JAR}:${START_JAR}:${LOG4J_JAR}
+CLASSPATH=${XML_FILES}:${SCANNOTATION_JARS}:${START_JAR}:${LOG4J_JAR}
 
 if [ -z "$JAVA_HOME" -o ! -d "$JAVA_HOME" ]; then
    echo "JAVA_HOME is not set or is not a directory.  Please make sure it's set globally or in conf/accumulo-env.sh"

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java b/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
index 2f06474..029a3f5 100644
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/PrintInfo.java
@@ -19,7 +19,6 @@ package org.apache.accumulo.core.file.rfile;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.data.ByteSequence;
@@ -29,6 +28,7 @@ import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.file.FileUtil;
 import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile;
 import org.apache.accumulo.core.file.rfile.RFile.Reader;
+import org.apache.accumulo.start.annotations.AccumuloService;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java b/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
index 5b25424..1617626 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/CreateToken.java
@@ -23,7 +23,6 @@ import java.io.PrintStream;
 
 import jline.console.ConsoleReader;
 
-import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.ClientOpts.Password;
 import org.apache.accumulo.core.cli.ClientOpts.PasswordConverter;
@@ -33,6 +32,7 @@ import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.Prope
 import org.apache.accumulo.core.client.security.tokens.AuthenticationToken.TokenProperty;
 import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 import org.apache.accumulo.core.security.CredentialHelper;
+import org.apache.accumulo.start.annotations.AccumuloService;
 
 import com.beust.jcommander.Parameter;
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java b/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
index 129fff7..92e09b7 100644
--- a/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/shell/Shell.java
@@ -37,7 +37,6 @@ import java.util.UUID;
 import jline.console.ConsoleReader;
 import jline.console.history.FileHistory;
 
-import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
@@ -142,6 +141,7 @@ import org.apache.accumulo.core.util.shell.commands.UserPermissionsCommand;
 import org.apache.accumulo.core.util.shell.commands.UsersCommand;
 import org.apache.accumulo.core.util.shell.commands.WhoAmICommand;
 import org.apache.accumulo.fate.zookeeper.ZooReader;
+import org.apache.accumulo.start.annotations.AccumuloService;
 import org.apache.commons.cli.BasicParser;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.HelpFormatter;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java
----------------------------------------------------------------------
diff --git a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java
index d3b2f94..e6c3971 100644
--- a/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java
+++ b/minicluster/src/main/java/org/apache/accumulo/minicluster/MiniAccumuloRunner.java
@@ -27,9 +27,9 @@ import java.util.Map;
 import java.util.Properties;
 import java.util.regex.Pattern;
 
-import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.util.Pair;
+import org.apache.accumulo.start.annotations.AccumuloService;
 import org.apache.commons.io.FileUtils;
 
 import com.beust.jcommander.IStringConverter;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 6d6539c..f1534f7 100644
--- a/pom.xml
+++ b/pom.xml
@@ -72,7 +72,6 @@
     <maven>${maven.min-version}</maven>
   </prerequisites>
   <modules>
-    <module>api</module>
     <module>trace</module>
     <module>core</module>
     <module>fate</module>
@@ -213,11 +212,6 @@
       </dependency>
       <dependency>
         <groupId>org.apache.accumulo</groupId>
-        <artifactId>accumulo-api</artifactId>
-        <version>${project.version}</version>
-      </dependency>
-      <dependency>
-        <groupId>org.apache.accumulo</groupId>
         <artifactId>accumulo-core</artifactId>
         <version>${project.version}</version>
       </dependency>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
----------------------------------------------------------------------
diff --git a/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java b/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
index 9552456..4e9b256 100644
--- a/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
+++ b/proxy/src/main/java/org/apache/accumulo/proxy/Proxy.java
@@ -23,11 +23,11 @@ import java.io.InputStream;
 import java.lang.reflect.Constructor;
 import java.util.Properties;
 
-import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.minicluster.MiniAccumuloCluster;
 import org.apache.accumulo.proxy.thrift.AccumuloProxy;
+import org.apache.accumulo.start.annotations.AccumuloService;
 import org.apache.log4j.Logger;
 import org.apache.thrift.TProcessor;
 import org.apache.thrift.protocol.TCompactProtocol;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java b/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
index 864717f..4c717d6 100644
--- a/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
+++ b/server/src/main/java/org/apache/accumulo/server/gc/SimpleGarbageCollector.java
@@ -36,7 +36,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.client.AccumuloException;
@@ -91,6 +90,7 @@ import org.apache.accumulo.server.util.Halt;
 import org.apache.accumulo.server.util.TServerUtils;
 import org.apache.accumulo.server.util.TabletIterator;
 import org.apache.accumulo.server.zookeeper.ZooLock;
+import org.apache.accumulo.start.annotations.AccumuloService;
 import org.apache.accumulo.trace.instrument.CountSampler;
 import org.apache.accumulo.trace.instrument.Sampler;
 import org.apache.accumulo.trace.instrument.Span;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/server/src/main/java/org/apache/accumulo/server/master/Master.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/master/Master.java b/server/src/main/java/org/apache/accumulo/server/master/Master.java
index fa8a7e5..30894fd 100644
--- a/server/src/main/java/org/apache/accumulo/server/master/Master.java
+++ b/server/src/main/java/org/apache/accumulo/server/master/Master.java
@@ -34,7 +34,6 @@ import java.util.TreeMap;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
@@ -143,6 +142,7 @@ import org.apache.accumulo.server.util.TabletIterator.TabletDeletedException;
 import org.apache.accumulo.server.util.time.SimpleTimer;
 import org.apache.accumulo.server.zookeeper.ZooLock;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.accumulo.start.annotations.AccumuloService;
 import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;
 import org.apache.accumulo.trace.instrument.thrift.TraceWrap;
 import org.apache.accumulo.trace.thrift.TInfo;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java b/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java
index 6069f64..6bca1a3 100644
--- a/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java
+++ b/server/src/main/java/org/apache/accumulo/server/monitor/Monitor.java
@@ -27,7 +27,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 
-import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.client.impl.MasterClient;
@@ -73,6 +72,7 @@ import org.apache.accumulo.server.problems.ProblemReports;
 import org.apache.accumulo.server.problems.ProblemType;
 import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.util.EmbeddedWebServer;
+import org.apache.accumulo.start.annotations.AccumuloService;
 import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.log4j.Logger;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
index 9ddf6bb..bdf7947 100644
--- a/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
+++ b/server/src/main/java/org/apache/accumulo/server/tabletserver/TabletServer.java
@@ -62,7 +62,6 @@ import java.util.concurrent.atomic.AtomicReference;
 import javax.management.ObjectName;
 import javax.management.StandardMBean;
 
-import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
@@ -198,6 +197,7 @@ import org.apache.accumulo.server.zookeeper.ZooCache;
 import org.apache.accumulo.server.zookeeper.ZooLock;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
 import org.apache.accumulo.start.Platform;
+import org.apache.accumulo.start.annotations.AccumuloService;
 import org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader;
 import org.apache.accumulo.start.classloader.vfs.ContextManager;
 import org.apache.accumulo.trace.instrument.Span;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/server/src/main/java/org/apache/accumulo/server/trace/TraceServer.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/trace/TraceServer.java b/server/src/main/java/org/apache/accumulo/server/trace/TraceServer.java
index 841166d..09cebd1 100644
--- a/server/src/main/java/org/apache/accumulo/server/trace/TraceServer.java
+++ b/server/src/main/java/org/apache/accumulo/server/trace/TraceServer.java
@@ -23,7 +23,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.BatchWriterConfig;
@@ -52,6 +51,7 @@ import org.apache.accumulo.server.fs.VolumeManager;
 import org.apache.accumulo.server.fs.VolumeManagerImpl;
 import org.apache.accumulo.server.util.time.SimpleTimer;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.accumulo.start.annotations.AccumuloService;
 import org.apache.accumulo.start.classloader.AccumuloClassLoader;
 import org.apache.accumulo.trace.instrument.Span;
 import org.apache.accumulo.trace.thrift.RemoteSpan;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/server/src/main/java/org/apache/accumulo/server/util/Admin.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/Admin.java b/server/src/main/java/org/apache/accumulo/server/util/Admin.java
index a415de7..88dca6f 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/Admin.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/Admin.java
@@ -22,7 +22,6 @@ import java.util.List;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.client.AccumuloException;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
@@ -39,6 +38,7 @@ import org.apache.accumulo.core.security.thrift.TCredentials;
 import org.apache.accumulo.server.cli.ClientOpts;
 import org.apache.accumulo.server.client.HdfsZooInstance;
 import org.apache.accumulo.server.security.SecurityConstants;
+import org.apache.accumulo.start.annotations.AccumuloService;
 import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.log4j.Logger;
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/Initialize.java b/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
index af053aa..288a135 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/Initialize.java
@@ -28,7 +28,6 @@ import java.util.UUID;
 
 import jline.console.ConsoleReader;
 
-import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.client.AccumuloSecurityException;
@@ -68,6 +67,7 @@ import org.apache.accumulo.server.security.AuditedSecurityOperation;
 import org.apache.accumulo.server.security.SecurityConstants;
 import org.apache.accumulo.server.tabletserver.TabletTime;
 import org.apache.accumulo.server.zookeeper.ZooReaderWriter;
+import org.apache.accumulo.start.annotations.AccumuloService;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/server/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
----------------------------------------------------------------------
diff --git a/server/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java b/server/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
index dc01915..730f421 100644
--- a/server/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
+++ b/server/src/main/java/org/apache/accumulo/server/util/ZooKeeperMain.java
@@ -16,13 +16,14 @@
  */
 package org.apache.accumulo.server.util;
 
-import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.core.Constants;
 import org.apache.accumulo.core.cli.Help;
 import org.apache.accumulo.core.client.Instance;
 import org.apache.accumulo.core.util.CachedConfiguration;
 import org.apache.accumulo.server.ServerConstants;
 import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.conf.ServerConfiguration;
+import org.apache.accumulo.start.annotations.AccumuloService;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/start/pom.xml
----------------------------------------------------------------------
diff --git a/start/pom.xml b/start/pom.xml
index 133413e..61496bf 100644
--- a/start/pom.xml
+++ b/start/pom.xml
@@ -30,10 +30,6 @@
       <artifactId>scannotation</artifactId>
     </dependency>
     <dependency>
-      <groupId>org.apache.accumulo</groupId>
-      <artifactId>accumulo-api</artifactId>
-    </dependency>
-    <dependency>
       <groupId>org.apache.commons</groupId>
       <artifactId>commons-vfs2</artifactId>
     </dependency>

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/start/src/main/java/org/apache/accumulo/start/Main.java
----------------------------------------------------------------------
diff --git a/start/src/main/java/org/apache/accumulo/start/Main.java b/start/src/main/java/org/apache/accumulo/start/Main.java
index 3821c3f..98ca303 100644
--- a/start/src/main/java/org/apache/accumulo/start/Main.java
+++ b/start/src/main/java/org/apache/accumulo/start/Main.java
@@ -16,22 +16,15 @@
  */
 package org.apache.accumulo.start;
 
-import java.io.IOException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Modifier;
-import java.net.URL;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.Set;
 
-import org.apache.accumulo.api.annotations.AccumuloService;
 import org.apache.accumulo.start.classloader.AccumuloClassLoader;
-import org.scannotation.AnnotationDB;
 
 public class Main {
   
-  private static AnnotationDB annotationDatabase;
-  
   public static void main(String[] args) throws Exception {
     Runnable r = null;
     
@@ -41,10 +34,8 @@ public class Main {
       ClassLoader cl = (ClassLoader) vfsClassLoader.getMethod("getClassLoader", new Class[] {}).invoke(null, new Object[] {});
       Thread.currentThread().setContextClassLoader(cl);
       
-      URL[] urls = (URL[]) vfsClassLoader.getMethod("getURLs", new Class[] {}).invoke(null, new Object[] {});
-      
       if (args.length == 0) {
-        printUsage(cl, urls);
+        printUsage();
         System.exit(1);
       }
       final String argsToPass[] = new String[args.length - 1];
@@ -60,14 +51,7 @@ public class Main {
         System.out.println(runTMP.getField("VERSION").get(null));
         return;
       } else {
-        for (String className : loadAnnotationDB(urls, AccumuloService.class)) {
-          Class<?> runTMPCandidate = cl.loadClass(className);
-          if (args[0].equals(runTMPCandidate.getAnnotation(AccumuloService.class).value())) {
-            runTMP = runTMPCandidate;
-            break;
-          }
-        }
-        
+        runTMP = getAccumuloServiceClassByKeyword(args[0]);
         if (runTMP == null) {
           try {
             runTMP = cl.loadClass(args[0]);
@@ -111,11 +95,10 @@ public class Main {
     }
   }
   
-  private static void printUsage(ClassLoader cl, URL[] urls) throws IOException, ClassNotFoundException {
+  private static void printUsage() throws Exception {
     ArrayList<String> keywords = new ArrayList<String>(20);
-    for (String className : loadAnnotationDB(urls, AccumuloService.class)) {
-      Class<?> runTMPCandidate = cl.loadClass(className);
-      keywords.add(runTMPCandidate.getAnnotation(AccumuloService.class).value());
+    for (String keyword : getAccumuloServiceKeywords()) {
+      keywords.add(keyword);
     }
     keywords.add("classpath");
     keywords.add("version");
@@ -129,14 +112,16 @@ public class Main {
     System.out.println("accumulo " + kwString + " | <accumulo class> args");
   }
   
-  protected synchronized static Set<String> loadAnnotationDB(URL[] urls, Class<?> annotationClass) throws IOException {
-    if (annotationDatabase == null) {
-      AnnotationDB database = new AnnotationDB();
-      database.setScanClassAnnotations(true);
-      database.scanArchives(urls);
-      annotationDatabase = database;
-    }
-    Set<String> retVal = annotationDatabase.getAnnotationIndex().get(annotationClass.getName());
-    return retVal == null ? (retVal = Collections.emptySet()) : retVal;
+  private static Set<String> getAccumuloServiceKeywords() throws Exception {
+    Class<?> vfsClassLoader = AccumuloClassLoader.getClassLoader().loadClass("org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader");
+    @SuppressWarnings("unchecked")
+    Set<String> keywords = (Set<String>) vfsClassLoader.getMethod("getAccumuloServiceKeywords").invoke(null);
+    return keywords;
+  }
+  
+  private static Class<?> getAccumuloServiceClassByKeyword(String keyword) throws Exception {
+    Class<?> vfsClassLoader = AccumuloClassLoader.getClassLoader().loadClass("org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader");
+    Class<?> serviceClass = (Class<?>) vfsClassLoader.getMethod("getAccumuloServiceClassByKeyword", String.class).invoke(null, keyword);
+    return serviceClass;
   }
 }
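
With this refactor, Main no longer maintains its own scannotation AnnotationDB; keyword-to-class resolution moves into AccumuloVFSClassLoader, which Main reaches reflectively through AccumuloClassLoader, following the same pattern it already uses to obtain the VFS classloader itself. A condensed sketch of the new dispatch path, using the method names from this patch (the "tserver" keyword is illustrative, not taken from this hunk):

    // Resolve a launch keyword to its @AccumuloService-annotated class.
    Class<?> vfsClassLoader = AccumuloClassLoader.getClassLoader()
        .loadClass("org.apache.accumulo.start.classloader.vfs.AccumuloVFSClassLoader");
    Class<?> runTMP = (Class<?>) vfsClassLoader
        .getMethod("getAccumuloServiceClassByKeyword", String.class)
        .invoke(null, "tserver");   // static method, so the receiver is null
    // If no keyword matches, Main falls back to loading the argument directly as a class name.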

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/start/src/main/java/org/apache/accumulo/start/annotations/AccumuloService.java
----------------------------------------------------------------------
diff --git a/start/src/main/java/org/apache/accumulo/start/annotations/AccumuloService.java b/start/src/main/java/org/apache/accumulo/start/annotations/AccumuloService.java
new file mode 100644
index 0000000..41095ee
--- /dev/null
+++ b/start/src/main/java/org/apache/accumulo/start/annotations/AccumuloService.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.start.annotations;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Annotation to describe a class that can be launched by Accumulo
+ * 
+ * @since 1.6.0
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.TYPE)
+public @interface AccumuloService {
+  String value();
+}
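
For context, a minimal sketch of how a launchable class would opt into this mechanism — the class name and keyword here are hypothetical, not part of this patch:

    import org.apache.accumulo.start.annotations.AccumuloService;

    // Discovered by the classpath annotation scan in AccumuloVFSClassLoader (below),
    // so "accumulo mytool" would dispatch to this class's main method.
    @AccumuloService("mytool")
    public class MyTool {
      public static void main(String[] args) {
        System.out.println("launched by keyword lookup");
      }
    }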

http://git-wip-us.apache.org/repos/asf/accumulo/blob/424caaa1/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
----------------------------------------------------------------------
diff --git a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
index 0ff8843..a658829 100644
--- a/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
+++ b/start/src/main/java/org/apache/accumulo/start/classloader/vfs/AccumuloVFSClassLoader.java
@@ -24,7 +24,10 @@ import java.net.URLClassLoader;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
 
+import org.apache.accumulo.start.annotations.AccumuloService;
 import org.apache.accumulo.start.classloader.AccumuloClassLoader;
 import org.apache.accumulo.start.classloader.vfs.providers.HdfsFileProvider;
 import org.apache.commons.vfs2.CacheStrategy;
@@ -37,6 +40,7 @@ import org.apache.commons.vfs2.impl.DefaultFileSystemManager;
 import org.apache.commons.vfs2.impl.FileContentInfoFilenameFactory;
 import org.apache.commons.vfs2.impl.VFSClassLoader;
 import org.apache.log4j.Logger;
+import org.scannotation.AnnotationDB;
 
 /**
  * This class builds a hierarchy of Classloaders in the form of:
@@ -283,19 +287,73 @@ public class AccumuloVFSClassLoader {
     });
   }
   
-  public static URL[] getURLs() {
+  private static AnnotationDB annotationDatabase;
+  
+  private static void loadAnnotationDatabase() {
+    if (annotationDatabase == null) {
+      AnnotationDB database = new AnnotationDB();
+      database.setScanClassAnnotations(true);
+      try {
+        database.scanArchives(getAccumuloServiceURLs());
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      annotationDatabase = database;
+    }
+  }
+  
+  public static Set<String> getAccumuloServiceClasses() {
+    loadAnnotationDatabase();
+    Set<String> retVal = annotationDatabase.getAnnotationIndex().get(AccumuloService.class.getName());
+    if (retVal == null)
+      retVal = Collections.emptySet();
+    return retVal;
+  }
+  
+  public static Class<?> getAccumuloServiceClassByKeyword(String keyword) {
+    Set<String> classNames = getAccumuloServiceClasses();
+    for (String className : classNames) {
+      try {
+        Class<?> candidate = getClassLoader().loadClass(className);
+        if (candidate.getAnnotation(AccumuloService.class).value().equals(keyword))
+          return candidate;
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+    }
+    return null;
+  }
+  
+  public static Set<String> getAccumuloServiceKeywords() {
+    TreeSet<String> keywords = new TreeSet<String>();
+    Set<String> classNames = getAccumuloServiceClasses();
+    for (String className : classNames) {
+      try {
+        keywords.add(getClassLoader().loadClass(className).getAnnotation(AccumuloService.class).value());
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+    }
+    return keywords;
+  }
+  
+  public static URL[] getAccumuloServiceURLs() {
     ArrayList<URL> urls = new ArrayList<URL>(20);
     try {
       ClassLoader cl = getClassLoader();
       while (cl != null && cl != ClassLoader.getSystemClassLoader()) {
         if (cl instanceof URLClassLoader) {
           URLClassLoader ucl = (URLClassLoader) cl;
-          for (URL u : ucl.getURLs())
-            urls.add(u);
+          for (URL u : ucl.getURLs()) {
+            if (u.toExternalForm().contains("accumulo"))
+              urls.add(u);
+          }
         } else if (cl instanceof VFSClassLoader) {
           VFSClassLoader vcl = (VFSClassLoader) cl;
-          for (FileObject f : vcl.getFileObjects())
-            urls.add(f.getURL());
+          for (FileObject f : vcl.getFileObjects()) {
+            if (f.getURL().toExternalForm().contains("accumulo"))
+              urls.add(f.getURL());
+          }
         }
         cl = cl.getParent();
       }