Posted to notifications@accumulo.apache.org by GitBox <gi...@apache.org> on 2018/11/02 22:16:48 UTC

[GitHub] keith-turner closed pull request #29: Add formatting to pom

URL: https://github.com/apache/accumulo-examples/pull/29

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

diff --git a/build/Eclipse-Accumulo-Codestyle.xml b/build/Eclipse-Accumulo-Codestyle.xml
new file mode 100644
index 0000000..3b04c4d
--- /dev/null
+++ b/build/Eclipse-Accumulo-Codestyle.xml
@@ -0,0 +1,307 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<profiles version="12">
+<profile kind="CodeFormatterProfile" name="Accumulo" version="12">
+<setting id="org.eclipse.jdt.core.formatter.comment.insert_new_line_before_root_tags" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.disabling_tag" value="@formatter:off"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_annotation" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_anonymous_type_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_case" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_brace_in_array_initializer" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.new_lines_at_block_boundaries" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_annotation_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_closing_brace_in_array_initializer" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_annotation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_field" value="0"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_while" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.use_on_off_tags" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_annotation_type_member_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_else_in_if_statement" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_prefix_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.keep_else_statement_on_same_line" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_ellipsis" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.insert_new_line_for_parameter" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_annotation_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_breaks_compare_to_cases" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_multiple_fields" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_expressions_in_array_initializer" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_conditional_expression" value="80"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_for" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_binary_operator" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_question_in_wildcard" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_array_initializer" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_enum_constant" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_finally_in_try_statement" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_local_variable" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_catch_in_try_statement" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_while" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_after_package" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_type_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.continuation_indentation" value="2"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_postfix_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_method_invocation" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_superinterfaces" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_new_chunk" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_binary_operator" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_package" value="0"/>
+<setting id="org.eclipse.jdt.core.compiler.source" value="1.7"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_constant_arguments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_constructor_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_line_comments" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_arguments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_enum_declarations" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.join_wrapped_lines" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_block" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_explicit_constructor_call" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_invocation_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_member_type" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.align_type_members_on_columns" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_enum_constant" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_for" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_method_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_selector_in_method_invocation" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_switch" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_unary_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_case" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.indent_parameter_description" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_switch" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_block_comment" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_type_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.lineSplit" value="100"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_if" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_brackets_in_array_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_parenthesized_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_explicitconstructorcall_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_constructor_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_first_class_body_declaration" value="0"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_method" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indentation.size" value="2"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.enabling_tag" value="@formatter:on"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_enum_constant" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_superclass_in_type_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_assignment" value="0"/>
+<setting id="org.eclipse.jdt.core.compiler.problem.assertIdentifier" value="error"/>
+<setting id="org.eclipse.jdt.core.formatter.tabulation.char" value="space"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_parameters" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_try_resources" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_prefix_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_statements_compare_to_body" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_method" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.wrap_outer_expressions_when_nested" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.format_guardian_clause_on_one_line" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_for" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_cast" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_parameters_in_constructor_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_labeled_statement" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_annotation_type_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_method_body" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_method_declaration" value="0"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_method_invocation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_try" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_allocation_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_enum_constant" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_annotation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_at_in_annotation_type_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_throws" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_if" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_switch" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_throws" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_return" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_question_in_conditional" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_question_in_wildcard" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_try" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_allocation_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.preserve_white_space_between_code_and_line_comments" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_parenthesized_expression_in_throw" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_type_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.compiler.problem.enumIdentifier" value="error"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_switch" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_ellipsis" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_block" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_inits" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_method_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.compact_else_if" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.wrap_before_or_operator_multicatch" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_array_initializer" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_increments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.format_line_comment_starting_on_first_column" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_bracket_in_array_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_field" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_enum_constant" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.indent_root_tags" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_declarations" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_union_type_in_multicatch" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_explicitconstructorcall_arguments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_switch" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_method_declaration_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_superinterfaces" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_allocation_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.tabulation.size" value="2"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_opening_brace_in_array_initializer" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_brace_in_block" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_constant" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_constructor_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_throws" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_if" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.clear_blank_lines_in_javadoc_comment" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_constructor_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_assignment_operator" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_assignment_operator" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_empty_lines" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_synchronized" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_paren_in_cast" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_declaration_parameters" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_block_in_case" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.number_of_empty_lines_to_preserve" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_catch" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_constructor_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_method_invocation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_bracket_in_array_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_qualified_allocation_expression" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_and_in_type_parameter" value="insert"/>
+<setting id="org.eclipse.jdt.core.compiler.compliance" value="1.7"/>
+<setting id="org.eclipse.jdt.core.formatter.continuation_indentation_for_array_initializer" value="2"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_brackets_in_array_allocation_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_at_in_annotation_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_allocation_expression" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_cast" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_unary_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_parameterized_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_anonymous_type_declaration" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.keep_empty_array_initializer_on_one_line" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_enum_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.keep_imple_if_on_one_line" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_constructor_declaration_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_closing_angle_bracket_in_type_parameters" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_at_end_of_file_if_missing" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_for" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_labeled_statement" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_parameterized_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_type_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_binary_expression" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_enum_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_type" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_while" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode" value="enabled"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_try" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.put_empty_statement_on_new_line" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_label" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_parameter" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_type_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_method_invocation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_before_while_in_do_statement" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_enum_constant" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_javadoc_comments" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.line_length" value="100"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_after_annotation_on_package" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_between_import_groups" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_enum_constant_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_semicolon" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_constructor_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.number_of_blank_lines_at_beginning_of_method_body" value="0"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_conditional" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_type_header" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_annotation_type_member_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.wrap_before_binary_operator" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_declaration_header" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_between_type_declarations" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_synchronized" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_statements_compare_to_block" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_superinterfaces_in_enum_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.join_lines_in_comments" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_question_in_conditional" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_field_declarations" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_compact_if" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_for_inits" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_switchstatements_compare_to_cases" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_array_initializer" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_default" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_and_in_type_parameter" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_parens_in_constructor_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_before_imports" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_assert" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_html" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_throws_clause_in_method_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_angle_bracket_in_type_parameters" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_bracket_in_array_allocation_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_anonymous_type_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_colon_in_conditional" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_angle_bracket_in_parameterized_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_for" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_postfix_operator" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_source_code" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_synchronized" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_allocation_expression" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_constructor_declaration_throws" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_parameters_in_method_declaration" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_brace_in_array_initializer" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.compiler.codegen.targetPlatform" value="1.7"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_resources_in_try" value="80"/>
+<setting id="org.eclipse.jdt.core.formatter.use_tabs_only_for_leading_indentations" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_arguments_in_annotation" value="16"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_header" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.format_block_comments" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_enum_constant" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.alignment_for_enum_constants" value="48"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_new_line_in_empty_block" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_annotation_declaration_header" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_parenthesized_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_parenthesized_expression" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_catch" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_multiple_local_declarations" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_switch" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_comma_in_for_increments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_invocation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_colon_in_assert" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.brace_position_for_type_declaration" value="end_of_line"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_brace_in_array_initializer" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_between_empty_braces_in_array_initializer" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_opening_paren_in_method_declaration" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_for" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_paren_in_catch" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_parameterized_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_field_declarations" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_closing_paren_in_annotation" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_parameterized_type_reference" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_method_invocation_arguments" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.comment.new_lines_at_javadoc_boundaries" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.blank_lines_after_imports" value="1"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_comma_in_multiple_local_declarations" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.indent_body_declarations_compare_to_enum_constant_header" value="true"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_after_semicolon_in_for" value="insert"/>
+<setting id="org.eclipse.jdt.core.formatter.never_indent_line_comments_on_first_column" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_semicolon_in_try_resources" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.insert_space_before_opening_angle_bracket_in_type_arguments" value="do not insert"/>
+<setting id="org.eclipse.jdt.core.formatter.never_indent_block_comments_on_first_column" value="false"/>
+<setting id="org.eclipse.jdt.core.formatter.keep_then_statement_on_same_line" value="false"/>
+</profile>
+</profiles>
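
To give a concrete sense of what this profile produces, here is a small hypothetical snippet
(illustration only, not part of the PR) hand-formatted to match its key settings: two-space
indentation using spaces (tabulation.char=space, tabulation.size=2), end-of-line braces, a
100-column limit (lineSplit=100), and no space after commas in type arguments:

import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;

public class FormattedExample {
  public static void main(String[] args) {
    // Two-space indentation, spaces only, braces at end of line.
    Map<String,Integer> counts = new HashMap<>(); // no space after comma in type arguments
    counts.put("rows", 7);
    for (Entry<String,Integer> entry : counts.entrySet()) {
      System.out.println(entry.getKey() + " = " + entry.getValue());
    }
  }
}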
diff --git a/checkstyle.xml b/build/checkstyle.xml
similarity index 100%
rename from checkstyle.xml
rename to build/checkstyle.xml
diff --git a/pom.xml b/pom.xml
index 1e3fe68..649f698 100644
--- a/pom.xml
+++ b/pom.xml
@@ -17,29 +17,25 @@
 -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
   <modelVersion>4.0.0</modelVersion>
-
   <parent>
     <groupId>org.apache</groupId>
     <artifactId>apache</artifactId>
     <version>18</version>
   </parent>
-
   <groupId>org.apache.accumulo</groupId>
   <artifactId>accumulo-examples</artifactId>
   <version>2.0.0-SNAPSHOT</version>
   <packaging>jar</packaging>
-
   <name>Apache Accumulo Examples</name>
   <description>Example code and corresponding documentation for using Apache Accumulo</description>
-
   <properties>
     <accumulo.version>2.0.0-alpha-1</accumulo.version>
     <hadoop.version>3.1.1</hadoop.version>
     <slf4j.version>1.7.21</slf4j.version>
     <maven.compiler.source>1.8</maven.compiler.source>
     <maven.compiler.target>1.8</maven.compiler.target>
+    <eclipseFormatterStyle>build/Eclipse-Accumulo-Codestyle.xml</eclipseFormatterStyle>
   </properties>
-
   <dependencyManagement>
     <dependencies>
       <dependency>
@@ -49,84 +45,6 @@
       </dependency>
     </dependencies>
   </dependencyManagement>
-
-  <build>
-    <pluginManagement>
-      <plugins>
-        <plugin>
-          <!-- Allows us to get the apache-ds bundle artifacts -->
-          <groupId>org.apache.felix</groupId>
-          <artifactId>maven-bundle-plugin</artifactId>
-          <version>3.0.1</version>
-        </plugin>
-      </plugins>
-    </pluginManagement>
-    <plugins>
-      <plugin>
-        <artifactId>maven-compiler-plugin</artifactId>
-        <version>3.1</version>
-        <configuration>
-          <source>${maven.compiler.source}</source>
-          <target>${maven.compiler.target}</target>
-          <optimize>true</optimize>
-          <encoding>UTF-8</encoding>
-        </configuration>
-      </plugin>
-      <plugin>
-        <!-- Allows us to get the apache-ds bundle artifacts -->
-        <groupId>org.apache.felix</groupId>
-        <artifactId>maven-bundle-plugin</artifactId>
-        <extensions>true</extensions>
-        <inherited>true</inherited>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-failsafe-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>run-integration-tests</id>
-            <goals>
-              <goal>integration-test</goal>
-              <goal>verify</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>exec-maven-plugin</artifactId>
-        <version>1.5.0</version>
-        <configuration>
-          <cleanupDaemonThreads>false</cleanupDaemonThreads>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.rat</groupId>
-        <artifactId>apache-rat-plugin</artifactId>
-        <version>0.12</version>
-        <executions>
-          <execution>
-            <phase>verify</phase>
-            <goals>
-              <goal>check</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <!-- This was added to ensure project only uses public API. Run with the following:
-              mvn checkstyle:checkstyle
-         -->
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-checkstyle-plugin</artifactId>
-        <version>2.17</version>
-        <configuration>
-          <configLocation>checkstyle.xml</configLocation>
-        </configuration>
-      </plugin>
-    </plugins>
-  </build>
-
   <dependencies>
     <dependency>
       <groupId>com.beust</groupId>
@@ -209,4 +127,137 @@
       <scope>test</scope>
     </dependency>
   </dependencies>
+  <build>
+    <pluginManagement>
+      <plugins>
+        <plugin>
+          <!-- Allows us to get the apache-ds bundle artifacts -->
+          <groupId>org.apache.felix</groupId>
+          <artifactId>maven-bundle-plugin</artifactId>
+          <version>3.0.1</version>
+        </plugin>
+      </plugins>
+    </pluginManagement>
+    <plugins>
+      <plugin>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <version>3.1</version>
+        <configuration>
+          <source>${maven.compiler.source}</source>
+          <target>${maven.compiler.target}</target>
+          <optimize>true</optimize>
+          <encoding>UTF-8</encoding>
+        </configuration>
+      </plugin>
+      <plugin>
+        <!-- Allows us to get the apache-ds bundle artifacts -->
+        <groupId>org.apache.felix</groupId>
+        <artifactId>maven-bundle-plugin</artifactId>
+        <extensions>true</extensions>
+        <inherited>true</inherited>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-failsafe-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>run-integration-tests</id>
+            <goals>
+              <goal>integration-test</goal>
+              <goal>verify</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>exec-maven-plugin</artifactId>
+        <version>1.5.0</version>
+        <configuration>
+          <cleanupDaemonThreads>false</cleanupDaemonThreads>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <version>0.12</version>
+        <executions>
+          <execution>
+            <phase>verify</phase>
+            <goals>
+              <goal>check</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <!-- This was added to ensure project only uses public API. Run with the following:
+              mvn checkstyle:checkstyle
+         -->
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-checkstyle-plugin</artifactId>
+        <version>2.17</version>
+        <configuration>
+          <configLocation>build/checkstyle.xml</configLocation>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>net.revelc.code</groupId>
+        <artifactId>impsort-maven-plugin</artifactId>
+        <version>1.2.0</version>
+        <executions>
+          <execution>
+            <id>sort-imports</id>
+            <goals>
+              <goal>sort</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+          <removeUnused>true</removeUnused>
+          <groups>java.,javax.,org.,com.</groups>
+        </configuration>
+      </plugin>
+      <plugin>
+        <!-- verify before compile; should be sorted already -->
+        <groupId>com.github.ekryd.sortpom</groupId>
+        <artifactId>sortpom-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>verify-sorted-pom</id>
+            <phase>process-resources</phase>
+            <goals>
+              <goal>verify</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>net.revelc.code.formatter</groupId>
+        <artifactId>formatter-maven-plugin</artifactId>
+        <version>2.8.1</version>
+        <executions>
+          <execution>
+            <id>format-java-source</id>
+            <goals>
+              <goal>format</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+          <configFile>${eclipseFormatterStyle}</configFile>
+          <compilerCompliance>${maven.compiler.source}</compilerCompliance>
+          <compilerSource>${maven.compiler.source}</compilerSource>
+          <compilerTargetPlatform>${maven.compiler.target}</compilerTargetPlatform>
+          <lineEnding>LF</lineEnding>
+          <overrideConfigCompilerVersion>true</overrideConfigCompilerVersion>
+          <skipJsFormatting>true</skipJsFormatting>
+          <skipHtmlFormatting>true</skipHtmlFormatting>
+          <skipXmlFormatting>true</skipXmlFormatting>
+          <skipJsonFormatting>true</skipJsonFormatting>
+          <skipCssFormatting>true</skipCssFormatting>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
 </project>
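
Taken together, the new plugins wire formatting into the regular build: sortpom-maven-plugin
verifies that pom.xml stays sorted (bound to process-resources above), impsort-maven-plugin sorts
imports into the groups java., javax., org., com. (which matches the import reordering visible in
the RandomBatchScanner diff below), and formatter-maven-plugin applies the Eclipse profile named
by ${eclipseFormatterStyle}. Assuming the plugins' documented goals and prefixes, they can
presumably also be invoked directly, for example:

mvn clean verify        # full build; the formatting goals run in their default phases
mvn formatter:validate  # report Java sources that do not match the profile, without rewriting
mvn impsort:check       # report unsorted imports, without rewriting
mvn sortpom:verify      # fail if pom.xml is not sorted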
diff --git a/src/main/java/org/apache/accumulo/examples/bloom/BloomBatchScanner.java b/src/main/java/org/apache/accumulo/examples/bloom/BloomBatchScanner.java
index feb690a..ccf9cb4 100644
--- a/src/main/java/org/apache/accumulo/examples/bloom/BloomBatchScanner.java
+++ b/src/main/java/org/apache/accumulo/examples/bloom/BloomBatchScanner.java
@@ -39,14 +39,17 @@
  */
 public class BloomBatchScanner {
 
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
+  public static void main(String[] args)
+      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties")
+        .build();
 
     scan(client, "bloom_test1", 7);
     scan(client, "bloom_test2", 7);
   }
 
-  static void scan(AccumuloClient client, String tableName, int seed) throws TableNotFoundException {
+  static void scan(AccumuloClient client, String tableName, int seed)
+      throws TableNotFoundException {
     Random r = new Random(seed);
     HashSet<Range> ranges = new HashSet<>();
     HashMap<String,Boolean> expectedRows = new HashMap<>();
@@ -64,7 +67,7 @@ static void scan(AccumuloClient client, String tableName, int seed) throws Table
     System.out.println("Scanning " + tableName + " with seed " + seed);
     try (BatchScanner scan = client.createBatchScanner(tableName, Authorizations.EMPTY, 20)) {
       scan.setRanges(ranges);
-      for (Entry<Key, Value> entry : scan) {
+      for (Entry<Key,Value> entry : scan) {
         Key key = entry.getKey();
         if (!expectedRows.containsKey(key.getRow().toString())) {
           System.out.println("Encountered unexpected key: " + key);
@@ -77,7 +80,7 @@ static void scan(AccumuloClient client, String tableName, int seed) throws Table
 
     long t2 = System.currentTimeMillis();
     System.out.println(String.format("Scan finished! %6.2f lookups/sec, %.2f secs, %d results",
-            lookups / ((t2 - t1) / 1000.0), ((t2 - t1) / 1000.0), results));
+        lookups / ((t2 - t1) / 1000.0), ((t2 - t1) / 1000.0), results));
 
     int count = 0;
     for (Entry<String,Boolean> entry : expectedRows.entrySet()) {
diff --git a/src/main/java/org/apache/accumulo/examples/bloom/BloomFilters.java b/src/main/java/org/apache/accumulo/examples/bloom/BloomFilters.java
index f46f3c0..c157b9f 100644
--- a/src/main/java/org/apache/accumulo/examples/bloom/BloomFilters.java
+++ b/src/main/java/org/apache/accumulo/examples/bloom/BloomFilters.java
@@ -32,8 +32,10 @@
 
 public class BloomFilters {
 
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
+  public static void main(String[] args)
+      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties")
+        .build();
     try {
       System.out.println("Creating bloom_test1 and bloom_test2");
       client.tableOperations().create("bloom_test1");
@@ -64,8 +66,8 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
   }
 
   // write a million random rows
-  static void writeData(AccumuloClient client, String tableName, int seed) throws TableNotFoundException,
-        MutationsRejectedException{
+  static void writeData(AccumuloClient client, String tableName, int seed)
+      throws TableNotFoundException, MutationsRejectedException {
     Random r = new Random(seed);
     try (BatchWriter bw = client.createBatchWriter(tableName)) {
       for (int x = 0; x < 1_000_000; x++) {
diff --git a/src/main/java/org/apache/accumulo/examples/bloom/BloomFiltersNotFound.java b/src/main/java/org/apache/accumulo/examples/bloom/BloomFiltersNotFound.java
index 1edc2c4..c0079f1 100644
--- a/src/main/java/org/apache/accumulo/examples/bloom/BloomFiltersNotFound.java
+++ b/src/main/java/org/apache/accumulo/examples/bloom/BloomFiltersNotFound.java
@@ -27,8 +27,10 @@
 
 public class BloomFiltersNotFound {
 
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
+  public static void main(String[] args)
+      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties")
+        .build();
     try {
       client.tableOperations().create("bloom_test3");
       client.tableOperations().create("bloom_test4");
diff --git a/src/main/java/org/apache/accumulo/examples/cli/BatchScannerOpts.java b/src/main/java/org/apache/accumulo/examples/cli/BatchScannerOpts.java
index 052b642..10a93c0 100644
--- a/src/main/java/org/apache/accumulo/examples/cli/BatchScannerOpts.java
+++ b/src/main/java/org/apache/accumulo/examples/cli/BatchScannerOpts.java
@@ -24,7 +24,8 @@
   @Parameter(names = "--scanThreads", description = "Number of threads to use when batch scanning")
   public Integer scanThreads = 10;
 
-  @Parameter(names = "--scanTimeout", converter = TimeConverter.class, description = "timeout used to fail a batch scan")
+  @Parameter(names = "--scanTimeout", converter = TimeConverter.class,
+      description = "timeout used to fail a batch scan")
   public Long scanTimeout = Long.MAX_VALUE;
 
 }
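
These option holders are plain JCommander beans, so wrapping an @Parameter annotation onto two
lines (to fit the 100-column limit) has no runtime effect. A minimal hypothetical usage sketch,
with OptsDemo standing in for the option classes above:

import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;

public class OptsDemo {
  @Parameter(names = "--scanThreads", description = "Number of threads to use when batch scanning")
  int scanThreads = 10;

  public static void main(String[] args) {
    OptsDemo opts = new OptsDemo();
    // JCommander fills the annotated fields from the command line.
    JCommander.newBuilder().addObject(opts).build().parse(args);
    System.out.println("scanThreads = " + opts.scanThreads);
  }
}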
diff --git a/src/main/java/org/apache/accumulo/examples/cli/BatchWriterOpts.java b/src/main/java/org/apache/accumulo/examples/cli/BatchWriterOpts.java
index 19b2395..224f4be 100644
--- a/src/main/java/org/apache/accumulo/examples/cli/BatchWriterOpts.java
+++ b/src/main/java/org/apache/accumulo/examples/cli/BatchWriterOpts.java
@@ -27,16 +27,20 @@
 public class BatchWriterOpts {
   private static final BatchWriterConfig BWDEFAULTS = new BatchWriterConfig();
 
-  @Parameter(names = "--batchThreads", description = "Number of threads to use when writing large batches")
+  @Parameter(names = "--batchThreads",
+      description = "Number of threads to use when writing large batches")
   public Integer batchThreads = BWDEFAULTS.getMaxWriteThreads();
 
-  @Parameter(names = "--batchLatency", converter = TimeConverter.class, description = "The maximum time to wait before flushing data to servers when writing")
+  @Parameter(names = "--batchLatency", converter = TimeConverter.class,
+      description = "The maximum time to wait before flushing data to servers when writing")
   public Long batchLatency = BWDEFAULTS.getMaxLatency(TimeUnit.MILLISECONDS);
 
-  @Parameter(names = "--batchMemory", converter = MemoryConverter.class, description = "memory used to batch data when writing")
+  @Parameter(names = "--batchMemory", converter = MemoryConverter.class,
+      description = "memory used to batch data when writing")
   public Long batchMemory = BWDEFAULTS.getMaxMemory();
 
-  @Parameter(names = "--batchTimeout", converter = TimeConverter.class, description = "timeout used to fail a batch write")
+  @Parameter(names = "--batchTimeout", converter = TimeConverter.class,
+      description = "timeout used to fail a batch write")
   public Long batchTimeout = BWDEFAULTS.getTimeout(TimeUnit.MILLISECONDS);
 
   public BatchWriterConfig getBatchWriterConfig() {
diff --git a/src/main/java/org/apache/accumulo/examples/cli/ClientOpts.java b/src/main/java/org/apache/accumulo/examples/cli/ClientOpts.java
index 40b5cfa..5d38f73 100644
--- a/src/main/java/org/apache/accumulo/examples/cli/ClientOpts.java
+++ b/src/main/java/org/apache/accumulo/examples/cli/ClientOpts.java
@@ -50,8 +50,8 @@ public ColumnVisibility convert(String value) {
   public static class TimeConverter implements IStringConverter<Long> {
     @Override
     public Long convert(String value) {
-      if(value.matches("[0-9]+"))
-        value = "PT"+value+"S"; //if only numbers then assume seconds
+      if (value.matches("[0-9]+"))
+        value = "PT" + value + "S"; // if only numbers then assume seconds
       return Duration.parse(value).toMillis();
     }
   }
@@ -76,8 +76,9 @@ public Long convert(String str) {
         }
         return Long.parseLong(str.substring(0, str.length() - 1)) << multiplier;
       } catch (Exception ex) {
-        throw new IllegalArgumentException("The value '" + str + "' is not a valid memory setting. A valid value would a number "
-            + "possibily followed by an optional 'G', 'M', 'K', or 'B'.");
+        throw new IllegalArgumentException(
+            "The value '" + str + "' is not a valid memory setting. A valid value would a number "
+                + "possibily followed by an optional 'G', 'M', 'K', or 'B'.");
       }
     }
   }
@@ -97,7 +98,8 @@ public File convert(String filename) {
       description = "Accumulo client properties file.  See README.md for details.")
   private File config = null;
 
-  @Parameter(names = {"-auths", "--auths"}, converter = AuthConverter.class, description = "the authorizations to use when reading or writing")
+  @Parameter(names = {"-auths", "--auths"}, converter = AuthConverter.class,
+      description = "the authorizations to use when reading or writing")
   public Authorizations auths = Authorizations.EMPTY;
 
   private ClientInfo cachedInfo = null;
@@ -107,7 +109,7 @@ public AccumuloClient getAccumuloClient() {
     if (cachedAccumuloClient == null) {
       try {
         cachedAccumuloClient = Accumulo.newClient().usingClientInfo(getClientInfo()).build();
-      } catch (AccumuloException|AccumuloSecurityException e) {
+      } catch (AccumuloException | AccumuloSecurityException e) {
         throw new IllegalArgumentException(e);
       }
     }
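
The TimeConverter hunk above only touches whitespace and comment spacing; the parsing logic is
unchanged. As a standalone sketch (hypothetical demo class, same logic as shown in the diff): bare
numeric values are read as seconds, anything else must be an ISO-8601 duration string:

import java.time.Duration;

public class TimeConverterDemo {
  // Same logic as the TimeConverter hunk above (hypothetical standalone demo).
  static long toMillis(String value) {
    if (value.matches("[0-9]+"))
      value = "PT" + value + "S"; // if only numbers then assume seconds
    return Duration.parse(value).toMillis();
  }

  public static void main(String[] args) {
    System.out.println(toMillis("30"));   // 30000
    System.out.println(toMillis("PT2M")); // 120000
  }
}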
diff --git a/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOnRequiredTable.java b/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOnRequiredTable.java
index 67ca57a..520107b 100644
--- a/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOnRequiredTable.java
+++ b/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOnRequiredTable.java
@@ -29,7 +29,8 @@
   @Parameter(names = {"-t", "--table"}, required = true, description = "table to use")
   private String tableName;
 
-  @Parameter(names = {"-tf", "--tokenFile"}, description = "File in hdfs containing the user's authentication token create with \"bin/accumulo create-token\"")
+  @Parameter(names = {"-tf", "--tokenFile"},
+      description = "File in hdfs containing the user's authentication token create with \"bin/accumulo create-token\"")
   private String tokenFile = "";
 
   @Override
diff --git a/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOpts.java b/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOpts.java
index 87b296f..0accf55 100644
--- a/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOpts.java
+++ b/src/main/java/org/apache/accumulo/examples/cli/MapReduceClientOpts.java
@@ -59,12 +59,14 @@ public AuthenticationToken getToken() {
         AccumuloClient client = getAccumuloClient();
 
         // Do the explicit check to see if the user has the permission to get a delegation token
-        if (!client.securityOperations().hasSystemPermission(client.whoami(), SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
+        if (!client.securityOperations().hasSystemPermission(client.whoami(),
+            SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
           log.error(
               "{} doesn't have the {} SystemPermission neccesary to obtain a delegation token. MapReduce tasks cannot automatically use the client's"
                   + " credentials on remote servers. Delegation tokens provide a means to run MapReduce without distributing the user's credentials.",
               user.getUserName(), SystemPermission.OBTAIN_DELEGATION_TOKEN.name());
-          throw new IllegalStateException(client.whoami() + " does not have permission to obtain a delegation token");
+          throw new IllegalStateException(
+              client.whoami() + " does not have permission to obtain a delegation token");
         }
 
         // Get the delegation token from Accumulo
diff --git a/src/main/java/org/apache/accumulo/examples/cli/ScannerOpts.java b/src/main/java/org/apache/accumulo/examples/cli/ScannerOpts.java
index 00bce98..7ed5c16 100644
--- a/src/main/java/org/apache/accumulo/examples/cli/ScannerOpts.java
+++ b/src/main/java/org/apache/accumulo/examples/cli/ScannerOpts.java
@@ -19,6 +19,7 @@
 import com.beust.jcommander.Parameter;
 
 public class ScannerOpts {
-  @Parameter(names = "--scanBatchSize", description = "the number of key-values to pull during a scan")
+  @Parameter(names = "--scanBatchSize",
+      description = "the number of key-values to pull during a scan")
   public int scanBatchSize = 1000;
 }
diff --git a/src/main/java/org/apache/accumulo/examples/client/CountingVerifyingReceiver.java b/src/main/java/org/apache/accumulo/examples/client/CountingVerifyingReceiver.java
index 092144d..1806161 100644
--- a/src/main/java/org/apache/accumulo/examples/client/CountingVerifyingReceiver.java
+++ b/src/main/java/org/apache/accumulo/examples/client/CountingVerifyingReceiver.java
@@ -49,7 +49,8 @@ public void receive(Key key, Value value) {
     byte expectedValue[] = RandomBatchWriter.createValue(rowid, expectedValueSize);
 
     if (!Arrays.equals(expectedValue, value.get())) {
-      log.error("Got unexpected value for " + key + " expected : " + new String(expectedValue, UTF_8) + " got : " + new String(value.get(), UTF_8));
+      log.error("Got unexpected value for " + key + " expected : "
+          + new String(expectedValue, UTF_8) + " got : " + new String(value.get(), UTF_8));
     }
 
     if (!expectedRows.containsKey(key.getRow())) {
diff --git a/src/main/java/org/apache/accumulo/examples/client/RandomBatchScanner.java b/src/main/java/org/apache/accumulo/examples/client/RandomBatchScanner.java
index 60af086..5024f38 100644
--- a/src/main/java/org/apache/accumulo/examples/client/RandomBatchScanner.java
+++ b/src/main/java/org/apache/accumulo/examples/client/RandomBatchScanner.java
@@ -25,7 +25,6 @@
 import java.util.Map.Entry;
 import java.util.Random;
 
-import com.beust.jcommander.Parameter;
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
@@ -41,6 +40,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Simple example for reading random batches of data from Accumulo.
  */
@@ -53,7 +54,8 @@
     String clientProps = "conf/accumulo-client.properties";
   }
 
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+  public static void main(String[] args)
+      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
     Opts opts = new Opts();
     opts.parseArgs(RandomBatchScanner.class.getName(), args);
 
@@ -83,7 +85,7 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
     log.info("Reading ranges using BatchScanner");
     try (BatchScanner scan = client.createBatchScanner("batch", Authorizations.EMPTY, 20)) {
       scan.setRanges(ranges);
-      for (Entry<Key, Value> entry : scan) {
+      for (Entry<Key,Value> entry : scan) {
         Key key = entry.getKey();
         Value value = entry.getValue();
         String row = key.getRow().toString();
diff --git a/src/main/java/org/apache/accumulo/examples/client/RandomBatchWriter.java b/src/main/java/org/apache/accumulo/examples/client/RandomBatchWriter.java
index c2dc295..2019885 100644
--- a/src/main/java/org/apache/accumulo/examples/client/RandomBatchWriter.java
+++ b/src/main/java/org/apache/accumulo/examples/client/RandomBatchWriter.java
@@ -42,8 +42,9 @@
 /**
  * Simple example for writing random data to Accumulo.
  *
- * The rows of the entries will be randomly generated numbers between a specified min and max (prefixed by "row_"). The column families will be "foo" and column
- * qualifiers will be "1". The values will be random byte arrays of a specified size.
+ * The rows of the entries will be randomly generated numbers between a specified min and max
+ * (prefixed by "row_"). The column families will be "foo" and column qualifiers will be "1". The
+ * values will be random byte arrays of a specified size.
  */
 public class RandomBatchWriter {
 
@@ -71,7 +72,8 @@
   }
 
   /**
-   * Creates a mutation on a specified row with column family "foo", column qualifier "1", specified visibility, and a random value of specified size.
+   * Creates a mutation on a specified row with column family "foo", column qualifier "1", specified
+   * visibility, and a random value of specified size.
    *
    * @param rowid
    *          the row of the mutation
@@ -120,14 +122,17 @@ public static long abs(long l) {
   /**
    * Writes a specified number of entries to Accumulo using a {@link BatchWriter}.
    */
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+  public static void main(String[] args)
+      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
     Opts opts = new Opts();
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     opts.parseArgs(RandomBatchWriter.class.getName(), args, bwOpts);
-    if ((opts.max - opts.min) < 1L * opts.num) { // right-side multiplied by 1L to convert to long in a way that doesn't trigger FindBugs
-      System.err.println(String.format("You must specify a min and a max that allow for at least num possible values. "
-          + "For example, you requested %d rows, but a min of %d and a max of %d (exclusive), which only allows for %d rows.", opts.num, opts.min, opts.max,
-          (opts.max - opts.min)));
+    if ((opts.max - opts.min) < 1L * opts.num) { // right-side multiplied by 1L to convert to long
+                                                 // in a way that doesn't trigger FindBugs
+      System.err.println(String.format(
+          "You must specify a min and a max that allow for at least num possible values. "
+              + "For example, you requested %d rows, but a min of %d and a max of %d (exclusive), which only allows for %d rows.",
+          opts.num, opts.min, opts.max, (opts.max - opts.min)));
       System.exit(1);
     }
     Random r;
@@ -170,7 +175,8 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
       }
 
       if (e.getConstraintViolationSummaries().size() > 0) {
-        System.err.println("ERROR : Constraint violations occurred : " + e.getConstraintViolationSummaries());
+        System.err.println(
+            "ERROR : Constraint violations occurred : " + e.getConstraintViolationSummaries());
       }
       System.exit(1);
     }
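
   The rewrapped guard above is worth one worked instance: with hypothetical
   values min=0, max=500, num=1000 there are only 500 possible rows in
   [min, max), so the check trips:

       long min = 0, max = 500, num = 1000;        // hypothetical values
       // 1L * num forces a long comparison, as the comment above explains.
       System.out.println((max - min) < 1L * num); // true -> program exits
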
diff --git a/src/main/java/org/apache/accumulo/examples/client/ReadWriteExample.java b/src/main/java/org/apache/accumulo/examples/client/ReadWriteExample.java
index e19c866..2761e43 100644
--- a/src/main/java/org/apache/accumulo/examples/client/ReadWriteExample.java
+++ b/src/main/java/org/apache/accumulo/examples/client/ReadWriteExample.java
@@ -18,7 +18,6 @@
 
 import java.util.Map.Entry;
 
-import com.beust.jcommander.Parameter;
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.BatchWriter;
@@ -33,6 +32,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.beust.jcommander.Parameter;
+
 public class ReadWriteExample {
 
   private static final Logger log = LoggerFactory.getLogger(ReadWriteExample.class);
@@ -72,7 +73,7 @@ public static void main(String[] args) throws Exception {
 
     // read data
     try (Scanner scanner = client.createScanner(table, Authorizations.EMPTY)) {
-      for (Entry<Key, Value> entry : scanner) {
+      for (Entry<Key,Value> entry : scanner) {
         log.info(entry.getKey().toString() + " -> " + entry.getValue().toString());
       }
     }
diff --git a/src/main/java/org/apache/accumulo/examples/client/RowOperations.java b/src/main/java/org/apache/accumulo/examples/client/RowOperations.java
index 8a256a2..eb42d29 100644
--- a/src/main/java/org/apache/accumulo/examples/client/RowOperations.java
+++ b/src/main/java/org/apache/accumulo/examples/client/RowOperations.java
@@ -18,7 +18,6 @@
 
 import java.util.Map.Entry;
 
-import com.beust.jcommander.Parameter;
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
@@ -38,6 +37,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * A demonstration of reading entire rows and deleting entire rows.
  */
@@ -64,7 +65,8 @@ private static void printRow(String row, AccumuloClient client) throws TableNotF
     }
   }
 
-  private static void deleteRow(String row, AccumuloClient client, BatchWriter bw) throws MutationsRejectedException, TableNotFoundException {
+  private static void deleteRow(String row, AccumuloClient client, BatchWriter bw)
+      throws MutationsRejectedException, TableNotFoundException {
     Mutation mut = new Mutation(row);
     try (Scanner scanner = client.createScanner(table, Authorizations.EMPTY)) {
       scanner.setRange(Range.exact(row));
@@ -81,7 +83,8 @@ private static void deleteRow(String row, AccumuloClient client, BatchWriter bw)
     String clientProps = "conf/accumulo-client.properties";
   }
 
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+  public static void main(String[] args)
+      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
     Opts opts = new Opts();
     opts.parseArgs(RowOperations.class.getName(), args);
 
@@ -137,7 +140,7 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
       printAll(client);
 
       deleteRow("row1", client, bw);
-   }
+    }
 
     log.info("This is just row3");
     printAll(client);
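
   deleteRow above pairs a Range.exact scan with a single delete mutation. The
   hunk only shows the opening of that method, so the following is a sketch of
   the general pattern rather than a copy of the file ("rowops" and "row1" are
   placeholders; client and bw are assumed to exist as in the example):

       Mutation mut = new Mutation("row1");
       try (Scanner scanner = client.createScanner("rowops", Authorizations.EMPTY)) {
         scanner.setRange(Range.exact("row1"));
         for (Map.Entry<Key,Value> entry : scanner) {
           // One putDelete per column seen in the row deletes the whole row.
           mut.putDelete(entry.getKey().getColumnFamily(),
               entry.getKey().getColumnQualifier());
         }
       }
       bw.addMutation(mut);
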
diff --git a/src/main/java/org/apache/accumulo/examples/client/SequentialBatchWriter.java b/src/main/java/org/apache/accumulo/examples/client/SequentialBatchWriter.java
index a7ffc67..7eb9807 100644
--- a/src/main/java/org/apache/accumulo/examples/client/SequentialBatchWriter.java
+++ b/src/main/java/org/apache/accumulo/examples/client/SequentialBatchWriter.java
@@ -18,7 +18,6 @@
 
 import java.util.Random;
 
-import com.beust.jcommander.Parameter;
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
@@ -28,11 +27,12 @@
 import org.apache.accumulo.core.client.TableNotFoundException;
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
-
 import org.apache.accumulo.examples.cli.Help;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Simple example for writing random data in sequential order to Accumulo.
  */
@@ -60,10 +60,12 @@ public static Value createValue(long rowId) {
   }
 
   /**
-   * Writes 1000 entries to Accumulo using a {@link BatchWriter}. The rows of the entries will be sequential starting from 0.
-   * The column families will be "foo" and column qualifiers will be "1". The values will be random 50 byte arrays.
+   * Writes 1000 entries to Accumulo using a {@link BatchWriter}. The rows of the entries will be
+   * sequential starting from 0. The column families will be "foo" and column qualifiers will be
+   * "1". The values will be random 50 byte arrays.
    */
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+  public static void main(String[] args)
+      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
     Opts opts = new Opts();
     opts.parseArgs(SequentialBatchWriter.class.getName(), args);
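
   Per the Javadoc above, this class writes rows 0 through 999 with column
   family "foo" and qualifier "1". A condensed sketch of that loop (the exact
   row formatting is an assumption; the table name "batch" matches the other
   client examples):

       try (BatchWriter bw = client.createBatchWriter("batch")) {
         for (long i = 0; i < 1000; i++) {
           Mutation m = new Mutation(String.format("row_%d", i)); // format assumed
           m.put("foo", "1", SequentialBatchWriter.createValue(i));
           bw.addMutation(m);
         }
       }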
 
diff --git a/src/main/java/org/apache/accumulo/examples/client/TraceDumpExample.java b/src/main/java/org/apache/accumulo/examples/client/TraceDumpExample.java
index 24fe6d5..840aebd 100644
--- a/src/main/java/org/apache/accumulo/examples/client/TraceDumpExample.java
+++ b/src/main/java/org/apache/accumulo/examples/client/TraceDumpExample.java
@@ -45,17 +45,19 @@ public Opts() {
       super("trace");
     }
 
-    @Parameter(names = {"--traceid"}, description = "The hex string id of a given trace, for example 16cfbbd7beec4ae3")
+    @Parameter(names = {"--traceid"},
+        description = "The hex string id of a given trace, for example 16cfbbd7beec4ae3")
     public String traceId = "";
   }
 
-  public void dump(Opts opts) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
+  public void dump(Opts opts)
+      throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
 
     if (opts.traceId.isEmpty()) {
       throw new IllegalArgumentException("--traceid option is required");
     }
 
-    final AccumuloClient client= opts.getAccumuloClient();
+    final AccumuloClient client = opts.getAccumuloClient();
     final String principal = opts.getPrincipal();
     final String table = opts.getTableName();
     if (!client.securityOperations().hasTablePermission(principal, table, TablePermission.READ)) {
@@ -66,7 +68,8 @@ public void dump(Opts opts) throws TableNotFoundException, AccumuloException, Ac
         Thread.currentThread().interrupt();
         throw new RuntimeException(e);
       }
-      while (!client.securityOperations().hasTablePermission(principal, table, TablePermission.READ)) {
+      while (!client.securityOperations().hasTablePermission(principal, table,
+          TablePermission.READ)) {
         log.info("{} didn't propagate read permission on {}", principal, table);
         try {
           Thread.sleep(1000);
@@ -86,7 +89,8 @@ public void print(String line) {
     });
   }
 
-  public static void main(String[] args) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
+  public static void main(String[] args)
+      throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
     TraceDumpExample traceDumpExample = new TraceDumpExample();
     Opts opts = new Opts();
     ScannerOpts scannerOpts = new ScannerOpts();
diff --git a/src/main/java/org/apache/accumulo/examples/client/TracingExample.java b/src/main/java/org/apache/accumulo/examples/client/TracingExample.java
index 78f7011..c89db03 100644
--- a/src/main/java/org/apache/accumulo/examples/client/TracingExample.java
+++ b/src/main/java/org/apache/accumulo/examples/client/TracingExample.java
@@ -71,7 +71,8 @@ public void enableTracing(Opts opts) throws Exception {
     DistributedTrace.enable("myHost", "myApp");
   }
 
-  public void execute(Opts opts) throws TableNotFoundException, InterruptedException, AccumuloException, AccumuloSecurityException, TableExistsException {
+  public void execute(Opts opts) throws TableNotFoundException, InterruptedException,
+      AccumuloException, AccumuloSecurityException, TableExistsException {
 
     if (opts.createtable) {
       opts.getAccumuloClient().tableOperations().create(opts.getTableName());
@@ -90,15 +91,18 @@ public void execute(Opts opts) throws TableNotFoundException, InterruptedExcepti
     }
   }
 
-  private void createEntries(Opts opts) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
+  private void createEntries(Opts opts)
+      throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
 
     // Trace the write operation. Note, unless you flush the BatchWriter, you will not capture
-    // the write operation as it is occurs asynchronously. You can optionally create additional Spans
+    // the write operation as it occurs asynchronously. You can optionally create additional
+    // Spans
     // within a given Trace as seen below around the flush
     TraceScope scope = Trace.startSpan("Client Write", Sampler.ALWAYS);
 
     System.out.println("TraceID: " + Long.toHexString(scope.getSpan().getTraceId()));
-    BatchWriter batchWriter = opts.getAccumuloClient().createBatchWriter(opts.getTableName(), new BatchWriterConfig());
+    BatchWriter batchWriter = opts.getAccumuloClient().createBatchWriter(opts.getTableName(),
+        new BatchWriterConfig());
 
     Mutation m = new Mutation("row");
     m.put("cf", "cq", "value");
@@ -112,7 +116,8 @@ private void createEntries(Opts opts) throws TableNotFoundException, AccumuloExc
     scope.close();
   }
 
-  private void readEntries(Opts opts) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
+  private void readEntries(Opts opts)
+      throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
 
     Scanner scanner = opts.getAccumuloClient().createScanner(opts.getTableName(), opts.auths);
 
@@ -125,8 +130,10 @@ private void readEntries(Opts opts) throws TableNotFoundException, AccumuloExcep
       System.out.println(entry.getKey().toString() + " -> " + entry.getValue().toString());
       ++numberOfEntriesRead;
     }
-    // You can add additional metadata (key, values) to Spans which will be able to be viewed in the Monitor
-    readScope.getSpan().addKVAnnotation("Number of Entries Read".getBytes(UTF_8), String.valueOf(numberOfEntriesRead).getBytes(UTF_8));
+    // You can add additional metadata (key, values) to Spans, which can be viewed in the
+    // Monitor
+    readScope.getSpan().addKVAnnotation("Number of Entries Read".getBytes(UTF_8),
+        String.valueOf(numberOfEntriesRead).getBytes(UTF_8));
 
     readScope.close();
   }
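
   Every span in this example follows the same HTrace bracket: start a scope,
   do the work, close the scope so the span completes and can be reported. A
   minimal sketch of that bracket, using only the calls already visible in
   this file:

       TraceScope scope = Trace.startSpan("my operation", Sampler.ALWAYS);
       try {
         // ... traced work; spans started here become children of this one ...
       } finally {
         scope.close(); // close so the span completes and can be reported
       }
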
diff --git a/src/main/java/org/apache/accumulo/examples/combiner/StatsCombiner.java b/src/main/java/org/apache/accumulo/examples/combiner/StatsCombiner.java
index cfa9b3a..2794c22 100644
--- a/src/main/java/org/apache/accumulo/examples/combiner/StatsCombiner.java
+++ b/src/main/java/org/apache/accumulo/examples/combiner/StatsCombiner.java
@@ -28,9 +28,11 @@
 import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
 
 /**
- * This combiner calculates the max, min, sum, and count of long integers represented as strings in values. It stores the result in a comma-separated value of
- * the form min,max,sum,count. If such a value is encountered while combining, its information is incorporated into the running calculations of min, max, sum,
- * and count. See {@link Combiner} for more information on which values are combined together.
+ * This combiner calculates the max, min, sum, and count of long integers represented as strings in
+ * values. It stores the result in a comma-separated value of the form min,max,sum,count. If such a
+ * value is encountered while combining, its information is incorporated into the running
+ * calculations of min, max, sum, and count. See {@link Combiner} for more information on which
+ * values are combined together.
  */
 public class StatsCombiner extends Combiner {
 
@@ -63,12 +65,14 @@ public Value reduce(Key key, Iterator<Value> iter) {
       }
     }
 
-    String ret = Long.toString(min, radix) + "," + Long.toString(max, radix) + "," + Long.toString(sum, radix) + "," + Long.toString(count, radix);
+    String ret = Long.toString(min, radix) + "," + Long.toString(max, radix) + ","
+        + Long.toString(sum, radix) + "," + Long.toString(count, radix);
     return new Value(ret.getBytes());
   }
 
   @Override
-  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
+  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
+      IteratorEnvironment env) throws IOException {
     super.init(source, options, env);
 
     if (options.containsKey(RADIX_OPTION))
@@ -92,7 +96,8 @@ public boolean validateOptions(Map<String,String> options) {
       return false;
 
     if (options.containsKey(RADIX_OPTION) && !options.get(RADIX_OPTION).matches("\\d+"))
-      throw new IllegalArgumentException("invalid option " + RADIX_OPTION + ":" + options.get(RADIX_OPTION));
+      throw new IllegalArgumentException(
+          "invalid option " + RADIX_OPTION + ":" + options.get(RADIX_OPTION));
 
     return true;
   }
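
   This diff never shows the combiner being attached; a sketch of the usual
   setup, assuming the radix option key is the literal "radix" and a column
   family named "stat" (both assumptions, as is the table name):

       IteratorSetting setting = new IteratorSetting(10, "stats", StatsCombiner.class);
       setting.addOption("radix", "10");              // option key assumed
       Combiner.setColumns(setting,
           Collections.singletonList(new IteratorSetting.Column("stat")));
       client.tableOperations().attachIterator("statsTable", setting);
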
diff --git a/src/main/java/org/apache/accumulo/examples/constraints/AlphaNumKeyConstraint.java b/src/main/java/org/apache/accumulo/examples/constraints/AlphaNumKeyConstraint.java
index 39ce728..c13ed13 100644
--- a/src/main/java/org/apache/accumulo/examples/constraints/AlphaNumKeyConstraint.java
+++ b/src/main/java/org/apache/accumulo/examples/constraints/AlphaNumKeyConstraint.java
@@ -102,8 +102,10 @@ public String getViolationDescription(short violationCode) {
     return null;
   }
 
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
+  public static void main(String[] args)
+      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties")
+        .build();
     try {
       client.tableOperations().create("testConstraints");
     } catch (TableExistsException e) {
@@ -111,9 +113,11 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
     }
 
     /**
-     * Add the {@link AlphaNumKeyConstraint} to the table. Be sure to use the fully qualified class name.
+     * Add the {@link AlphaNumKeyConstraint} to the table. Be sure to use the fully qualified class
+     * name.
      */
-    int num = client.tableOperations().addConstraint("testConstraints", "org.apache.accumulo.examples.constraints.AlphaNumKeyConstraint");
+    int num = client.tableOperations().addConstraint("testConstraints",
+        "org.apache.accumulo.examples.constraints.AlphaNumKeyConstraint");
 
     System.out.println("Attempting to write non alpha numeric data to testConstraints");
     try (BatchWriter bw = client.createBatchWriter("testConstraints")) {
@@ -121,7 +125,8 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
       m.put("cf1", "cq1", new Value(("value1").getBytes()));
       bw.addMutation(m);
     } catch (MutationsRejectedException e) {
-      e.getConstraintViolationSummaries().forEach(violationSummary -> System.out.println("Constraint violated: " + violationSummary.constrainClass));
+      e.getConstraintViolationSummaries().forEach(violationSummary -> System.out
+          .println("Constraint violated: " + violationSummary.constrainClass));
     }
 
     client.tableOperations().removeConstraint("testConstraints", num);
diff --git a/src/main/java/org/apache/accumulo/examples/constraints/MaxMutationSize.java b/src/main/java/org/apache/accumulo/examples/constraints/MaxMutationSize.java
index 813a6fd..0260760 100644
--- a/src/main/java/org/apache/accumulo/examples/constraints/MaxMutationSize.java
+++ b/src/main/java/org/apache/accumulo/examples/constraints/MaxMutationSize.java
@@ -51,8 +51,10 @@ public String getViolationDescription(short violationCode) {
     return violations;
   }
 
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
+  public static void main(String[] args)
+      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties")
+        .build();
     try {
       client.tableOperations().create("testConstraints");
     } catch (TableExistsException e) {
@@ -60,9 +62,11 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
     }
 
     /**
-     * Add the {@link MaxMutationSize} constraint to the table. Be sure to use the fully qualified class name
+     * Add the {@link MaxMutationSize} constraint to the table. Be sure to use the fully qualified
+     * class name
      */
-    int num = client.tableOperations().addConstraint("testConstraints", "org.apache.accumulo.examples.constraints.MaxMutationSize");
+    int num = client.tableOperations().addConstraint("testConstraints",
+        "org.apache.accumulo.examples.constraints.MaxMutationSize");
 
     System.out.println("Attempting to write a lot of mutations to testConstraints");
     try (BatchWriter bw = client.createBatchWriter("testConstraints")) {
@@ -71,7 +75,8 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
         m.put("cf" + i % 5000, "cq" + i, new Value(("value" + i).getBytes()));
       bw.addMutation(m);
     } catch (MutationsRejectedException e) {
-      e.getConstraintViolationSummaries().forEach(m -> System.out.println("Constraint violated: " + m.constrainClass));
+      e.getConstraintViolationSummaries()
+          .forEach(m -> System.out.println("Constraint violated: " + m.constrainClass));
     }
 
     client.tableOperations().removeConstraint("testConstraints", num);
diff --git a/src/main/java/org/apache/accumulo/examples/constraints/NumericValueConstraint.java b/src/main/java/org/apache/accumulo/examples/constraints/NumericValueConstraint.java
index 2b97495..28b7880 100644
--- a/src/main/java/org/apache/accumulo/examples/constraints/NumericValueConstraint.java
+++ b/src/main/java/org/apache/accumulo/examples/constraints/NumericValueConstraint.java
@@ -42,7 +42,8 @@
   static final short NON_NUMERIC_VALUE = 1;
   static final String VIOLATION_MESSAGE = "Value is not numeric";
 
-  private static final List<Short> VIOLATION_LIST = Collections.unmodifiableList(Arrays.asList(NON_NUMERIC_VALUE));
+  private static final List<Short> VIOLATION_LIST = Collections
+      .unmodifiableList(Arrays.asList(NON_NUMERIC_VALUE));
 
   private boolean isNumeric(byte bytes[]) {
     for (byte b : bytes) {
@@ -77,8 +78,10 @@ public String getViolationDescription(short violationCode) {
     return null;
   }
 
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
+  public static void main(String[] args)
+      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties")
+        .build();
     try {
       client.tableOperations().create("testConstraints");
     } catch (TableExistsException e) {
@@ -86,9 +89,11 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
     }
 
     /**
-     * Add the {@link NumericValueConstraint} constraint to the table.  Be sure to use the fully qualified class name
+     * Add the {@link NumericValueConstraint} constraint to the table. Be sure to use the fully
+     * qualified class name
      */
-    int num = client.tableOperations().addConstraint("testConstraints", "org.apache.accumulo.examples.constraints.NumericValueConstraint");
+    int num = client.tableOperations().addConstraint("testConstraints",
+        "org.apache.accumulo.examples.constraints.NumericValueConstraint");
 
     System.out.println("Attempting to write non numeric data to testConstraints");
     try (BatchWriter bw = client.createBatchWriter("testConstraints")) {
@@ -96,7 +101,8 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
       m.put("cf1", "cq1", new Value(("value1--$$@@%%").getBytes()));
       bw.addMutation(m);
     } catch (MutationsRejectedException e) {
-      e.getConstraintViolationSummaries().forEach(m -> System.out.println("Constraint violated: " + m.constrainClass));
+      e.getConstraintViolationSummaries()
+          .forEach(m -> System.out.println("Constraint violated: " + m.constrainClass));
     }
 
     client.tableOperations().removeConstraint("testConstraints", num);
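
   All three constraint examples in this patch follow the same small contract:
   check returns a list of violation codes (null or empty conventionally
   meaning none) and getViolationDescription maps a code to text. A minimal
   illustrative implementation against that interface (the class and its rule
   are invented for illustration):

       import java.util.Collections;
       import java.util.List;

       import org.apache.accumulo.core.constraints.Constraint;
       import org.apache.accumulo.core.data.ColumnUpdate;
       import org.apache.accumulo.core.data.Mutation;

       public class NoEmptyValueConstraint implements Constraint {

         @Override
         public List<Short> check(Environment env, Mutation mutation) {
           for (ColumnUpdate cu : mutation.getUpdates()) {
             if (cu.getValue().length == 0)
               return Collections.singletonList((short) 1);
           }
           return null; // no violations
         }

         @Override
         public String getViolationDescription(short violationCode) {
           return violationCode == 1 ? "Value is empty" : null;
         }
       }
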
diff --git a/src/main/java/org/apache/accumulo/examples/dirlist/FileCount.java b/src/main/java/org/apache/accumulo/examples/dirlist/FileCount.java
index 9c9f1a4..8e3090a 100644
--- a/src/main/java/org/apache/accumulo/examples/dirlist/FileCount.java
+++ b/src/main/java/org/apache/accumulo/examples/dirlist/FileCount.java
@@ -36,7 +36,8 @@
 import com.beust.jcommander.Parameter;
 
 /**
- * Computes recursive counts over file system information and stores them back into the same Accumulo table.
+ * Computes recursive counts over file system information and stores them back into the same
+ * Accumulo table.
  */
 public class FileCount {
 
@@ -65,7 +66,9 @@ void set(Value val) {
     }
 
     Value toValue() {
-      return new Value((dirCount + "," + fileCount + "," + recursiveDirCount + "," + recusiveFileCount).getBytes());
+      return new Value(
+          (dirCount + "," + fileCount + "," + recursiveDirCount + "," + recusiveFileCount)
+              .getBytes());
     }
 
     void incrementFiles() {
@@ -101,7 +104,8 @@ private int findMaxDepth(Scanner scanner, int min, int mid, int max) {
     if (max < min)
       return -1;
 
-    scanner.setRange(new Range(String.format("%03d", mid), true, String.format("%03d", mid + 1), false));
+    scanner.setRange(
+        new Range(String.format("%03d", mid), true, String.format("%03d", mid + 1), false));
 
     if (scanner.iterator().hasNext()) {
       // this depth exist, check to see if a larger depth exist
@@ -127,7 +131,8 @@ private int findMaxDepth(Scanner scanner) {
   }
 
   // find the count column and consume a row
-  private Entry<Key,Value> findCount(Entry<Key,Value> entry, Iterator<Entry<Key,Value>> iterator, CountValue cv) {
+  private Entry<Key,Value> findCount(Entry<Key,Value> entry, Iterator<Entry<Key,Value>> iterator,
+      CountValue cv) {
 
     Key key = entry.getKey();
     Text currentRow = key.getRow();
@@ -143,7 +148,8 @@ private int findMaxDepth(Scanner scanner) {
       if (key.compareRow(currentRow) != 0)
         return entry;
 
-      if (key.compareColumnFamily(QueryUtil.DIR_COLF) == 0 && key.compareColumnQualifier(QueryUtil.COUNTS_COLQ) == 0) {
+      if (key.compareColumnFamily(QueryUtil.DIR_COLF) == 0
+          && key.compareColumnQualifier(QueryUtil.COUNTS_COLQ) == 0) {
         cv.set(entry.getValue());
       }
 
@@ -179,9 +185,11 @@ private Mutation createMutation(int depth, String dir, CountValue countVal) {
     return m;
   }
 
-  private void calculateCounts(Scanner scanner, int depth, BatchWriter batchWriter) throws Exception {
+  private void calculateCounts(Scanner scanner, int depth, BatchWriter batchWriter)
+      throws Exception {
 
-    scanner.setRange(new Range(String.format("%03d", depth), true, String.format("%03d", depth + 1), false));
+    scanner.setRange(
+        new Range(String.format("%03d", depth), true, String.format("%03d", depth + 1), false));
 
     CountValue countVal = new CountValue();
 
@@ -237,7 +245,8 @@ private void calculateCounts(Scanner scanner, int depth, BatchWriter batchWriter
     }
   }
 
-  public FileCount(AccumuloClient client, String tableName, Authorizations auths, ColumnVisibility cv, ScannerOpts scanOpts, BatchWriterOpts bwOpts) throws Exception {
+  public FileCount(AccumuloClient client, String tableName, Authorizations auths,
+      ColumnVisibility cv, ScannerOpts scanOpts, BatchWriterOpts bwOpts) throws Exception {
     this.client = client;
     this.tableName = tableName;
     this.auths = auths;
@@ -279,7 +288,8 @@ public void run() throws Exception {
   }
 
   public static class Opts extends ClientOnRequiredTable {
-    @Parameter(names = "--vis", description = "use a given visibility for the new counts", converter = VisibilityConverter.class)
+    @Parameter(names = "--vis", description = "use a given visibility for the new counts",
+        converter = VisibilityConverter.class)
     ColumnVisibility visibility = new ColumnVisibility();
   }
 
@@ -290,7 +300,8 @@ public static void main(String[] args) throws Exception {
     String programName = FileCount.class.getName();
     opts.parseArgs(programName, args, scanOpts, bwOpts);
 
-    FileCount fileCount = new FileCount(opts.getAccumuloClient(), opts.getTableName(), opts.auths, opts.visibility, scanOpts, bwOpts);
+    FileCount fileCount = new FileCount(opts.getAccumuloClient(), opts.getTableName(), opts.auths,
+        opts.visibility, scanOpts, bwOpts);
     fileCount.run();
   }
 }
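
   findMaxDepth above binary-searches the depth-prefixed rows for the deepest
   populated level. The same search against a plain boolean oracle, with
   made-up depths, shows the shape of the recursion as a loop:

       int min = 0, max = 999, deepest = -1;
       boolean[] populated = new boolean[1000];
       populated[0] = populated[1] = populated[2] = true; // depths 0-2 exist
       while (min <= max) {
         int mid = (min + max) / 2;
         if (populated[mid]) {
           deepest = mid;     // this depth exists, check for a larger depth
           min = mid + 1;
         } else {
           max = mid - 1;
         }
       }
       // deepest == 2
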
diff --git a/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java b/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java
index 0fba29a..3c4a0f8 100644
--- a/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java
+++ b/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java
@@ -38,8 +38,9 @@
 import com.beust.jcommander.Parameter;
 
 /**
- * Recursively lists the files and directories under a given path, ingests their names and file info into one Accumulo table, indexes the file names in a
- * separate table, and the file data into a third table.
+ * Recursively lists the files and directories under a given path, ingests their names and file info
+ * into one Accumulo table, indexes the file names in a separate table, and ingests the file data
+ * into a third table.
  */
 public class Ingest {
   static final Value nullValue = new Value(new byte[0]);
@@ -50,8 +51,8 @@
   public static final String HASH_CQ = "md5";
   public static final Encoder<Long> encoder = LongCombiner.FIXED_LEN_ENCODER;
 
-  public static Mutation buildMutation(ColumnVisibility cv, String path, boolean isDir, boolean isHidden, boolean canExec, long length, long lastmod,
-      String hash) {
+  public static Mutation buildMutation(ColumnVisibility cv, String path, boolean isDir,
+      boolean isHidden, boolean canExec, long length, long lastmod, String hash) {
     if (path.equals("/"))
       path = "";
     Mutation m = new Mutation(QueryUtil.getRow(path));
@@ -69,7 +70,8 @@ public static Mutation buildMutation(ColumnVisibility cv, String path, boolean i
     return m;
   }
 
-  private static void ingest(File src, ColumnVisibility cv, BatchWriter dirBW, BatchWriter indexBW, FileDataIngest fdi, BatchWriter data) throws Exception {
+  private static void ingest(File src, ColumnVisibility cv, BatchWriter dirBW, BatchWriter indexBW,
+      FileDataIngest fdi, BatchWriter data) throws Exception {
     // build main table entry
     String path = null;
     try {
@@ -89,7 +91,8 @@ private static void ingest(File src, ColumnVisibility cv, BatchWriter dirBW, Bat
       }
     }
 
-    dirBW.addMutation(buildMutation(cv, path, src.isDirectory(), src.isHidden(), src.canExecute(), src.length(), src.lastModified(), hash));
+    dirBW.addMutation(buildMutation(cv, path, src.isDirectory(), src.isHidden(), src.canExecute(),
+        src.length(), src.lastModified(), hash));
 
     // build index table entries
     Text row = QueryUtil.getForwardIndex(path);
@@ -106,7 +109,8 @@ private static void ingest(File src, ColumnVisibility cv, BatchWriter dirBW, Bat
     }
   }
 
-  private static void recurse(File src, ColumnVisibility cv, BatchWriter dirBW, BatchWriter indexBW, FileDataIngest fdi, BatchWriter data) throws Exception {
+  private static void recurse(File src, ColumnVisibility cv, BatchWriter dirBW, BatchWriter indexBW,
+      FileDataIngest fdi, BatchWriter data) throws Exception {
     // ingest this File
     ingest(src, cv, dirBW, indexBW, fdi, data);
     // recurse into subdirectories
@@ -127,7 +131,8 @@ private static void recurse(File src, ColumnVisibility cv, BatchWriter dirBW, Ba
     String indexTable = "indexTable";
     @Parameter(names = "--dataTable", description = "the file data, chunked into parts")
     String dataTable = "dataTable";
-    @Parameter(names = "--vis", description = "the visibility to mark the data", converter = VisibilityConverter.class)
+    @Parameter(names = "--vis", description = "the visibility to mark the data",
+        converter = VisibilityConverter.class)
     ColumnVisibility visibility = new ColumnVisibility();
     @Parameter(names = "--chunkSize", description = "the size of chunks when breaking down files")
     int chunkSize = 100000;
@@ -140,14 +145,15 @@ public static void main(String[] args) throws Exception {
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     opts.parseArgs(Ingest.class.getName(), args, bwOpts);
 
-    AccumuloClient client= opts.getAccumuloClient();
+    AccumuloClient client = opts.getAccumuloClient();
     if (!client.tableOperations().exists(opts.nameTable))
       client.tableOperations().create(opts.nameTable);
     if (!client.tableOperations().exists(opts.indexTable))
       client.tableOperations().create(opts.indexTable);
     if (!client.tableOperations().exists(opts.dataTable)) {
       client.tableOperations().create(opts.dataTable);
-      client.tableOperations().attachIterator(opts.dataTable, new IteratorSetting(1, ChunkCombiner.class));
+      client.tableOperations().attachIterator(opts.dataTable,
+          new IteratorSetting(1, ChunkCombiner.class));
     }
 
     BatchWriter dirBW = client.createBatchWriter(opts.nameTable, bwOpts.getBatchWriterConfig());
diff --git a/src/main/java/org/apache/accumulo/examples/dirlist/QueryUtil.java b/src/main/java/org/apache/accumulo/examples/dirlist/QueryUtil.java
index f7c5f45..da2a96c 100644
--- a/src/main/java/org/apache/accumulo/examples/dirlist/QueryUtil.java
+++ b/src/main/java/org/apache/accumulo/examples/dirlist/QueryUtil.java
@@ -37,11 +37,11 @@
 import com.beust.jcommander.Parameter;
 
 /**
- * Provides utility methods for getting the info for a file, listing the contents of a directory, and performing single wild card searches on file or directory
- * names.
+ * Provides utility methods for getting the info for a file, listing the contents of a directory,
+ * and performing single wild card searches on file or directory names.
  */
 public class QueryUtil {
-  private AccumuloClient client= null;
+  private AccumuloClient client = null;
   private String tableName;
   private Authorizations auths;
   public static final Text DIR_COLF = new Text("dir");
@@ -72,7 +72,8 @@ public static int getDepth(String path) {
   }
 
   /**
-   * Given a path, construct an accumulo row prepended with the path's depth for the directory table.
+   * Given a path, construct an accumulo row prepended with the path's depth for the directory
+   * table.
    *
    * @param path
    *          the full path of a file or directory
@@ -85,7 +86,8 @@ public static Text getRow(String path) {
   }
 
   /**
-   * Given a path, construct an accumulo row prepended with the {@link #FORWARD_PREFIX} for the index table.
+   * Given a path, construct an accumulo row prepended with the {@link #FORWARD_PREFIX} for the
+   * index table.
    *
    * @param path
    *          the full path of a file or directory
@@ -101,7 +103,8 @@ public static Text getForwardIndex(String path) {
   }
 
   /**
-   * Given a path, construct an accumulo row prepended with the {@link #REVERSE_PREFIX} with the path reversed for the index table.
+   * Given a path, construct an accumulo row prepended with the {@link #REVERSE_PREFIX} with the
+   * path reversed for the index table.
    *
    * @param path
    *          the full path of a file or directory
@@ -147,7 +150,8 @@ public static String getType(Text colf) {
     for (Entry<Key,Value> e : scanner) {
       String type = getType(e.getKey().getColumnFamily());
       data.put("fullname", e.getKey().getRow().toString().substring(3));
-      data.put(type + e.getKey().getColumnQualifier().toString() + ":" + e.getKey().getColumnVisibility().toString(), new String(e.getValue().get()));
+      data.put(type + e.getKey().getColumnQualifier().toString() + ":"
+          + e.getKey().getColumnVisibility().toString(), new String(e.getValue().get()));
     }
     return data;
   }
@@ -172,7 +176,8 @@ public static String getType(Text colf) {
         fim.put(name, new TreeMap<String,String>());
         fim.get(name).put("fullname", e.getKey().getRow().toString().substring(3));
       }
-      fim.get(name).put(type + e.getKey().getColumnQualifier().toString() + ":" + e.getKey().getColumnVisibility().toString(), new String(e.getValue().get()));
+      fim.get(name).put(type + e.getKey().getColumnQualifier().toString() + ":"
+          + e.getKey().getColumnVisibility().toString(), new String(e.getValue().get()));
     }
     return fim;
   }
@@ -191,10 +196,12 @@ public static String getType(Text colf) {
   }
 
   /**
-   * Scans over the index table for files or directories with a given name, prefix, or suffix (indicated by a wildcard '*' at the beginning or end of the term.
+   * Scans over the index table for files or directories with a given name, prefix, or suffix
+   * (indicated by a wildcard '*' at the beginning or end of the term).
    *
    * @param exp
-   *          the name a file or directory to search for with an optional wildcard '*' at the beginning or end
+   *          the name of a file or directory to search for with an optional wildcard '*' at the
+   *          beginning or end
    */
   public Iterable<Entry<Key,Value>> singleRestrictedWildCardSearch(String exp) throws Exception {
     if (exp.indexOf("/") >= 0)
@@ -218,7 +225,8 @@ public static String getType(Text colf) {
   }
 
   /**
-   * Scans over the index table for files or directories with a given name that can contain a single wildcard '*' anywhere in the term.
+   * Scans over the index table for files or directories with a given name that can contain a single
+   * wildcard '*' anywhere in the term.
    *
    * @param exp
    *          the name of a file or directory to search for with one optional wildcard '*'
@@ -240,10 +248,12 @@ public static String getType(Text colf) {
 
     Scanner scanner = client.createScanner(tableName, auths);
     if (firstPart.length() >= lastPart.length()) {
-      System.out.println("executing middle wildcard search for " + regexString + " from entries starting with " + firstPart);
+      System.out.println("executing middle wildcard search for " + regexString
+          + " from entries starting with " + firstPart);
       scanner.setRange(Range.prefix(getForwardIndex(firstPart)));
     } else {
-      System.out.println("executing middle wildcard search for " + regexString + " from entries ending with " + lastPart);
+      System.out.println("executing middle wildcard search for " + regexString
+          + " from entries ending with " + lastPart);
       scanner.setRange(Range.prefix(getReverseIndex(lastPart)));
     }
     IteratorSetting regex = new IteratorSetting(50, "regex", RegExFilter.class);
@@ -260,7 +270,8 @@ public static String getType(Text colf) {
   }
 
   /**
-   * Lists the contents of a directory using the directory table, or searches for file or directory names (if the -search flag is included).
+   * Lists the contents of a directory using the directory table, or searches for file or directory
+   * names (if the -search flag is included).
    */
   public static void main(String[] args) throws Exception {
     Opts opts = new Opts();
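
   Two conventions in this file are easy to miss in the rewrapped Javadoc:
   rows carry a zero-padded depth prefix (the %03d format appears in the
   FileCount ranges above), and suffix searches reverse the term so they can
   run as prefix scans over the reverse index. A self-contained illustration
   (the exact row layout is otherwise an assumption):

       String path = "/a/b/c";
       int depth = path.split("/").length - 1;                  // 3
       System.out.println(String.format("%03d", depth) + path); // 003/a/b/c
       // A search for names ending in ".pdf" scans the reverse index by prefix:
       System.out.println(new StringBuilder(".pdf").reverse()); // fdp.
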
diff --git a/src/main/java/org/apache/accumulo/examples/dirlist/Viewer.java b/src/main/java/org/apache/accumulo/examples/dirlist/Viewer.java
index 858d850..4cb2b3d 100644
--- a/src/main/java/org/apache/accumulo/examples/dirlist/Viewer.java
+++ b/src/main/java/org/apache/accumulo/examples/dirlist/Viewer.java
@@ -126,7 +126,8 @@ public void populateChildren(DefaultMutableTreeNode node) throws TableNotFoundEx
   }
 
   public void init() throws TableNotFoundException {
-    DefaultMutableTreeNode root = new DefaultMutableTreeNode(new NodeInfo(topPath, q.getData(topPath)));
+    DefaultMutableTreeNode root = new DefaultMutableTreeNode(
+        new NodeInfo(topPath, q.getData(topPath)));
     populate(root);
     populateChildren(root);
 
diff --git a/src/main/java/org/apache/accumulo/examples/filedata/CharacterHistogram.java b/src/main/java/org/apache/accumulo/examples/filedata/CharacterHistogram.java
index 0f25df5..450b666 100644
--- a/src/main/java/org/apache/accumulo/examples/filedata/CharacterHistogram.java
+++ b/src/main/java/org/apache/accumulo/examples/filedata/CharacterHistogram.java
@@ -40,8 +40,9 @@
 import com.beust.jcommander.Parameter;
 
 /**
- * A MapReduce that computes a histogram of byte frequency for each file and stores the histogram alongside the file data. The {@link ChunkInputFormat} is used
- * to read the file data from Accumulo.
+ * A MapReduce that computes a histogram of byte frequency for each file and stores the histogram
+ * alongside the file data. The {@link ChunkInputFormat} is used to read the file data from
+ * Accumulo.
  */
 public class CharacterHistogram extends Configured implements Tool {
   public static final String VIS = "vis";
@@ -54,7 +55,8 @@ public static void main(String[] args) throws Exception {
     private ColumnVisibility cv;
 
     @Override
-    public void map(List<Entry<Key,Value>> k, InputStream v, Context context) throws IOException, InterruptedException {
+    public void map(List<Entry<Key,Value>> k, InputStream v, Context context)
+        throws IOException, InterruptedException {
       Long[] hist = new Long[256];
       for (int i = 0; i < hist.length; i++)
         hist[i] = 0l;
@@ -65,7 +67,8 @@ public void map(List<Entry<Key,Value>> k, InputStream v, Context context) throws
       }
       v.close();
       Mutation m = new Mutation(k.get(0).getKey().getRow());
-      m.put("info", "hist", cv, new Value(SummingArrayCombiner.STRING_ARRAY_ENCODER.encode(Arrays.asList(hist))));
+      m.put("info", "hist", cv,
+          new Value(SummingArrayCombiner.STRING_ARRAY_ENCODER.encode(Arrays.asList(hist))));
       context.write(new Text(), m);
     }
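
   The mapper above reassembles each file as an InputStream and bumps one of
   256 counters per byte read; the hunks elide the read loop itself, so here
   is the core idea as a stand-alone program (simplified to long[] rather
   than the Long[] the mapper encodes for the combiner):

       import java.io.ByteArrayInputStream;
       import java.io.InputStream;

       public class HistogramSketch {
         public static void main(String[] args) throws Exception {
           InputStream v = new ByteArrayInputStream("abba".getBytes());
           long[] hist = new long[256];
           int b;
           while ((b = v.read()) >= 0)
             hist[b]++;                                   // count each byte value
           v.close();
           System.out.println(hist['a'] + " " + hist['b']); // 2 2
         }
       }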
 
diff --git a/src/main/java/org/apache/accumulo/examples/filedata/ChunkCombiner.java b/src/main/java/org/apache/accumulo/examples/filedata/ChunkCombiner.java
index 15c44d0..4e887fa 100644
--- a/src/main/java/org/apache/accumulo/examples/filedata/ChunkCombiner.java
+++ b/src/main/java/org/apache/accumulo/examples/filedata/ChunkCombiner.java
@@ -31,7 +31,8 @@
 import org.apache.hadoop.io.Text;
 
 /**
- * This iterator dedupes chunks and sets their visibilities to the combined visibility of the refs columns. For example, it would combine
+ * This iterator dedupes chunks and sets their visibilities to the combined visibility of the refs
+ * columns. For example, it would combine
  *
  * <pre>
  *    row1 refs uid1\0a A&amp;B V0
@@ -57,7 +58,8 @@
 
   private SortedKeyValueIterator<Key,Value> source;
   private SortedKeyValueIterator<Key,Value> refsSource;
-  private static final Collection<ByteSequence> refsColf = Collections.singleton(FileDataIngest.REFS_CF_BS);
+  private static final Collection<ByteSequence> refsColf = Collections
+      .singleton(FileDataIngest.REFS_CF_BS);
   private Map<Text,byte[]> lastRowVC = Collections.emptyMap();
 
   private Key topKey = null;
@@ -66,7 +68,8 @@
   public ChunkCombiner() {}
 
   @Override
-  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
+  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
+      IteratorEnvironment env) throws IOException {
     this.source = source;
     this.refsSource = source.deepCopy(env);
   }
@@ -82,7 +85,8 @@ public void next() throws IOException {
   }
 
   @Override
-  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
+  public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
+      throws IOException {
     source.seek(range, columnFamilies, inclusive);
     findTop();
   }
@@ -111,14 +115,16 @@ private void findTop() throws IOException {
         maxTS = source.getTopKey().getTimestamp();
 
       if (!topValue.equals(source.getTopValue()))
-        throw new RuntimeException("values not equals " + topKey + " " + source.getTopKey() + " : " + diffInfo(topValue, source.getTopValue()));
+        throw new RuntimeException("values not equals " + topKey + " " + source.getTopKey() + " : "
+            + diffInfo(topValue, source.getTopValue()));
 
       source.next();
     }
 
     byte[] vis = getVisFromRefs();
     if (vis != null) {
-      topKey = new Key(topKey.getRowData().toArray(), topKey.getColumnFamilyData().toArray(), topKey.getColumnQualifierData().toArray(), vis, maxTS);
+      topKey = new Key(topKey.getRowData().toArray(), topKey.getColumnFamilyData().toArray(),
+          topKey.getColumnQualifierData().toArray(), vis, maxTS);
     }
     return vis;
   }
@@ -154,7 +160,8 @@ private String diffInfo(Value v1, Value v2) {
 
     for (int i = 0; i < vb1.length; i++) {
       if (vb1[i] != vb2[i]) {
-        return String.format("first diff at offset %,d 0x%02x != 0x%02x", i, 0xff & vb1[i], 0xff & vb2[i]);
+        return String.format("first diff at offset %,d 0x%02x != 0x%02x", i, 0xff & vb1[i],
+            0xff & vb2[i]);
       }
     }
 
diff --git a/src/main/java/org/apache/accumulo/examples/filedata/ChunkInputFormat.java b/src/main/java/org/apache/accumulo/examples/filedata/ChunkInputFormat.java
index 62be1f4..dd14bc0 100644
--- a/src/main/java/org/apache/accumulo/examples/filedata/ChunkInputFormat.java
+++ b/src/main/java/org/apache/accumulo/examples/filedata/ChunkInputFormat.java
@@ -32,13 +32,13 @@
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 
 /**
- * An InputFormat that turns the file data ingested with {@link FileDataIngest} into an InputStream using {@link ChunkInputStream}. Mappers used with this
- * InputFormat must close the InputStream.
+ * An InputFormat that turns the file data ingested with {@link FileDataIngest} into an InputStream
+ * using {@link ChunkInputStream}. Mappers used with this InputFormat must close the InputStream.
  */
 public class ChunkInputFormat extends InputFormatBase<List<Entry<Key,Value>>,InputStream> {
   @Override
-  public RecordReader<List<Entry<Key,Value>>,InputStream> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException,
-      InterruptedException {
+  public RecordReader<List<Entry<Key,Value>>,InputStream> createRecordReader(InputSplit split,
+      TaskAttemptContext context) throws IOException, InterruptedException {
     return new RecordReaderBase<List<Entry<Key,Value>>,InputStream>() {
       private PeekingIterator<Entry<Key,Value>> peekingScannerIterator;
 
diff --git a/src/main/java/org/apache/accumulo/examples/filedata/ChunkInputStream.java b/src/main/java/org/apache/accumulo/examples/filedata/ChunkInputStream.java
index 9d6b59d..af521f0 100644
--- a/src/main/java/org/apache/accumulo/examples/filedata/ChunkInputStream.java
+++ b/src/main/java/org/apache/accumulo/examples/filedata/ChunkInputStream.java
@@ -31,7 +31,8 @@
 import org.slf4j.LoggerFactory;
 
 /**
- * An input stream that reads file data stored in one or more Accumulo values. Used by {@link ChunkInputFormat} to present input streams to a mapper.
+ * An input stream that reads file data stored in one or more Accumulo values. Used by
+ * {@link ChunkInputFormat} to present input streams to a mapper.
  */
 public class ChunkInputStream extends InputStream {
   private static final Logger log = LoggerFactory.getLogger(ChunkInputStream.class);
@@ -144,7 +145,8 @@ private int fill() throws IOException {
     }
 
     if (gotEndMarker) {
-      log.debug("got another chunk after end marker: " + currentKey.toString() + " " + thisKey.toString());
+      log.debug("got another chunk after end marker: " + currentKey.toString() + " "
+          + thisKey.toString());
       clear();
       throw new IOException("found extra chunk after end marker");
     }
@@ -152,7 +154,8 @@ private int fill() throws IOException {
     // got new chunk of the same file, check that it's the next chunk
     int thisChunk = FileDataIngest.bytesToInt(thisKey.getColumnQualifier().getBytes(), 4);
     if (thisChunk != currentChunk + 1) {
-      log.debug("new chunk same file, unexpected chunkID: " + currentKey.toString() + " " + thisKey.toString());
+      log.debug("new chunk same file, unexpected chunkID: " + currentKey.toString() + " "
+          + thisKey.toString());
       clear();
       throw new IOException("missing chunks between " + currentChunk + " and " + thisChunk);
     }
@@ -173,7 +176,8 @@ private int fill() throws IOException {
 
   public Set<Text> getVisibilities() {
     if (source != null)
-      throw new IllegalStateException("don't get visibilities before chunks have been completely read");
+      throw new IllegalStateException(
+          "don't get visibilities before chunks have been completely read");
     return currentVis;
   }
 
@@ -184,7 +188,8 @@ public int read() throws IOException {
     log.debug("pos: " + pos + " count: " + count);
     if (pos >= count) {
       if (fill() <= 0) {
-        log.debug("done reading input stream at key: " + (currentKey == null ? "null" : currentKey.toString()));
+        log.debug("done reading input stream at key: "
+            + (currentKey == null ? "null" : currentKey.toString()));
         if (source != null && source.hasNext())
           log.debug("next key: " + source.peek().getKey());
         clear();
@@ -198,7 +203,8 @@ public int read() throws IOException {
   public int read(byte[] b, int off, int len) throws IOException {
     if (b == null) {
       throw new NullPointerException();
-    } else if ((off < 0) || (off > b.length) || (len < 0) || ((off + len) > b.length) || ((off + len) < 0)) {
+    } else if ((off < 0) || (off > b.length) || (len < 0) || ((off + len) > b.length)
+        || ((off + len) < 0)) {
       throw new IndexOutOfBoundsException();
     } else if (len == 0) {
       return 0;
@@ -211,7 +217,8 @@ public int read(byte[] b, int off, int len) throws IOException {
       log.debug(avail + " available in current local buffer");
       if (avail <= 0) {
         if (fill() <= 0) {
-          log.debug("done reading input stream at key: " + (currentKey == null ? "null" : currentKey.toString()));
+          log.debug("done reading input stream at key: "
+              + (currentKey == null ? "null" : currentKey.toString()));
           if (source != null && source.hasNext())
             log.debug("next key: " + source.peek().getKey());
           clear();
diff --git a/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java b/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java
index f4c2cbe..e26f849 100644
--- a/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java
+++ b/src/main/java/org/apache/accumulo/examples/filedata/FileDataIngest.java
@@ -47,8 +47,10 @@
   public static final Text REFS_CF = new Text("refs");
   public static final String REFS_ORIG_FILE = "name";
   public static final String REFS_FILE_EXT = "filext";
-  public static final ByteSequence CHUNK_CF_BS = new ArrayByteSequence(CHUNK_CF.getBytes(), 0, CHUNK_CF.getLength());
-  public static final ByteSequence REFS_CF_BS = new ArrayByteSequence(REFS_CF.getBytes(), 0, REFS_CF.getLength());
+  public static final ByteSequence CHUNK_CF_BS = new ArrayByteSequence(CHUNK_CF.getBytes(), 0,
+      CHUNK_CF.getLength());
+  public static final ByteSequence REFS_CF_BS = new ArrayByteSequence(REFS_CF.getBytes(), 0,
+      REFS_CF.getLength());
 
   int chunkSize;
   byte[] chunkSizeBytes;
@@ -68,7 +70,8 @@ public FileDataIngest(int chunkSize, ColumnVisibility colvis) {
     cv = colvis;
   }
 
-  public String insertFileData(String filename, BatchWriter bw) throws MutationsRejectedException, IOException {
+  public String insertFileData(String filename, BatchWriter bw)
+      throws MutationsRejectedException, IOException {
     if (chunkSize == 0)
       return "";
     md5digest.reset();
@@ -98,7 +101,8 @@ public String insertFileData(String filename, BatchWriter bw) throws MutationsRe
 
     // write info to accumulo
     Mutation m = new Mutation(row);
-    m.put(REFS_CF, KeyUtil.buildNullSepText(uid, REFS_ORIG_FILE), cv, new Value(filename.getBytes()));
+    m.put(REFS_CF, KeyUtil.buildNullSepText(uid, REFS_ORIG_FILE), cv,
+        new Value(filename.getBytes()));
     String fext = getExt(filename);
     if (fext != null)
       m.put(REFS_CF, KeyUtil.buildNullSepText(uid, REFS_FILE_EXT), cv, new Value(fext.getBytes()));
@@ -123,7 +127,8 @@ else if (moreRead < 0)
         m.put(CHUNK_CF, chunkCQ, cv, new Value(buf, 0, numRead));
         bw.addMutation(m);
         if (chunkCount == Integer.MAX_VALUE)
-          throw new RuntimeException("too many chunks for file " + filename + ", try raising chunk size");
+          throw new RuntimeException(
+              "too many chunks for file " + filename + ", try raising chunk size");
         chunkCount++;
         numRead = fis.read(buf);
       }
@@ -143,7 +148,8 @@ else if (moreRead < 0)
   public static int bytesToInt(byte[] b, int offset) {
     if (b.length <= offset + 3)
       throw new NumberFormatException("couldn't pull integer from bytes at offset " + offset);
-    int i = (((b[offset] & 255) << 24) + ((b[offset + 1] & 255) << 16) + ((b[offset + 2] & 255) << 8) + ((b[offset + 3] & 255) << 0));
+    int i = (((b[offset] & 255) << 24) + ((b[offset + 1] & 255) << 16)
+        + ((b[offset + 2] & 255) << 8) + ((b[offset + 3] & 255) << 0));
     return i;
   }
 
@@ -171,7 +177,8 @@ public String hexString(byte[] bytes) {
   }
 
   public static class Opts extends ClientOnRequiredTable {
-    @Parameter(names = "--vis", description = "use a given visibility for the new counts", converter = VisibilityConverter.class)
+    @Parameter(names = "--vis", description = "use a given visibility for the new counts",
+        converter = VisibilityConverter.class)
     ColumnVisibility visibility = new ColumnVisibility();
 
     @Parameter(names = "--chunk", description = "size of the chunks used to store partial files")
@@ -186,10 +193,11 @@ public static void main(String[] args) throws Exception {
     BatchWriterOpts bwOpts = new BatchWriterOpts();
     opts.parseArgs(FileDataIngest.class.getName(), args, bwOpts);
 
-    AccumuloClient client= opts.getAccumuloClient();
+    AccumuloClient client = opts.getAccumuloClient();
     if (!client.tableOperations().exists(opts.getTableName())) {
       client.tableOperations().create(opts.getTableName());
-      client.tableOperations().attachIterator(opts.getTableName(), new IteratorSetting(1, ChunkCombiner.class));
+      client.tableOperations().attachIterator(opts.getTableName(),
+          new IteratorSetting(1, ChunkCombiner.class));
     }
     BatchWriter bw = client.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
     FileDataIngest fdi = new FileDataIngest(opts.chunkSize, opts.visibility);
@@ -197,7 +205,7 @@ public static void main(String[] args) throws Exception {
       fdi.insertFileData(filename, bw);
     }
     bw.close();
-    //TODO
-    //opts.stopTracing();
+    // TODO
+    // opts.stopTracing();
   }
 }
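
   The rewrapped bytesToInt above is a plain big-endian decode, which one
   worked value makes concrete:

       byte[] b = {0, 0, 1, 2};
       // (0 << 24) + (0 << 16) + (1 << 8) + 2 = 258
       System.out.println(FileDataIngest.bytesToInt(b, 0)); // 258
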
diff --git a/src/main/java/org/apache/accumulo/examples/filedata/FileDataQuery.java b/src/main/java/org/apache/accumulo/examples/filedata/FileDataQuery.java
index 094828b..3d5d78a 100644
--- a/src/main/java/org/apache/accumulo/examples/filedata/FileDataQuery.java
+++ b/src/main/java/org/apache/accumulo/examples/filedata/FileDataQuery.java
@@ -33,8 +33,8 @@
 import org.apache.accumulo.core.util.PeekingIterator;
 
 /**
- * Retrieves file data based on the hash of the file. Used by the {@link org.apache.accumulo.examples.dirlist.Viewer}. See README.dirlist for
- * instructions.
+ * Retrieves file data based on the hash of the file. Used by the
+ * {@link org.apache.accumulo.examples.dirlist.Viewer}. See README.dirlist for instructions.
  */
 public class FileDataQuery {
   List<Entry<Key,Value>> lastRefs;
diff --git a/src/main/java/org/apache/accumulo/examples/filedata/VisibilityCombiner.java b/src/main/java/org/apache/accumulo/examples/filedata/VisibilityCombiner.java
index 819f710..ec1814b 100644
--- a/src/main/java/org/apache/accumulo/examples/filedata/VisibilityCombiner.java
+++ b/src/main/java/org/apache/accumulo/examples/filedata/VisibilityCombiner.java
@@ -21,7 +21,8 @@
 import org.apache.accumulo.core.data.ByteSequence;
 
 /**
- * A utility for merging visibilities into the form {@code (VIS1)|(VIS2)|...|(VISN)}. Used by the {@link ChunkCombiner}.
+ * A utility for merging visibilities into the form {@code (VIS1)|(VIS2)|...|(VISN)}. Used by the
+ * {@link ChunkCombiner}.
  */
 public class VisibilityCombiner {
 
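
A minimal sketch of the merge the javadoc above describes, assuming the simplest model (a set of distinct expression strings OR-ed together); this illustrates the output shape only and is not the class's actual implementation.

import java.util.LinkedHashSet;
import java.util.Set;
import java.util.stream.Collectors;

public class VisibilityMergeSketch {
  private final Set<String> expressions = new LinkedHashSet<>();

  void add(String visibility) {
    if (!visibility.isEmpty())
      expressions.add(visibility); // duplicate expressions collapse via the set
  }

  String get() {
    // wrap each expression in parentheses and OR them: (VIS1)|(VIS2)|...|(VISN)
    return expressions.stream().map(v -> "(" + v + ")").collect(Collectors.joining("|"));
  }

  public static void main(String[] args) {
    VisibilityMergeSketch vc = new VisibilityMergeSketch();
    vc.add("A&B");
    vc.add("C");
    vc.add("A&B"); // repeated, kept once
    System.out.println(vc.get()); // prints (A&B)|(C)
  }
}
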
diff --git a/src/main/java/org/apache/accumulo/examples/helloworld/InsertWithBatchWriter.java b/src/main/java/org/apache/accumulo/examples/helloworld/InsertWithBatchWriter.java
index 2afd2b4..8d38b0d 100644
--- a/src/main/java/org/apache/accumulo/examples/helloworld/InsertWithBatchWriter.java
+++ b/src/main/java/org/apache/accumulo/examples/helloworld/InsertWithBatchWriter.java
@@ -16,7 +16,6 @@
  */
 package org.apache.accumulo.examples.helloworld;
 
-import com.beust.jcommander.Parameter;
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
@@ -30,6 +29,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Inserts 10K rows (50K entries) into accumulo with each row having 5 entries.
  */
@@ -42,7 +43,8 @@
     String clientProps = "conf/accumulo-client.properties";
   }
 
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+  public static void main(String[] args)
+      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
     Opts opts = new Opts();
     opts.parseArgs(InsertWithBatchWriter.class.getName(), args);
 
@@ -58,7 +60,8 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
       for (int i = 0; i < 10000; i++) {
         Mutation m = new Mutation(String.format("row_%d", i));
         for (int j = 0; j < 5; j++) {
-          m.put("colfam", String.format("colqual_%d", j), new Value((String.format("value_%d_%d", i, j)).getBytes()));
+          m.put("colfam", String.format("colqual_%d", j),
+              new Value((String.format("value_%d_%d", i, j)).getBytes()));
         }
         bw.addMutation(m);
         if (i % 100 == 0) {
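
The write path being rewrapped above reduces to the following sketch. It reuses only calls that appear verbatim in this diff (the properties-based client builder, createBatchWriter, Mutation.put), and it assumes the table "hellotable" already exists, as that is the table ReadData scans later in this PR.

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;

public class BatchWriterSketch {
  public static void main(String[] args) throws Exception {
    AccumuloClient client = Accumulo.newClient()
        .usingProperties("conf/accumulo-client.properties").build();
    try (BatchWriter bw = client.createBatchWriter("hellotable")) {
      for (int i = 0; i < 10000; i++) {
        Mutation m = new Mutation(String.format("row_%d", i));
        for (int j = 0; j < 5; j++) { // 5 entries per row, same layout as the example
          m.put("colfam", String.format("colqual_%d", j),
              new Value(String.format("value_%d_%d", i, j).getBytes()));
        }
        bw.addMutation(m); // buffered client-side; flushed in batches
      }
    } // close() flushes any remaining buffered mutations
  }
}
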
diff --git a/src/main/java/org/apache/accumulo/examples/helloworld/ReadData.java b/src/main/java/org/apache/accumulo/examples/helloworld/ReadData.java
index 265f799..7f37ed3 100644
--- a/src/main/java/org/apache/accumulo/examples/helloworld/ReadData.java
+++ b/src/main/java/org/apache/accumulo/examples/helloworld/ReadData.java
@@ -18,7 +18,6 @@
 
 import java.util.Map.Entry;
 
-import com.beust.jcommander.Parameter;
 import org.apache.accumulo.core.client.Accumulo;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.AccumuloException;
@@ -33,6 +32,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.beust.jcommander.Parameter;
+
 /**
  * Reads all data between two rows
  */
@@ -45,7 +46,8 @@
     String clientProps = "conf/accumulo-client.properties";
   }
 
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+  public static void main(String[] args)
+      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
     Opts opts = new Opts();
     opts.parseArgs(ReadData.class.getName(), args);
 
@@ -53,9 +55,10 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
 
     try (Scanner scan = client.createScanner("hellotable", Authorizations.EMPTY)) {
       scan.setRange(new Range(new Key("row_0"), new Key("row_1002")));
-      for (Entry<Key, Value> e : scan) {
+      for (Entry<Key,Value> e : scan) {
         Key key = e.getKey();
-        log.trace(key.getRow() + " " + key.getColumnFamily() + " " + key.getColumnQualifier() + " " + e.getValue());
+        log.trace(key.getRow() + " " + key.getColumnFamily() + " " + key.getColumnQualifier() + " "
+            + e.getValue());
       }
     }
   }
diff --git a/src/main/java/org/apache/accumulo/examples/isolation/InterferenceTest.java b/src/main/java/org/apache/accumulo/examples/isolation/InterferenceTest.java
index c4b64b3..da3e183 100644
--- a/src/main/java/org/apache/accumulo/examples/isolation/InterferenceTest.java
+++ b/src/main/java/org/apache/accumulo/examples/isolation/InterferenceTest.java
@@ -37,8 +37,8 @@
 import com.beust.jcommander.Parameter;
 
 /**
- * This example shows how a concurrent reader and writer can interfere with each other. It creates two threads that run forever reading and writing to the same
- * table.
+ * This example shows how a concurrent reader and writer can interfere with each other. It creates
+ * two threads that run forever reading and writing to the same table.
  *
  * When the example is run with isolation enabled, no interference will be observed.
  *
@@ -49,7 +49,8 @@
 public class InterferenceTest {
 
   private static final int NUM_ROWS = 500;
-  private static final int NUM_COLUMNS = 113; // scanner batches 1000 by default, so make num columns not a multiple of 10
+  private static final int NUM_COLUMNS = 113; // scanner batches 1000 by default, so make num
+                                              // columns not a multiple of 10
   private static final Logger log = LoggerFactory.getLogger(InterferenceTest.class);
 
   static class Writer implements Runnable {
@@ -72,7 +73,8 @@ public void run() {
         row = (row + 1) % NUM_ROWS;
 
         for (int cq = 0; cq < NUM_COLUMNS; cq++)
-          m.put(new Text("000"), new Text(String.format("%04d", cq)), new Value(("" + value).getBytes()));
+          m.put(new Text("000"), new Text(String.format("%04d", cq)),
+              new Value(("" + value).getBytes()));
 
         value++;
 
@@ -163,7 +165,9 @@ public static void main(String[] args) throws Exception {
     if (!client.tableOperations().exists(opts.getTableName()))
       client.tableOperations().create(opts.getTableName());
 
-    Thread writer = new Thread(new Writer(client.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig()), opts.iterations));
+    Thread writer = new Thread(
+        new Writer(client.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig()),
+            opts.iterations));
     writer.start();
     Reader r;
     if (opts.isolated)
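
The branch above selects an isolated reader. The essential difference is one wrapper, the same IsolatedScanner used by ARS later in this diff; here is a minimal sketch, assuming an existing AccumuloClient and a table named "testtable".

import java.util.Map.Entry;

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.IsolatedScanner;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;

public class IsolationSketch {
  static void readIsolated(AccumuloClient client) throws Exception {
    try (Scanner scanner = new IsolatedScanner(
        client.createScanner("testtable", Authorizations.EMPTY))) {
      for (Entry<Key,Value> e : scanner) {
        // with isolation, every value seen in a row comes from a single mutation,
        // so the reader never observes a half-applied multi-column update
        System.out.println(e.getKey() + " -> " + e.getValue());
      }
    }
  }
}
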
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/NGramIngest.java b/src/main/java/org/apache/accumulo/examples/mapreduce/NGramIngest.java
index 984f058..80165f5 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/NGramIngest.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/NGramIngest.java
@@ -40,7 +40,8 @@
 import com.beust.jcommander.Parameter;
 
 /**
- * Map job to ingest n-gram files from http://storage.googleapis.com/books/ngrams/books/datasetsv2.html
+ * Map job to ingest n-gram files from
+ * http://storage.googleapis.com/books/ngrams/books/datasetsv2.html
  */
 public class NGramIngest extends Configured implements Tool {
 
@@ -54,11 +55,13 @@
   static class NGramMapper extends Mapper<LongWritable,Text,Text,Mutation> {
 
     @Override
-    protected void map(LongWritable location, Text value, Context context) throws IOException, InterruptedException {
+    protected void map(LongWritable location, Text value, Context context)
+        throws IOException, InterruptedException {
       String parts[] = value.toString().split("\\t");
       if (parts.length >= 4) {
         Mutation m = new Mutation(parts[0]);
-        m.put(parts[1], String.format("%010d", Long.parseLong(parts[2])), new Value(parts[3].trim().getBytes()));
+        m.put(parts[1], String.format("%010d", Long.parseLong(parts[2])),
+            new Value(parts[3].trim().getBytes()));
         context.write(null, m);
       }
     }
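
The "%010d" formatting in the mapper above matters because Accumulo sorts keys lexicographically as bytes; zero-padding makes lexicographic order agree with numeric order. A quick JDK-only check of the effect:

import java.util.Arrays;

public class PaddingDemo {
  public static void main(String[] args) {
    String[] unpadded = {"9", "10", "100"};
    Arrays.sort(unpadded);
    System.out.println(Arrays.toString(unpadded)); // [10, 100, 9] -- not numeric order

    String[] padded = {String.format("%010d", 9L), String.format("%010d", 10L),
        String.format("%010d", 100L)};
    Arrays.sort(padded);
    // [0000000009, 0000000010, 0000000100] -- numeric order preserved
    System.out.println(Arrays.toString(padded));
  }
}
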
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/RegexExample.java b/src/main/java/org/apache/accumulo/examples/mapreduce/RegexExample.java
index 8d00d27..6e8c380 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/RegexExample.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/RegexExample.java
@@ -69,7 +69,8 @@ public int run(String[] args) throws Exception {
     opts.setAccumuloConfigs(job);
 
     IteratorSetting regex = new IteratorSetting(50, "regex", RegExFilter.class);
-    RegExFilter.setRegexs(regex, opts.rowRegex, opts.columnFamilyRegex, opts.columnQualifierRegex, opts.valueRegex, false);
+    RegExFilter.setRegexs(regex, opts.rowRegex, opts.columnFamilyRegex, opts.columnQualifierRegex,
+        opts.valueRegex, false);
     AccumuloInputFormat.addIterator(job, regex);
 
     job.setMapperClass(RegexMapper.class);
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/RowHash.java b/src/main/java/org/apache/accumulo/examples/mapreduce/RowHash.java
index f19ecad..a72e615 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/RowHash.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/RowHash.java
@@ -46,7 +46,8 @@
     @Override
     public void map(Key row, Value data, Context context) throws IOException, InterruptedException {
       Mutation m = new Mutation(row.getRow());
-      m.put(new Text("cf-HASHTYPE"), new Text("cq-MD5BASE64"), new Value(Base64.getEncoder().encode(MD5Hash.digest(data.toString()).getDigest())));
+      m.put(new Text("cf-HASHTYPE"), new Text("cq-MD5BASE64"),
+          new Value(Base64.getEncoder().encode(MD5Hash.digest(data.toString()).getDigest())));
       context.write(null, m);
       context.progress();
     }
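
The mapper above stores Base64(MD5(value)) under cf-HASHTYPE/cq-MD5BASE64. Assuming Hadoop's MD5Hash digests the UTF-8 bytes of the string, the same digest can be reproduced with the JDK alone, which is handy for spot-checking the job's output.

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.Base64;

public class Md5Base64Check {
  public static void main(String[] args) throws Exception {
    byte[] digest = MessageDigest.getInstance("MD5")
        .digest("some cell value".getBytes(StandardCharsets.UTF_8));
    // should match what RowHash writes for the same input, under the
    // UTF-8 assumption stated above
    System.out.println(Base64.getEncoder().encodeToString(digest));
  }
}
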
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/TableToFile.java b/src/main/java/org/apache/accumulo/examples/mapreduce/TableToFile.java
index 1813f0c..1b80960 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/TableToFile.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/TableToFile.java
@@ -67,7 +67,8 @@ public void map(Key row, Value data, Context context) throws IOException, Interr
   }
 
   @Override
-  public int run(String[] args) throws IOException, InterruptedException, ClassNotFoundException, AccumuloSecurityException {
+  public int run(String[] args)
+      throws IOException, InterruptedException, ClassNotFoundException, AccumuloSecurityException {
     Job job = Job.getInstance(getConf());
     job.setJobName(this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
     job.setJarByClass(this.getClass());
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/TeraSortIngest.java b/src/main/java/org/apache/accumulo/examples/mapreduce/TeraSortIngest.java
index 39e8928..ddbcef7 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/TeraSortIngest.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/TeraSortIngest.java
@@ -50,8 +50,9 @@
 import com.beust.jcommander.Parameter;
 
 /**
- * Generate the *almost* official terasort input data set. (See below.) The user specifies the number of rows and the output directory, and this class runs a
- * map/reduce program to generate the data. The format of the data is:
+ * Generate the *almost* official terasort input data set. (See below.) The user specifies the
+ * number of rows and the output directory, and this class runs a map/reduce program to generate
+ * the data. The format of the data is:
  * <ul>
  * <li>(10 bytes key) (10 bytes rowid) (78 bytes filler) \r \n
  * <li>The keys are random characters from the set ' ' .. '~'.
@@ -59,9 +60,11 @@
  * <li>The filler consists of 7 runs of 10 characters from 'A' to 'Z'.
  * </ul>
  *
- * This TeraSort is slightly modified to allow for variable length key sizes and value sizes. The row length isn't variable. To generate a terabyte of data in
- * the same way TeraSort does, use 10000000000 rows, a 10/10 byte key length, and a 78/78 byte value length. Along with the 10 byte row id and \r\n this gives
- * a 100 byte row * 10000000000 rows = 1 TB. Min/Max ranges for key and value parameters are inclusive/inclusive, respectively.
+ * This TeraSort is slightly modified to allow for variable length key sizes and value sizes. The
+ * row length isn't variable. To generate a terabyte of data in the same way TeraSort does, use
+ * 10000000000 rows, a 10/10 byte key length, and a 78/78 byte value length. Along with the 10 byte
+ * row id and \r\n this gives a 100 byte row * 10000000000 rows = 1 TB. Min/Max ranges for key and
+ * value parameters are inclusive/inclusive, respectively.
  *
  *
  */
@@ -140,7 +143,8 @@ public NullWritable getCurrentValue() throws IOException, InterruptedException {
       }
 
       @Override
-      public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {}
+      public void initialize(InputSplit split, TaskAttemptContext context)
+          throws IOException, InterruptedException {}
 
       @Override
       public boolean nextKeyValue() throws IOException, InterruptedException {
@@ -153,7 +157,8 @@ public boolean nextKeyValue() throws IOException, InterruptedException {
     }
 
     @Override
-    public RecordReader<LongWritable,NullWritable> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException {
+    public RecordReader<LongWritable,NullWritable> createRecordReader(InputSplit split,
+        TaskAttemptContext context) throws IOException {
       // reporter.setStatus("Creating record reader");
       return new RangeRecordReader((RangeInputSplit) split);
     }
@@ -166,7 +171,8 @@ public boolean nextKeyValue() throws IOException, InterruptedException {
       long totalRows = job.getConfiguration().getLong(NUMROWS, 0);
       int numSplits = job.getConfiguration().getInt(NUMSPLITS, 1);
       long rowsPerSplit = totalRows / numSplits;
-      System.out.println("Generating " + totalRows + " using " + numSplits + " maps with step of " + rowsPerSplit);
+      System.out.println(
+          "Generating " + totalRows + " using " + numSplits + " maps with step of " + rowsPerSplit);
       ArrayList<InputSplit> splits = new ArrayList<>(numSplits);
       long currentRow = 0;
       for (int split = 0; split < numSplits - 1; ++split) {
@@ -191,11 +197,14 @@ public boolean nextKeyValue() throws IOException, InterruptedException {
      */
     private static final int seedSkip = 128 * 1024 * 1024;
     /**
-     * The precomputed seed values after every seedSkip iterations. There should be enough values so that 2**32 iterations are covered.
+     * The precomputed seed values after every seedSkip iterations. There should be enough values so
+     * that 2**32 iterations are covered.
      */
-    private static final long[] seeds = new long[] {0L, 4160749568L, 4026531840L, 3892314112L, 3758096384L, 3623878656L, 3489660928L, 3355443200L, 3221225472L,
-        3087007744L, 2952790016L, 2818572288L, 2684354560L, 2550136832L, 2415919104L, 2281701376L, 2147483648L, 2013265920L, 1879048192L, 1744830464L,
-        1610612736L, 1476395008L, 1342177280L, 1207959552L, 1073741824L, 939524096L, 805306368L, 671088640L, 536870912L, 402653184L, 268435456L, 134217728L,};
+    private static final long[] seeds = new long[] {0L, 4160749568L, 4026531840L, 3892314112L,
+        3758096384L, 3623878656L, 3489660928L, 3355443200L, 3221225472L, 3087007744L, 2952790016L,
+        2818572288L, 2684354560L, 2550136832L, 2415919104L, 2281701376L, 2147483648L, 2013265920L,
+        1879048192L, 1744830464L, 1610612736L, 1476395008L, 1342177280L, 1207959552L, 1073741824L,
+        939524096L, 805306368L, 671088640L, 536870912L, 402653184L, 268435456L, 134217728L,};
 
     /**
      * Start the random number generator on the given iteration.
@@ -286,7 +295,8 @@ private Text getRowIdString(long rowId) {
     }
 
     /**
-     * Add the required filler bytes. Each row consists of 7 blocks of 10 characters and 1 block of 8 characters.
+     * Add the required filler bytes. Each row consists of 7 blocks of 10 characters and 1 block of
+     * 8 characters.
      *
      * @param rowId
      *          the current row number
@@ -310,7 +320,8 @@ private void addFiller(long rowId) {
     }
 
     @Override
-    public void map(LongWritable row, NullWritable ignored, Context context) throws IOException, InterruptedException {
+    public void map(LongWritable row, NullWritable ignored, Context context)
+        throws IOException, InterruptedException {
       context.setStatus("Entering");
       long rowId = row.get();
       if (rand == null) {
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/TokenFileWordCount.java b/src/main/java/org/apache/accumulo/examples/mapreduce/TokenFileWordCount.java
index 5b9935e..9d7c9da 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/TokenFileWordCount.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/TokenFileWordCount.java
@@ -35,8 +35,10 @@
 import org.slf4j.LoggerFactory;
 
 /**
- * A simple map reduce job that inserts word counts into accumulo. See the README for instructions on how to run this. This version does not use the ClientOpts
- * class to parse arguments, as an example of using AccumuloInputFormat and AccumuloOutputFormat directly. See README.mapred for more details.
+ * A simple map reduce job that inserts word counts into accumulo. See the README for instructions
+ * on how to run this. This version does not use the ClientOpts class to parse arguments, as an
+ * example of using AccumuloInputFormat and AccumuloOutputFormat directly. See README.mapred for
+ * more details.
  *
  */
 public class TokenFileWordCount extends Configured implements Tool {
@@ -88,7 +90,8 @@ public int run(String[] args) throws Exception {
     job.setOutputValueClass(Mutation.class);
 
     // AccumuloInputFormat not used here, but it uses the same functions.
-    AccumuloOutputFormat.setZooKeeperInstance(job, ClientConfiguration.loadDefault().withInstance(instance).withZkHosts(zookeepers));
+    AccumuloOutputFormat.setZooKeeperInstance(job,
+        ClientConfiguration.loadDefault().withInstance(instance).withZkHosts(zookeepers));
     AccumuloOutputFormat.setConnectorInfo(job, user, tokenFile);
     AccumuloOutputFormat.setCreateTables(job, true);
     AccumuloOutputFormat.setDefaultTableName(job, tableName);
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/UniqueColumns.java b/src/main/java/org/apache/accumulo/examples/mapreduce/UniqueColumns.java
index 29a4a17..017700a 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/UniqueColumns.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/UniqueColumns.java
@@ -40,8 +40,8 @@
 import com.beust.jcommander.Parameter;
 
 /**
- * A simple map reduce job that computes the unique column families and column qualifiers in a table. This example shows one way to run against an offline
- * table.
+ * A simple map reduce job that computes the unique column families and column qualifiers in a
+ * table. This example shows one way to run against an offline table.
  */
 public class UniqueColumns extends Configured implements Tool {
 
@@ -53,7 +53,8 @@
     private static final Text CQ = new Text("cq:");
 
     @Override
-    public void map(Key key, Value value, Context context) throws IOException, InterruptedException {
+    public void map(Key key, Value value, Context context)
+        throws IOException, InterruptedException {
       temp.set(CF);
       ByteSequence cf = key.getColumnFamilyData();
       temp.append(cf.getBackingArray(), cf.offset(), cf.length());
@@ -68,7 +69,8 @@ public void map(Key key, Value value, Context context) throws IOException, Inter
 
   public static class UReducer extends Reducer<Text,Text,Text,Text> {
     @Override
-    public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
+    public void reduce(Text key, Iterable<Text> values, Context context)
+        throws IOException, InterruptedException {
       context.write(key, EMPTY);
     }
   }
@@ -100,13 +102,15 @@ public int run(String[] args) throws Exception {
 
     if (opts.offline) {
       /*
-       * this example clones the table and takes it offline. If you plan to run map reduce jobs over a table many times, it may be more efficient to compact the
-       * table, clone it, and then keep using the same clone as input for map reduce.
+       * this example clones the table and takes it offline. If you plan to run map reduce jobs over
+       * a table many times, it may be more efficient to compact the table, clone it, and then keep
+       * using the same clone as input for map reduce.
        */
 
       client = opts.getAccumuloClient();
       clone = opts.getTableName() + "_" + jobName;
-      client.tableOperations().clone(opts.getTableName(), clone, true, new HashMap<String,String>(), new HashSet<String>());
+      client.tableOperations().clone(opts.getTableName(), clone, true, new HashMap<String,String>(),
+          new HashSet<String>());
       client.tableOperations().offline(clone);
 
       AccumuloInputFormat.setOfflineTableScan(job, true);
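
The clone-then-offline pattern in this hunk, isolated into a sketch. Only tableOperations calls that appear above are used; the helper name and the assumption of an existing AccumuloClient are illustrative.

import java.util.HashMap;
import java.util.HashSet;

import org.apache.accumulo.core.client.AccumuloClient;

public class OfflineCloneSketch {
  static String cloneForMapReduce(AccumuloClient client, String table, String jobName)
      throws Exception {
    String clone = table + "_" + jobName;
    // flush=true so recent writes land in the clone's files
    client.tableOperations().clone(table, clone, true, new HashMap<>(), new HashSet<>());
    // an offline table's files are stable, so the job can read them directly
    client.tableOperations().offline(clone);
    return clone; // used with AccumuloInputFormat.setOfflineTableScan(job, true)
  }
}
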
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/WordCount.java b/src/main/java/org/apache/accumulo/examples/mapreduce/WordCount.java
index 8a183a2..41d511b 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/WordCount.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/WordCount.java
@@ -38,7 +38,8 @@
 import com.beust.jcommander.Parameter;
 
 /**
- * A simple map reduce job that inserts word counts into accumulo. See the README for instructions on how to run this.
+ * A simple map reduce job that inserts word counts into accumulo. See the README for instructions
+ * on how to run this.
  *
  */
 public class WordCount extends Configured implements Tool {
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/BulkIngestExample.java b/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/BulkIngestExample.java
index 1d4261a..6b9a2dd 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/BulkIngestExample.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/BulkIngestExample.java
@@ -48,7 +48,8 @@
 import org.apache.hadoop.util.ToolRunner;
 
 /**
- * Example map reduce job that bulk ingests data into an accumulo table. The expected input is text files containing tab-separated key-value pairs on each line.
+ * Example map reduce job that bulk ingests data into an accumulo table. The expected input is text
+ * files containing tab-separated key-value pairs on each line.
  */
 
 public class BulkIngestExample extends Configured implements Tool {
@@ -60,7 +61,8 @@
     private Text outputValue = new Text();
 
     @Override
-    public void map(LongWritable key, Text value, Context output) throws IOException, InterruptedException {
+    public void map(LongWritable key, Text value, Context output)
+        throws IOException, InterruptedException {
       // split on tab
       int index = -1;
       for (int i = 0; i < value.getLength(); i++) {
@@ -80,7 +82,8 @@ public void map(LongWritable key, Text value, Context output) throws IOException
 
   public static class ReduceClass extends Reducer<Text,Text,Key,Value> {
     @Override
-    public void reduce(Text key, Iterable<Text> values, Context output) throws IOException, InterruptedException {
+    public void reduce(Text key, Iterable<Text> values, Context output)
+        throws IOException, InterruptedException {
       // be careful with the timestamp... if you run on a cluster
       // where the time is whacked you may not see your updates in
       // accumulo if there is already an existing value with a later
@@ -91,7 +94,8 @@ public void reduce(Text key, Iterable<Text> values, Context output) throws IOExc
 
       int index = 0;
       for (Text value : values) {
-        Key outputKey = new Key(key, new Text("colf"), new Text(String.format("col_%07d", index)), timestamp);
+        Key outputKey = new Key(key, new Text("colf"), new Text(String.format("col_%07d", index)),
+            timestamp);
         index++;
 
         Value outputValue = new Value(value.getBytes(), 0, value.getLength());
@@ -118,7 +122,8 @@ public int run(String[] args) {
       job.setReducerClass(ReduceClass.class);
       job.setOutputFormatClass(AccumuloFileOutputFormat.class);
 
-      ClientInfo info = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").info();
+      ClientInfo info = Accumulo.newClient().usingProperties("conf/accumulo-client.properties")
+          .info();
       AccumuloClient client = Accumulo.newClient().usingClientInfo(info).build();
       AccumuloInputFormat.setClientInfo(job, info);
       AccumuloInputFormat.setInputTableName(job, SetupTable.tableName);
@@ -149,7 +154,8 @@ public int run(String[] args) {
       // With HDFS permissions on, we need to make sure the Accumulo user can read/move the rfiles
       FsShell fsShell = new FsShell(conf);
       fsShell.run(new String[] {"-chmod", "-R", "777", workDir});
-      client.tableOperations().importDirectory(SetupTable.tableName, workDir + "/files", workDir + "/failures", false);
+      client.tableOperations().importDirectory(SetupTable.tableName, workDir + "/files",
+          workDir + "/failures", false);
 
     } catch (Exception e) {
       throw new RuntimeException(e);
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/SetupTable.java b/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/SetupTable.java
index e7f3bd5..add39c0 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/SetupTable.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/SetupTable.java
@@ -28,7 +28,6 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 
-
 public class SetupTable {
 
   static String[] splits = {"row_00000333", "row_00000666"};
@@ -37,11 +36,12 @@
   static String outputFile = "bulk/test_1.txt";
 
   public static void main(String[] args) throws Exception {
-    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties")
+        .build();
     try {
       client.tableOperations().create(tableName);
     } catch (TableExistsException e) {
-      //ignore
+      // ignore
     }
 
     // create a table with initial partitions
@@ -52,7 +52,8 @@ public static void main(String[] args) throws Exception {
     client.tableOperations().addSplits(tableName, intialPartitions);
 
     FileSystem fs = FileSystem.get(new Configuration());
-    try (PrintStream out = new PrintStream(new BufferedOutputStream(fs.create(new Path(outputFile))))) {
+    try (PrintStream out = new PrintStream(
+        new BufferedOutputStream(fs.create(new Path(outputFile))))) {
       // create some data in outputFile
       for (int i = 0; i < numRows; i++) {
         out.println(String.format("row_%010d\tvalue_%010d", i, i));
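
The pre-split step in SetupTable, sketched on its own: addSplits takes a SortedSet<Text>, so the split-point strings go into a TreeSet first. The split points are the ones in this file; an existing client and table are assumed.

import java.util.TreeSet;

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.hadoop.io.Text;

public class SplitSketch {
  static void preSplit(AccumuloClient client, String tableName) throws Exception {
    TreeSet<Text> partitions = new TreeSet<>();
    for (String s : new String[] {"row_00000333", "row_00000666"})
      partitions.add(new Text(s));
    // afterward the table has three tablets: (-inf, 333], (333, 666], (666, +inf)
    client.tableOperations().addSplits(tableName, partitions);
  }
}
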
diff --git a/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/VerifyIngest.java b/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/VerifyIngest.java
index 09495db..d9818eb 100644
--- a/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/VerifyIngest.java
+++ b/src/main/java/org/apache/accumulo/examples/mapreduce/bulk/VerifyIngest.java
@@ -35,8 +35,10 @@
 public class VerifyIngest {
   private static final Logger log = LoggerFactory.getLogger(VerifyIngest.class);
 
-  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
-    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
+  public static void main(String[] args)
+      throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties")
+        .build();
     Scanner scanner = client.createScanner(SetupTable.tableName, Authorizations.EMPTY);
 
     scanner.setRange(new Range(String.format("row_%010d", 0), null));
@@ -51,12 +53,14 @@ public static void main(String[] args) throws AccumuloException, AccumuloSecurit
         Entry<Key,Value> entry = si.next();
 
         if (!entry.getKey().getRow().toString().equals(String.format("row_%010d", i))) {
-          log.error("unexpected row key " + entry.getKey().getRow().toString() + " expected " + String.format("row_%010d", i));
+          log.error("unexpected row key " + entry.getKey().getRow().toString() + " expected "
+              + String.format("row_%010d", i));
           ok = false;
         }
 
         if (!entry.getValue().toString().equals(String.format("value_%010d", i))) {
-          log.error("unexpected value " + entry.getValue().toString() + " expected " + String.format("value_%010d", i));
+          log.error("unexpected value " + entry.getValue().toString() + " expected "
+              + String.format("value_%010d", i));
           ok = false;
         }
 
diff --git a/src/main/java/org/apache/accumulo/examples/reservations/ARS.java b/src/main/java/org/apache/accumulo/examples/reservations/ARS.java
index 1297efc..4240156 100644
--- a/src/main/java/org/apache/accumulo/examples/reservations/ARS.java
+++ b/src/main/java/org/apache/accumulo/examples/reservations/ARS.java
@@ -40,15 +40,18 @@
 import jline.console.ConsoleReader;
 
 /**
- * Accumulo Reservation System : An example reservation system using Accumulo. Supports atomic reservations of a resource at a date. Wait lists are also
- * supported. In order to keep the example simple, no checking is done of the date. Also, the code is inefficient; if interested in improving it, take a look at
- * the EXERCISE comments.
+ * Accumulo Reservation System : An example reservation system using Accumulo. Supports atomic
+ * reservations of a resource at a date. Wait lists are also supported. In order to keep the example
+ * simple, no checking is done of the date. Also, the code is inefficient; if interested in
+ * improving it, take a look at the EXERCISE comments.
  */
 
-// EXERCISE create a test that verifies correctness under concurrency. For example, have M threads making reservations against N resources. Each thread could
-// randomly reserve and cancel resources for a single user. When each thread finishes, it knows what the state of its single user should be. When all threads
-// finish, collect their expected state and verify the status of all users and resources. For extra credit, run the test on an IaaS provider using 10 nodes and
-// 10 threads per node.
+// EXERCISE create a test that verifies correctness under concurrency. For example, have M threads
+// making reservations against N resources. Each thread could randomly reserve and cancel resources
+// for a single user. When each thread finishes, it knows what the state of its single user should
+// be. When all threads finish, collect their expected state and verify the status of all users and
+// resources. For extra credit, run the test on an IaaS provider using 10 nodes and 10 threads per
+// node.
 
 public class ARS {
 
@@ -67,7 +70,8 @@ public ARS(AccumuloClient client, String rTable) {
   }
 
   public List<String> setCapacity(String what, String when, int count) {
-    // EXERCISE implement this method, which atomically sets a capacity and returns anyone who was moved to the wait list if the capacity was decreased
+    // EXERCISE implement this method, which atomically sets a capacity and returns anyone who was
+    // moved to the wait list if the capacity was decreased
 
     throw new UnsupportedOperationException();
   }
@@ -76,10 +80,12 @@ public ReservationResult reserve(String what, String when, String who) throws Ex
 
     String row = what + ":" + when;
 
-    // EXERCISE This code assumes there is no reservation and tries to create one. If a reservation exists then the update will fail. This is a good strategy
-    // when it is expected there are usually no reservations. Could modify the code to scan first.
+    // EXERCISE This code assumes there is no reservation and tries to create one. If a reservation
+    // exists then the update will fail. This is a good strategy when it is expected there are
+    // usually no reservations. Could modify the code to scan first.
 
-    // The following mutation requires that the column tx:seq does not exist and will fail if it does.
+    // The following mutation requires that the column tx:seq does not exist and will fail if it
+    // does.
     ConditionalMutation update = new ConditionalMutation(row, new Condition("tx", "seq"));
     update.put("tx", "seq", "0");
     update.put("res", String.format("%04d", 0), who);
@@ -87,7 +93,9 @@ public ReservationResult reserve(String what, String when, String who) throws Ex
     ReservationResult result = ReservationResult.RESERVED;
 
     // it is important to use an isolated scanner so that only whole mutations are seen
-    try (ConditionalWriter cwriter = client.createConditionalWriter(rTable, new ConditionalWriterConfig());
+    try (
+        ConditionalWriter cwriter = client.createConditionalWriter(rTable,
+            new ConditionalWriterConfig());
         Scanner scanner = new IsolatedScanner(client.createScanner(rTable, Authorizations.EMPTY))) {
       while (true) {
         Status status = cwriter.write(update).getStatus();
@@ -102,10 +110,13 @@ public ReservationResult reserve(String what, String when, String who) throws Ex
             throw new RuntimeException("Unexpected status " + status);
         }
 
-        // EXERCISE in the case of many threads trying to reserve a slot, this approach of immediately retrying is inefficient. Exponential back-off is a good
-        // general solution to contention problems like this. However, in this particular case, exponential back-off could penalize the earliest threads
-        // that attempted to make a reservation by putting them later in the list. A more complex solution could involve having independent sub-queues within
-        // the row that approximately maintain arrival order and use exponential back-off to fairly merge the sub-queues into the main queue.
+        // EXERCISE in the case of many threads trying to reserve a slot, this approach of
+        // immediately retrying is inefficient. Exponential back-off is a good general solution to
+        // contention problems like this. However, in this particular case, exponential back-off
+        // could penalize the earliest threads that attempted to make a reservation by putting them
+        // later in the list. A more complex solution could involve having independent sub-queues
+        // within the row that approximately maintain arrival order and use exponential back-off to
+        // fairly merge the sub-queues into the main queue.
 
         scanner.setRange(new Range(row));
 
@@ -120,16 +131,19 @@ public ReservationResult reserve(String what, String when, String who) throws Ex
           if (cf.equals("tx") && cq.equals("seq")) {
             seq = Integer.parseInt(val);
           } else if (cf.equals("res")) {
-            // EXERCISE scanning the entire list to find whether the reserver is already in the list is inefficient. One possible way to solve this would be to
-            // sort the data differently in Accumulo so that finding the reserver could be done quickly.
+            // EXERCISE scanning the entire list to find whether the reserver is already in the
+            // list is inefficient. One possible way to solve this would be to sort the data
+            // differently in Accumulo so that finding the reserver could be done quickly.
             if (val.equals(who))
               if (maxReservation == -1)
                 return ReservationResult.RESERVED; // already have the first reservation
               else
                 return ReservationResult.WAIT_LISTED; // already on wait list
 
-            // EXERCISE the way this code finds the max reservation is very inefficient; it would be better if it did not have to scan the entire row.
-            // One possibility is to just use the sequence number. Could also consider sorting the data in another way and/or using an iterator.
+            // EXERCISE the way this code finds the max reservation is very inefficient; it would
+            // be better if it did not have to scan the entire row. One possibility is to just use
+            // the sequence number. Could also consider sorting the data in another way and/or
+            // using an iterator.
             maxReservation = Integer.parseInt(cq);
           }
         }
@@ -155,12 +169,16 @@ public void cancel(String what, String when, String who) throws Exception {
 
     String row = what + ":" + when;
 
-    // Even though this method is only deleting a column, it's important to use a conditional writer. By updating the seq # when deleting a reservation, it
-    // will cause any concurrent reservations to retry. If this delete were done using a batch writer, then a concurrent reservation could report WAIT_LISTED
+    // Even though this method is only deleting a column, it's important to use a conditional
+    // writer. By updating the seq # when deleting a reservation, it
+    // will cause any concurrent reservations to retry. If this delete were done using a batch
+    // writer, then a concurrent reservation could report WAIT_LISTED
     // when it actually got the reservation.
 
     // it's important to use an isolated scanner so that only whole mutations are seen
-    try (ConditionalWriter cwriter = client.createConditionalWriter(rTable, new ConditionalWriterConfig());
+    try (
+        ConditionalWriter cwriter = client.createConditionalWriter(rTable,
+            new ConditionalWriterConfig());
         Scanner scanner = new IsolatedScanner(client.createScanner(rTable, Authorizations.EMPTY))) {
       while (true) {
         scanner.setRange(new Range(row));
@@ -183,7 +201,8 @@ public void cancel(String what, String when, String who) throws Exception {
         }
 
         if (reservation != null) {
-          ConditionalMutation update = new ConditionalMutation(row, new Condition("tx", "seq").setValue(seq + ""));
+          ConditionalMutation update = new ConditionalMutation(row,
+              new Condition("tx", "seq").setValue(seq + ""));
           update.putDelete("res", reservation);
           update.put("tx", "seq", (seq + 1) + "");
 
@@ -214,7 +233,8 @@ public void cancel(String what, String when, String who) throws Exception {
     String row = what + ":" + when;
 
     // it's important to use an isolated scanner so that only whole mutations are seen
-    try (Scanner scanner = new IsolatedScanner(client.createScanner(rTable, Authorizations.EMPTY))) {
+    try (
+        Scanner scanner = new IsolatedScanner(client.createScanner(rTable, Authorizations.EMPTY))) {
       scanner.setRange(new Range(row));
       scanner.fetchColumnFamily(new Text("res"));
 
@@ -241,7 +261,8 @@ public static void main(String[] args) throws Exception {
       final String[] tokens = line.split("\\s+");
 
       if (tokens[0].equals("reserve") && tokens.length >= 4 && ars != null) {
-        // start up multiple threads all trying to reserve the same resource, no more than one should succeed
+        // start up multiple threads all trying to reserve the same resource, no more than one
+        // should succeed
 
         final ARS fars = ars;
         ArrayList<Thread> threads = new ArrayList<>();
@@ -251,7 +272,8 @@ public static void main(String[] args) throws Exception {
             @Override
             public void run() {
               try {
-                reader.println("  " + String.format("%20s", tokens[whoIndex]) + " : " + fars.reserve(tokens[1], tokens[2], tokens[whoIndex]));
+                reader.println("  " + String.format("%20s", tokens[whoIndex]) + " : "
+                    + fars.reserve(tokens[1], tokens[2], tokens[whoIndex]));
               } catch (Exception e) {
                 log.warn("Could not write to the ConsoleReader.", e);
               }
@@ -282,7 +304,7 @@ public void run() {
         AccumuloClient client = Accumulo.newClient().forInstance(tokens[1], tokens[2])
             .usingPassword(tokens[3], tokens[4]).build();
         if (client.tableOperations().exists(tokens[5])) {
-          ars = new ARS(client,  tokens[5]);
+          ars = new ARS(client, tokens[5]);
           reader.println("  connected");
         } else
           reader.println("  No Such Table");
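
The core of ARS's atomicity, extracted from the reserve() hunks above: the first writer to create the tx:seq column wins the reservation, and every other writer gets a rejected status and falls into the retry/wait-list logic shown in the diff. A minimal sketch, assuming an existing AccumuloClient and a reservation table named "ars".

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.ConditionalWriter;
import org.apache.accumulo.core.client.ConditionalWriter.Status;
import org.apache.accumulo.core.client.ConditionalWriterConfig;
import org.apache.accumulo.core.data.Condition;
import org.apache.accumulo.core.data.ConditionalMutation;

public class ReserveSketch {
  static boolean tryFirstReservation(AccumuloClient client, String row, String who)
      throws Exception {
    // the condition: tx:seq must not exist yet
    ConditionalMutation update = new ConditionalMutation(row, new Condition("tx", "seq"));
    update.put("tx", "seq", "0"); // start the sequence number
    update.put("res", String.format("%04d", 0), who); // slot 0 holds the reservation
    try (ConditionalWriter cw = client.createConditionalWriter("ars",
        new ConditionalWriterConfig())) {
      return cw.write(update).getStatus() == Status.ACCEPTED;
    }
  }
}
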
diff --git a/src/main/java/org/apache/accumulo/examples/sample/SampleExample.java b/src/main/java/org/apache/accumulo/examples/sample/SampleExample.java
index 907ad9b..92e44f2 100644
--- a/src/main/java/org/apache/accumulo/examples/sample/SampleExample.java
+++ b/src/main/java/org/apache/accumulo/examples/sample/SampleExample.java
@@ -40,14 +40,17 @@
 import com.google.common.collect.ImmutableMap;
 
 /**
- * A simple example of using Accumulo's sampling feature. This example does something similar to what README.sample shows using the shell. Also see
- * {@link CutoffIntersectingIterator} and README.sample for an example of how to use sample data from within an iterator.
+ * A simple example of using Accumulo's sampling feature. This example does something similar to
+ * what README.sample shows using the shell. Also see {@link CutoffIntersectingIterator} and
+ * README.sample for an example of how to use sample data from within an iterator.
  */
 public class SampleExample {
 
-  // a compaction strategy that only selects files for compaction that have no sample data or sample data created in a different way than the table's
+  // a compaction strategy that only selects files for compaction that have no sample data or sample
+  // data created in a different way than the table's
   static final CompactionStrategyConfig NO_SAMPLE_STRATEGY = new CompactionStrategyConfig(
-      "org.apache.accumulo.tserver.compaction.strategies.ConfigurableCompactionStrategy").setOptions(Collections.singletonMap("SF_NO_SAMPLE", ""));
+      "org.apache.accumulo.tserver.compaction.strategies.ConfigurableCompactionStrategy")
+          .setOptions(Collections.singletonMap("SF_NO_SAMPLE", ""));
 
   static class Opts extends ClientOnDefaultTable {
     public Opts() {
@@ -73,7 +76,8 @@ public static void main(String[] args) throws Exception {
     BatchWriter bw = client.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
     bw.addMutation(createMutation("9225", "abcde", "file://foo.txt"));
     bw.addMutation(createMutation("8934", "accumulo scales", "file://accumulo_notes.txt"));
-    bw.addMutation(createMutation("2317", "milk, eggs, bread, parmigiano-reggiano", "file://groceries/9/txt"));
+    bw.addMutation(
+        createMutation("2317", "milk, eggs, bread, parmigiano-reggiano", "file://groceries/9/txt"));
     bw.addMutation(createMutation("3900", "EC2 ate my homework", "file://final_project.txt"));
     bw.flush();
 
@@ -87,7 +91,8 @@ public static void main(String[] args) throws Exception {
     print(scanner);
     System.out.println();
 
-    System.out.println("Scanning with sampler configuration.  Data was written before sampler was set on table, scan should fail.");
+    System.out.println(
+        "Scanning with sampler configuration.  Data was written before sampler was set on table, scan should fail.");
     scanner.setSamplerConfiguration(sc1);
     try {
       print(scanner);
@@ -97,16 +102,19 @@ public static void main(String[] args) throws Exception {
     System.out.println();
 
     // compact table to recreate sample data
-    client.tableOperations().compact(opts.getTableName(), new CompactionConfig().setCompactionStrategy(NO_SAMPLE_STRATEGY));
+    client.tableOperations().compact(opts.getTableName(),
+        new CompactionConfig().setCompactionStrategy(NO_SAMPLE_STRATEGY));
 
     System.out.println("Scanning after compaction (compaction should have created sample data) : ");
     print(scanner);
     System.out.println();
 
     // update a document in the sample data
-    bw.addMutation(createMutation("2317", "milk, eggs, bread, parmigiano-reggiano, butter", "file://groceries/9/txt"));
+    bw.addMutation(createMutation("2317", "milk, eggs, bread, parmigiano-reggiano, butter",
+        "file://groceries/9/txt"));
     bw.close();
-    System.out.println("Scanning sample after updating content for docId 2317 (should see content change in sample data) : ");
+    System.out.println(
+        "Scanning sample after updating content for docId 2317 (should see content change in sample data) : ");
     print(scanner);
     System.out.println();
 
@@ -115,9 +123,11 @@ public static void main(String[] args) throws Exception {
     sc2.setOptions(ImmutableMap.of("hasher", "murmur3_32", "modulus", "2"));
     client.tableOperations().setSamplerConfiguration(opts.getTableName(), sc2);
     // compact table to recreate sample data using new configuration
-    client.tableOperations().compact(opts.getTableName(), new CompactionConfig().setCompactionStrategy(NO_SAMPLE_STRATEGY));
+    client.tableOperations().compact(opts.getTableName(),
+        new CompactionConfig().setCompactionStrategy(NO_SAMPLE_STRATEGY));
 
-    System.out.println("Scanning with old sampler configuration.  Sample data was created using new configuration with a compaction.  Scan should fail.");
+    System.out.println(
+        "Scanning with old sampler configuration.  Sample data was created using new configuration with a compaction.  Scan should fail.");
     try {
       // try scanning with old sampler configuration
       print(scanner);
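
For reference, the sampler wiring this example toggles, reduced to a sketch. RowSampler and the murmur3_32 hasher/modulus options appear elsewhere in this diff; the modulus value and the helper shape here are illustrative assumptions. Existing files carry no sample data until a compaction rewrites them, which is why the example compacts with NO_SAMPLE_STRATEGY after each configuration change.

import java.util.HashMap;
import java.util.Map;

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.sample.RowSampler;
import org.apache.accumulo.core.client.sample.SamplerConfiguration;

public class SamplerSketch {
  static void enableRowSample(AccumuloClient client, String table) throws Exception {
    SamplerConfiguration sc = new SamplerConfiguration(RowSampler.class.getName());
    Map<String,String> options = new HashMap<>();
    options.put("hasher", "murmur3_32");
    options.put("modulus", "3"); // keep roughly one third of the rows in the sample
    sc.setOptions(options);
    client.tableOperations().setSamplerConfiguration(table, sc);
  }
}
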
diff --git a/src/main/java/org/apache/accumulo/examples/shard/ContinuousQuery.java b/src/main/java/org/apache/accumulo/examples/shard/ContinuousQuery.java
index 2c56b23..9911b7b 100644
--- a/src/main/java/org/apache/accumulo/examples/shard/ContinuousQuery.java
+++ b/src/main/java/org/apache/accumulo/examples/shard/ContinuousQuery.java
@@ -39,8 +39,9 @@
 import com.google.common.collect.Iterators;
 
 /**
- * Using the doc2word table created by Reverse.java, this program randomly selects N words per document. Then it continually queries a random set of words in
- * the shard table (created by {@link Index}) using the {@link IntersectingIterator}.
+ * Using the doc2word table created by Reverse.java, this program randomly selects N words per
+ * document. Then it continually queries a random set of words in the shard table (created by
+ * {@link Index}) using the {@link IntersectingIterator}.
  */
 public class ContinuousQuery {
 
@@ -63,9 +64,11 @@ public static void main(String[] args) throws Exception {
     Opts opts = new Opts();
     opts.parseArgs(ContinuousQuery.class.getName(), args);
 
-    AccumuloClient client= Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties")
+        .build();
 
-    ArrayList<Text[]> randTerms = findRandomTerms(client.createScanner(opts.doc2Term, Authorizations.EMPTY), opts.numTerms);
+    ArrayList<Text[]> randTerms = findRandomTerms(
+        client.createScanner(opts.doc2Term, Authorizations.EMPTY), opts.numTerms);
 
     Random rand = new Random();
 
@@ -120,7 +123,8 @@ public static void main(String[] args) throws Exception {
     return ret;
   }
 
-  private static void selectRandomWords(ArrayList<Text> words, ArrayList<Text[]> ret, Random rand, int numTerms) {
+  private static void selectRandomWords(ArrayList<Text> words, ArrayList<Text[]> ret, Random rand,
+      int numTerms) {
     if (words.size() >= numTerms) {
       Collections.shuffle(words, rand);
       Text docWords[] = new Text[numTerms];
diff --git a/src/main/java/org/apache/accumulo/examples/shard/CutoffIntersectingIterator.java b/src/main/java/org/apache/accumulo/examples/shard/CutoffIntersectingIterator.java
index 18fe914..c39e54b 100644
--- a/src/main/java/org/apache/accumulo/examples/shard/CutoffIntersectingIterator.java
+++ b/src/main/java/org/apache/accumulo/examples/shard/CutoffIntersectingIterator.java
@@ -36,7 +36,8 @@
 import org.apache.accumulo.core.iterators.user.IntersectingIterator;
 
 /**
- * This iterator uses a sample built from the Column Qualifier to quickly avoid intersecting iterator queries that may return too many documents.
+ * This iterator uses a sample built from the Column Qualifier to quickly avoid intersecting
+ * iterator queries that may return too many documents.
  */
 
 public class CutoffIntersectingIterator extends IntersectingIterator {
@@ -56,7 +57,8 @@ public boolean hasTop() {
   }
 
   @Override
-  public void seek(Range range, Collection<ByteSequence> seekColumnFamilies, boolean inclusive) throws IOException {
+  public void seek(Range range, Collection<ByteSequence> seekColumnFamilies, boolean inclusive)
+      throws IOException {
 
     sampleII.seek(range, seekColumnFamilies, inclusive);
 
@@ -68,7 +70,8 @@ public void seek(Range range, Collection<ByteSequence> seekColumnFamilies, boole
     }
 
     if (count > sampleMax) {
-      // In a real application, one would probably want to return a key value that indicates too much data. Since this would execute for each tablet, some tablets
+      // In a real application, one would probably want to return a key value that indicates too
+      // much data. Since this would execute for each tablet, some tablets
       // may return data. For tablets that did not return data, one would want an indication.
       hasTop = false;
     } else {
@@ -78,7 +81,8 @@ public void seek(Range range, Collection<ByteSequence> seekColumnFamilies, boole
   }
 
   @Override
-  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
+  public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
+      IteratorEnvironment env) throws IOException {
     super.init(source, options, env);
 
     IteratorEnvironment sampleEnv = env.cloneWithSamplingEnabled();
@@ -93,16 +97,20 @@ public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> op
 
   static void validateSamplerConfig(SamplerConfiguration sampleConfig) {
     requireNonNull(sampleConfig);
-    checkArgument(sampleConfig.getSamplerClassName().equals(RowColumnSampler.class.getName()), "Unexpected Sampler " + sampleConfig.getSamplerClassName());
-    checkArgument(sampleConfig.getOptions().get("qualifier").equals("true"), "Expected sample on column qualifier");
-    checkArgument(isNullOrFalse(sampleConfig.getOptions(), "row", "family", "visibility"), "Expected sample on column qualifier only");
+    checkArgument(sampleConfig.getSamplerClassName().equals(RowColumnSampler.class.getName()),
+        "Unexpected Sampler " + sampleConfig.getSamplerClassName());
+    checkArgument(sampleConfig.getOptions().get("qualifier").equals("true"),
+        "Expected sample on column qualifier");
+    checkArgument(isNullOrFalse(sampleConfig.getOptions(), "row", "family", "visibility"),
+        "Expected sample on column qualifier only");
   }
 
   private void setMax(IteratorEnvironment sampleEnv, Map<String,String> options) {
     String cutoffValue = options.get("cutoff");
     SamplerConfiguration sampleConfig = sampleEnv.getSamplerConfiguration();
 
-    // Ensure the sample was constructed in an expected way. If the sample is not built as expected, then we cannot draw conclusions based on the sample.
+    // Ensure the sample was constructed in an expected way. If the sample is not built as expected,
+    // then we cannot draw conclusions based on the sample.
     requireNonNull(cutoffValue, "Expected cutoff option is missing");
     validateSamplerConfig(sampleConfig);
 
diff --git a/src/main/java/org/apache/accumulo/examples/shard/Index.java b/src/main/java/org/apache/accumulo/examples/shard/Index.java
index cd34816..53925bf 100644
--- a/src/main/java/org/apache/accumulo/examples/shard/Index.java
+++ b/src/main/java/org/apache/accumulo/examples/shard/Index.java
@@ -35,7 +35,8 @@
 /**
  * This program indexes a set of documents given on the command line into a shard table.
  *
- * What it writes to the table is row = partition id, column family = term, column qualifier = document id.
+ * What it writes to the table is row = partition id, column family = term, column qualifier =
+ * document id.
  */
 public class Index {
 
@@ -43,7 +44,8 @@ static Text genPartition(int partition) {
     return new Text(String.format("%08x", Math.abs(partition)));
   }
 
-  public static void index(int numPartitions, Text docId, String doc, String splitRegex, BatchWriter bw) throws Exception {
+  public static void index(int numPartitions, Text docId, String doc, String splitRegex,
+      BatchWriter bw) throws Exception {
 
     String[] tokens = doc.split(splitRegex);
 
@@ -66,7 +68,8 @@ public static void index(int numPartitions, Text docId, String doc, String split
       bw.addMutation(m);
   }
 
-  public static void index(int numPartitions, File src, String splitRegex, BatchWriter bw) throws Exception {
+  public static void index(int numPartitions, File src, String splitRegex, BatchWriter bw)
+      throws Exception {
     if (src.isDirectory()) {
       File[] files = src.listFiles();
       if (files != null) {
@@ -97,7 +100,8 @@ public static void index(int numPartitions, File src, String splitRegex, BatchWr
     @Parameter(names = {"-t", "--table"}, required = true, description = "table to use")
     private String tableName;
 
-    @Parameter(names = "--partitions", required = true, description = "the number of shards to create")
+    @Parameter(names = "--partitions", required = true,
+        description = "the number of shards to create")
     int partitions;
 
     @Parameter(required = true, description = "<file> { <file> ... }")
@@ -110,7 +114,8 @@ public static void main(String[] args) throws Exception {
 
     String splitRegex = "\\W+";
 
-    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties")
+        .build();
 
     try (BatchWriter bw = client.createBatchWriter(opts.tableName)) {
       for (String filename : opts.files) {
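
How a document lands in a shard, sketched. genPartition() below is copied from the diff (fixed-width hex keeps partition rows sorted); the hash-based binning in main() is an illustrative assumption, not necessarily the exact expression Index.index() uses.

import org.apache.hadoop.io.Text;

public class PartitionSketch {
  // identical to the helper in the diff
  static Text genPartition(int partition) {
    return new Text(String.format("%08x", Math.abs(partition)));
  }

  public static void main(String[] args) {
    int numPartitions = 16;
    String docId = "/docs/readme.txt"; // hypothetical document id
    int partition = Math.abs(docId.hashCode() % numPartitions);
    System.out.println(genPartition(partition)); // e.g. 0000000b
  }
}
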
diff --git a/src/main/java/org/apache/accumulo/examples/shard/Query.java b/src/main/java/org/apache/accumulo/examples/shard/Query.java
index 5377b41..9c396b0 100644
--- a/src/main/java/org/apache/accumulo/examples/shard/Query.java
+++ b/src/main/java/org/apache/accumulo/examples/shard/Query.java
@@ -37,7 +37,8 @@
 import com.beust.jcommander.Parameter;
 
 /**
- * This program queries a set of terms in the shard table (populated by {@link Index}) using the {@link IntersectingIterator}.
+ * This program queries a set of terms in the shard table (populated by {@link Index}) using the
+ * {@link IntersectingIterator}.
  */
 public class Query {
 
@@ -49,7 +50,8 @@
     @Parameter(names = {"-t", "--table"}, required = true, description = "table to use")
     private String tableName;
 
-    @Parameter(names = {"--sample"}, description = "Do queries against sample, useful when sample is built using column qualifier")
+    @Parameter(names = {"--sample"},
+        description = "Do queries against sample, useful when sample is built using column qualifier")
     private boolean useSample = false;
 
     @Parameter(names = {"--sampleCutoff"},
@@ -93,8 +95,10 @@ public static void main(String[] args) throws Exception {
 
     try (BatchScanner bs = client.createBatchScanner(opts.tableName, Authorizations.EMPTY, 10)) {
       if (opts.useSample) {
-        SamplerConfiguration samplerConfig = client.tableOperations().getSamplerConfiguration(opts.tableName);
-        CutoffIntersectingIterator.validateSamplerConfig(client.tableOperations().getSamplerConfiguration(opts.tableName));
+        SamplerConfiguration samplerConfig = client.tableOperations()
+            .getSamplerConfiguration(opts.tableName);
+        CutoffIntersectingIterator.validateSamplerConfig(
+            client.tableOperations().getSamplerConfiguration(opts.tableName));
         bs.setSamplerConfiguration(samplerConfig);
       }
       for (String entry : query(bs, opts.terms, opts.sampleCutoff)) {
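
The sampler-related wrapping above sits on top of Query's core mechanism, the IntersectingIterator named in its Javadoc. The following sketch shows how that iterator is typically attached to a BatchScanner; the priority (20) and iterator name are arbitrary choices, and terms-as-column-families follows the Index schema:

    import java.util.Collections;
    import java.util.Map.Entry;
    import org.apache.accumulo.core.client.BatchScanner;
    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.iterators.user.IntersectingIterator;
    import org.apache.hadoop.io.Text;

    // Sketch: intersect the given terms across every shard of the table.
    static void queryTerms(BatchScanner bs, String[] terms) {
      Text[] columns = new Text[terms.length];
      for (int i = 0; i < terms.length; i++)
        columns[i] = new Text(terms[i]);
      IteratorSetting ii = new IteratorSetting(20, "ii", IntersectingIterator.class);
      IntersectingIterator.setColumnFamilies(ii, columns); // terms are column families
      bs.addScanIterator(ii);
      bs.setRanges(Collections.singleton(new Range())); // cover all partitions
      for (Entry<Key,Value> entry : bs)
        System.out.println(entry.getKey().getColumnQualifier()); // matching doc id
    }
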
diff --git a/src/main/java/org/apache/accumulo/examples/shard/Reverse.java b/src/main/java/org/apache/accumulo/examples/shard/Reverse.java
index d66a0bd..3db5601 100644
--- a/src/main/java/org/apache/accumulo/examples/shard/Reverse.java
+++ b/src/main/java/org/apache/accumulo/examples/shard/Reverse.java
@@ -32,8 +32,9 @@
 import com.beust.jcommander.Parameter;
 
 /**
- * The program reads an accumulo table written by {@link Index} and writes out to another table. It writes out a mapping of documents to terms. The document to
- * term mapping is used by {@link ContinuousQuery}.
+ * The program reads an accumulo table written by {@link Index} and writes out to another table. It
+ * writes out a mapping of documents to terms. The document to term mapping is used by
+ * {@link ContinuousQuery}.
  */
 public class Reverse {
 
@@ -50,11 +51,12 @@ public static void main(String[] args) throws Exception {
     Opts opts = new Opts();
     opts.parseArgs(Reverse.class.getName(), args);
 
-    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties").build();
+    AccumuloClient client = Accumulo.newClient().usingProperties("conf/accumulo-client.properties")
+        .build();
 
     try (Scanner scanner = client.createScanner(opts.shardTable, Authorizations.EMPTY);
-         BatchWriter bw = client.createBatchWriter(opts.doc2TermTable)) {
-      for (Entry<Key, Value> entry : scanner) {
+        BatchWriter bw = client.createBatchWriter(opts.doc2TermTable)) {
+      for (Entry<Key,Value> entry : scanner) {
         Key key = entry.getKey();
         Mutation m = new Mutation(key.getColumnQualifier());
         m.put(key.getColumnFamily(), new Text(), new Value(new byte[0]));
diff --git a/src/test/java/org/apache/accumulo/examples/ExamplesIT.java b/src/test/java/org/apache/accumulo/examples/ExamplesIT.java
index 5b89c8d..bddddff 100644
--- a/src/test/java/org/apache/accumulo/examples/ExamplesIT.java
+++ b/src/test/java/org/apache/accumulo/examples/ExamplesIT.java
@@ -55,9 +55,7 @@
 import org.apache.accumulo.core.iterators.user.AgeOffFilter;
 import org.apache.accumulo.core.iterators.user.SummingCombiner;
 import org.apache.accumulo.core.security.Authorizations;
-import org.apache.accumulo.examples.client.Flush;
 import org.apache.accumulo.examples.client.RandomBatchScanner;
-import org.apache.accumulo.examples.client.RandomBatchWriter;
 import org.apache.accumulo.examples.client.ReadWriteExample;
 import org.apache.accumulo.examples.client.RowOperations;
 import org.apache.accumulo.examples.client.SequentialBatchWriter;
@@ -84,7 +82,6 @@
 import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
 import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl.LogWriter;
 import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
-import org.apache.accumulo.start.Main;
 import org.apache.accumulo.test.TestIngest;
 import org.apache.accumulo.tracer.TraceServer;
 import org.apache.hadoop.conf.Configuration;
@@ -145,11 +142,13 @@ public void getClusterInfo() throws Exception {
   @After
   public void resetAuths() throws Exception {
     if (null != origAuths) {
-      getAccumuloClient().securityOperations().changeUserAuthorizations(getAdminPrincipal(), origAuths);
+      getAccumuloClient().securityOperations().changeUserAuthorizations(getAdminPrincipal(),
+          origAuths);
     }
   }
 
-  public static void writeClientPropsFile(String file, String instance, String keepers, String user, String password) throws IOException {
+  public static void writeClientPropsFile(String file, String instance, String keepers, String user,
+      String password) throws IOException {
     try (BufferedWriter writer = Files.newBufferedWriter(Paths.get(file))) {
       writer.write("instance.name=" + instance + "\n");
       writer.write("instance.zookeepers=" + keepers + "\n");
@@ -177,9 +176,12 @@ public void testTrace() throws Exception {
       while (!c.tableOperations().exists("trace"))
         sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
     }
-    String[] args = new String[] {"-c", getClientPropsFile(), "--createtable", "--deletetable", "--create"};
-    Entry<Integer,String> pair = cluster.getClusterControl().execWithStdout(TracingExample.class, args);
-    Assert.assertEquals("Expected return code of zero. STDOUT=" + pair.getValue(), 0, pair.getKey().intValue());
+    String[] args = new String[] {"-c", getClientPropsFile(), "--createtable", "--deletetable",
+        "--create"};
+    Entry<Integer,String> pair = cluster.getClusterControl().execWithStdout(TracingExample.class,
+        args);
+    Assert.assertEquals("Expected return code of zero. STDOUT=" + pair.getValue(), 0,
+        pair.getKey().intValue());
     String result = pair.getValue();
     Pattern pattern = Pattern.compile("TraceID: ([0-9a-f]+)");
     Matcher matcher = pattern.matcher(result);
@@ -204,7 +206,8 @@ public void testDirList() throws Exception {
     String dirListDirectory;
     switch (getClusterType()) {
       case MINI:
-        dirListDirectory = ((MiniAccumuloClusterImpl) getCluster()).getConfig().getDir().getAbsolutePath();
+        dirListDirectory = ((MiniAccumuloClusterImpl) getCluster()).getConfig().getDir()
+            .getAbsolutePath();
         break;
       case STANDALONE:
         dirListDirectory = ((StandaloneAccumuloCluster) getCluster()).getAccumuloHome();
@@ -213,12 +216,15 @@ public void testDirList() throws Exception {
         throw new RuntimeException("Unknown cluster type");
     }
     assumeTrue(new File(dirListDirectory).exists());
-    // Index a directory listing on /tmp. If this is running against a standalone cluster, we can't guarantee Accumulo source will be there.
-    args = new String[] {"-c", getClientPropsFile(), "--dirTable", dirTable, "--indexTable", indexTable, "--dataTable", dataTable, "--vis", visibility,
-        "--chunkSize", Integer.toString(10000), dirListDirectory};
+    // Index a directory listing on /tmp. If this is running against a standalone cluster, we can't
+    // guarantee Accumulo source will be there.
+    args = new String[] {"-c", getClientPropsFile(), "--dirTable", dirTable, "--indexTable",
+        indexTable, "--dataTable", dataTable, "--vis", visibility, "--chunkSize",
+        Integer.toString(10000), dirListDirectory};
 
     Entry<Integer,String> entry = getClusterControl().execWithStdout(Ingest.class, args);
-    assertEquals("Got non-zero return code. Stdout=" + entry.getValue(), 0, entry.getKey().intValue());
+    assertEquals("Got non-zero return code. Stdout=" + entry.getValue(), 0,
+        entry.getKey().intValue());
 
     String expectedFile;
     switch (getClusterType()) {
@@ -234,7 +240,8 @@ public void testDirList() throws Exception {
         throw new RuntimeException("Unknown cluster type");
     }
 
-    args = new String[] {"-c", getClientPropsFile(), "-t", indexTable, "--auths", auths, "--search", "--path", expectedFile};
+    args = new String[] {"-c", getClientPropsFile(), "-t", indexTable, "--auths", auths, "--search",
+        "--path", expectedFile};
     entry = getClusterControl().execWithStdout(QueryUtil.class, args);
     if (ClusterType.MINI == getClusterType()) {
       MiniAccumuloClusterImpl impl = (MiniAccumuloClusterImpl) cluster;
@@ -325,11 +332,13 @@ public void testShardedIndex() throws Exception {
     }
     assertTrue(thisFile);
 
-    String[] args = new String[] {"-c", getClientPropsFile(), "--shardTable", shard, "--doc2Term", index};
+    String[] args = new String[] {"-c", getClientPropsFile(), "--shardTable", shard, "--doc2Term",
+        index};
 
     // create a reverse index
     goodExec(Reverse.class, args);
-    args = new String[] {"-c", getClientPropsFile(), "--shardTable", shard, "--doc2Term", index, "--terms", "5", "--count", "1000"};
+    args = new String[] {"-c", getClientPropsFile(), "--shardTable", shard, "--doc2Term", index,
+        "--terms", "5", "--count", "1000"};
     // run some queries
     goodExec(ContinuousQuery.class, args);
   }
@@ -356,14 +365,15 @@ public void testTeraSortAndRead() throws Exception {
     // TODO Figure out a way to run M/R with Kerberos
     assumeTrue(getAdminToken() instanceof PasswordToken);
     String tableName = getUniqueNames(1)[0];
-    String[] args = new String[] {"--count", (1000 * 1000) + "", "-nk", "10", "-xk", "10", "-nv", "10", "-xv", "10", "-t", tableName, "-c", getClientPropsFile(),
-        "--splits", "4"};
+    String[] args = new String[] {"--count", (1000 * 1000) + "", "-nk", "10", "-xk", "10", "-nv",
+        "10", "-xv", "10", "-t", tableName, "-c", getClientPropsFile(), "--splits", "4"};
     goodExec(TeraSortIngest.class, args);
     Path output = new Path(dir, "tmp/nines");
     if (fs.exists(output)) {
       fs.delete(output, true);
     }
-    args = new String[] {"-c", getClientPropsFile(), "-t", tableName, "--rowRegex", ".*999.*", "--output", output.toString()};
+    args = new String[] {"-c", getClientPropsFile(), "-t", tableName, "--rowRegex", ".*999.*",
+        "--output", output.toString()};
     goodExec(RegexExample.class, args);
     args = new String[] {"-c", getClientPropsFile(), "-t", tableName, "--column", "c:"};
     goodExec(RowHash.class, args);
@@ -371,7 +381,8 @@ public void testTeraSortAndRead() throws Exception {
     if (fs.exists(output)) {
       fs.delete(output, true);
     }
-    args = new String[] {"-c", getClientPropsFile(), "-t", tableName, "--output", output.toString()};
+    args = new String[] {"-c", getClientPropsFile(), "-t", tableName, "--output",
+        output.toString()};
     goodExec(TableToFile.class, args);
   }
 
@@ -382,7 +393,8 @@ public void testWordCount() throws Exception {
     String tableName = getUniqueNames(1)[0];
     c.tableOperations().create(tableName);
     is = new IteratorSetting(10, SummingCombiner.class);
-    SummingCombiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column(new Text("count"))));
+    SummingCombiner.setColumns(is,
+        Collections.singletonList(new IteratorSetting.Column(new Text("count"))));
     SummingCombiner.setEncodingType(is, SummingCombiner.Type.STRING);
     c.tableOperations().attachIterator(tableName, is);
     Path readme = new Path(new Path(System.getProperty("user.dir")).getParent(), "README.md");
@@ -407,14 +419,16 @@ public void testInsertWithBatchWriterAndReadData() throws Exception {
   @Test
   public void testIsolatedScansWithInterference() throws Exception {
     String[] args;
-    args = new String[] {"-c", getClientPropsFile(), "-t", getUniqueNames(1)[0], "--iterations", "100000", "--isolated"};
+    args = new String[] {"-c", getClientPropsFile(), "-t", getUniqueNames(1)[0], "--iterations",
+        "100000", "--isolated"};
     goodExec(InterferenceTest.class, args);
   }
 
   @Test
   public void testScansWithInterference() throws Exception {
     String[] args;
-    args = new String[] {"-c", getClientPropsFile(), "-t", getUniqueNames(1)[0], "--iterations", "100000"};
+    args = new String[] {"-c", getClientPropsFile(), "-t", getUniqueNames(1)[0], "--iterations",
+        "100000"};
     goodExec(InterferenceTest.class, args);
   }
 
@@ -430,7 +444,7 @@ public void testSequentialBatchWriter() throws Exception {
 
   @Test
   public void testReadWriteAndDelete() throws Exception {
-    goodExec(ReadWriteExample.class,"-c", getClientPropsFile());
+    goodExec(ReadWriteExample.class, "-c", getClientPropsFile());
   }
 
   @Test
@@ -438,13 +452,15 @@ public void testRandomBatchScanner() throws Exception {
     goodExec(RandomBatchScanner.class, "-c", getClientPropsFile());
   }
 
-  private void goodExec(Class<?> theClass, String... args) throws InterruptedException, IOException {
+  private void goodExec(Class<?> theClass, String... args)
+      throws InterruptedException, IOException {
     Entry<Integer,String> pair;
     if (Tool.class.isAssignableFrom(theClass) && ClusterType.STANDALONE == getClusterType()) {
       StandaloneClusterControl control = (StandaloneClusterControl) getClusterControl();
       pair = control.execMapreduceWithStdout(theClass, args);
     } else {
-      // We're already slurping stdout into memory (not redirecting to file). Might as well add it to error message.
+      // We're already slurping stdout into memory (not redirecting to file). Might as well add it
+      // to error message.
       pair = getClusterControl().execWithStdout(theClass, args);
     }
     Assert.assertEquals("stdout=" + pair.getValue(), 0, pair.getKey().intValue());
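
Several hunks in ExamplesIT move the examples to Accumulo.newClient().usingProperties(...), and writeClientPropsFile generates the file that builder consumes. A sketch of the round trip follows; only instance.name and instance.zookeepers appear in the hunk above, so the auth.* keys in the comment are assumed from the standard client property names:

    import org.apache.accumulo.core.client.Accumulo;
    import org.apache.accumulo.core.client.AccumuloClient;

    // Sketch: build a client from a generated properties file, e.g.
    //   instance.name=test
    //   instance.zookeepers=localhost:2181
    //   auth.type=password    (assumed key)
    //   auth.principal=root   (assumed key)
    //   auth.token=secret     (assumed key)
    public class ClientFromProps {
      public static void main(String[] args) throws Exception {
        AccumuloClient client = Accumulo.newClient()
            .usingProperties("conf/accumulo-client.properties").build();
        System.out.println("connected as " + client.whoami());
      }
    }
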
diff --git a/src/test/java/org/apache/accumulo/examples/constraints/AlphaNumKeyConstraintTest.java b/src/test/java/org/apache/accumulo/examples/constraints/AlphaNumKeyConstraintTest.java
index 0f4407f..5a1fcc0 100644
--- a/src/test/java/org/apache/accumulo/examples/constraints/AlphaNumKeyConstraintTest.java
+++ b/src/test/java/org/apache/accumulo/examples/constraints/AlphaNumKeyConstraintTest.java
@@ -39,15 +39,20 @@ public void test() {
     // Check that violations are in row, cf, cq order
     Mutation badMutation = new Mutation(new Text("Row#1"));
     badMutation.put(new Text("Colf$2"), new Text("Colq%3"), new Value("value".getBytes()));
-    assertEquals(ImmutableList.of(AlphaNumKeyConstraint.NON_ALPHA_NUM_ROW, AlphaNumKeyConstraint.NON_ALPHA_NUM_COLF, AlphaNumKeyConstraint.NON_ALPHA_NUM_COLQ),
+    assertEquals(
+        ImmutableList.of(AlphaNumKeyConstraint.NON_ALPHA_NUM_ROW,
+            AlphaNumKeyConstraint.NON_ALPHA_NUM_COLF, AlphaNumKeyConstraint.NON_ALPHA_NUM_COLQ),
         ankc.check(null, badMutation));
   }
 
   @Test
   public void testGetViolationDescription() {
-    assertEquals(AlphaNumKeyConstraint.ROW_VIOLATION_MESSAGE, ankc.getViolationDescription(AlphaNumKeyConstraint.NON_ALPHA_NUM_ROW));
-    assertEquals(AlphaNumKeyConstraint.COLF_VIOLATION_MESSAGE, ankc.getViolationDescription(AlphaNumKeyConstraint.NON_ALPHA_NUM_COLF));
-    assertEquals(AlphaNumKeyConstraint.COLQ_VIOLATION_MESSAGE, ankc.getViolationDescription(AlphaNumKeyConstraint.NON_ALPHA_NUM_COLQ));
+    assertEquals(AlphaNumKeyConstraint.ROW_VIOLATION_MESSAGE,
+        ankc.getViolationDescription(AlphaNumKeyConstraint.NON_ALPHA_NUM_ROW));
+    assertEquals(AlphaNumKeyConstraint.COLF_VIOLATION_MESSAGE,
+        ankc.getViolationDescription(AlphaNumKeyConstraint.NON_ALPHA_NUM_COLF));
+    assertEquals(AlphaNumKeyConstraint.COLQ_VIOLATION_MESSAGE,
+        ankc.getViolationDescription(AlphaNumKeyConstraint.NON_ALPHA_NUM_COLQ));
     assertNull(ankc.getViolationDescription((short) 4));
   }
 }
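
The assertions above pair violation codes with human-readable descriptions; both come from Accumulo's Constraint interface. For orientation, here is a sketch of the shape such a class takes (the package is the pre-2.1 org.apache.accumulo.core.constraints location, and the violation code and message are illustrative, not the ones AlphaNumKeyConstraint defines):

    import java.util.Collections;
    import java.util.List;
    import org.apache.accumulo.core.constraints.Constraint;
    import org.apache.accumulo.core.data.Mutation;

    // Sketch: reject mutations with an empty row. Code 1 is illustrative.
    public class NonEmptyRowConstraint implements Constraint {

      private static final short EMPTY_ROW = 1;

      @Override
      public String getViolationDescription(short violationCode) {
        return violationCode == EMPTY_ROW ? "row may not be empty" : null;
      }

      @Override
      public List<Short> check(Environment env, Mutation mutation) {
        if (mutation.getRow().length == 0)
          return Collections.singletonList(EMPTY_ROW);
        return null; // null means no violations
      }
    }
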
diff --git a/src/test/java/org/apache/accumulo/examples/constraints/NumericValueConstraintTest.java b/src/test/java/org/apache/accumulo/examples/constraints/NumericValueConstraintTest.java
index 7004710..fec92bc 100644
--- a/src/test/java/org/apache/accumulo/examples/constraints/NumericValueConstraintTest.java
+++ b/src/test/java/org/apache/accumulo/examples/constraints/NumericValueConstraintTest.java
@@ -40,12 +40,14 @@ public void testCheck() {
     Mutation badMutation = new Mutation(new Text("r"));
     badMutation.put(new Text("cf"), new Text("cq"), new Value("foo1234".getBytes()));
     badMutation.put(new Text("cf2"), new Text("cq2"), new Value("foo1234".getBytes()));
-    assertEquals(NumericValueConstraint.NON_NUMERIC_VALUE, Iterables.getOnlyElement(nvc.check(null, badMutation)).shortValue());
+    assertEquals(NumericValueConstraint.NON_NUMERIC_VALUE,
+        Iterables.getOnlyElement(nvc.check(null, badMutation)).shortValue());
   }
 
   @Test
   public void testGetViolationDescription() {
-    assertEquals(NumericValueConstraint.VIOLATION_MESSAGE, nvc.getViolationDescription(NumericValueConstraint.NON_NUMERIC_VALUE));
+    assertEquals(NumericValueConstraint.VIOLATION_MESSAGE,
+        nvc.getViolationDescription(NumericValueConstraint.NON_NUMERIC_VALUE));
     assertNull(nvc.getViolationDescription((short) 2));
   }
 }
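
NumericValueConstraintTest exercises the class directly; in a live system a constraint only takes effect once attached to a table. A short usage sketch (the helper and table name are illustrative):

    import org.apache.accumulo.core.client.AccumuloClient;

    // Sketch: constraints apply per table; addConstraint returns an id
    // that identifies the constraint for later removal.
    static int attachConstraint(AccumuloClient client, String tableName) throws Exception {
      int id = client.tableOperations().addConstraint(tableName,
          NumericValueConstraint.class.getName());
      // later: client.tableOperations().removeConstraint(tableName, id);
      return id;
    }
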
diff --git a/src/test/java/org/apache/accumulo/examples/dirlist/CountIT.java b/src/test/java/org/apache/accumulo/examples/dirlist/CountIT.java
index 4533c9b..88b5f81 100644
--- a/src/test/java/org/apache/accumulo/examples/dirlist/CountIT.java
+++ b/src/test/java/org/apache/accumulo/examples/dirlist/CountIT.java
@@ -66,8 +66,10 @@ public void setupInstance() throws Exception {
     bw.addMutation(Ingest.buildMutation(cv, "/local/user2", true, false, true, 272, 12345, null));
     bw.addMutation(Ingest.buildMutation(cv, "/local/file", false, false, false, 1024, 12345, null));
     bw.addMutation(Ingest.buildMutation(cv, "/local/file", false, false, false, 1024, 23456, null));
-    bw.addMutation(Ingest.buildMutation(cv, "/local/user1/file1", false, false, false, 2024, 12345, null));
-    bw.addMutation(Ingest.buildMutation(cv, "/local/user1/file2", false, false, false, 1028, 23456, null));
+    bw.addMutation(
+        Ingest.buildMutation(cv, "/local/user1/file1", false, false, false, 2024, 12345, null));
+    bw.addMutation(
+        Ingest.buildMutation(cv, "/local/user1/file2", false, false, false, 1028, 23456, null));
     bw.close();
   }
 
@@ -79,7 +81,8 @@ public void test() throws Exception {
 
     ScannerOpts scanOpts = new ScannerOpts();
     BatchWriterOpts bwOpts = new BatchWriterOpts();
-    FileCount fc = new FileCount(client,  tableName, Authorizations.EMPTY, new ColumnVisibility(), scanOpts, bwOpts);
+    FileCount fc = new FileCount(client, tableName, Authorizations.EMPTY, new ColumnVisibility(),
+        scanOpts, bwOpts);
     fc.run();
 
     ArrayList<Pair<String,String>> expected = new ArrayList<>();
diff --git a/src/test/java/org/apache/accumulo/examples/filedata/ChunkCombinerTest.java b/src/test/java/org/apache/accumulo/examples/filedata/ChunkCombinerTest.java
index 515aea5..53da97d 100644
--- a/src/test/java/org/apache/accumulo/examples/filedata/ChunkCombinerTest.java
+++ b/src/test/java/org/apache/accumulo/examples/filedata/ChunkCombinerTest.java
@@ -80,7 +80,8 @@ public void next() throws IOException {
       entry = null;
       while (iter.hasNext()) {
         entry = iter.next();
-        if (columnFamilies.size() > 0 && !columnFamilies.contains(entry.getKey().getColumnFamilyData())) {
+        if (columnFamilies.size() > 0
+            && !columnFamilies.contains(entry.getKey().getColumnFamilyData())) {
           entry = null;
           continue;
         }
@@ -91,7 +92,8 @@ public void next() throws IOException {
     }
 
     @Override
-    public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
+    public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive)
+        throws IOException {
       if (!inclusive) {
         throw new IllegalArgumentException("can only do inclusive colf filtering");
       }
@@ -111,7 +113,8 @@ public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean i
     }
 
     @Override
-    public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
+    public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options,
+        IteratorEnvironment env) throws IOException {
       throw new UnsupportedOperationException();
     }
   }
@@ -198,9 +201,11 @@ protected void setUp() {
     cRow3.put(new Key("row3", refs, "hash1\0x", "A&B"), new Value("".getBytes()));
     cRow3.put(new Key("row3", refs, "hash1\0y", "(A&B)"), new Value("".getBytes()));
     cRow3.put(new Key("row3", refs, "hash1\0z", "(F|G)&(D|E)"), new Value("".getBytes()));
-    cRow3.put(new Key("row3", chunk_cf, "0000", "((F|G)&(D|E))|(A&B)|(C&(D|E))", 20), new Value("V1".getBytes()));
+    cRow3.put(new Key("row3", chunk_cf, "0000", "((F|G)&(D|E))|(A&B)|(C&(D|E))", 20),
+        new Value("V1".getBytes()));
 
-    cOnlyRow3.put(new Key("row3", chunk_cf, "0000", "((F|G)&(D|E))|(A&B)|(C&(D|E))", 20), new Value("V1".getBytes()));
+    cOnlyRow3.put(new Key("row3", chunk_cf, "0000", "((F|G)&(D|E))|(A&B)|(C&(D|E))", 20),
+        new Value("V1".getBytes()));
 
     badrow.put(new Key("row1", chunk_cf, "0000", "A"), new Value("V1".getBytes()));
     badrow.put(new Key("row1", chunk_cf, "0000", "B"), new Value("V2".getBytes()));
@@ -234,7 +239,8 @@ public void test1() throws IOException {
     }
   }
 
-  private void runTest(boolean reseek, TreeMap<Key,Value> source, TreeMap<Key,Value> result, Collection<ByteSequence> cols) throws IOException {
+  private void runTest(boolean reseek, TreeMap<Key,Value> source, TreeMap<Key,Value> result,
+      Collection<ByteSequence> cols) throws IOException {
     MapIterator src = new MapIterator(source);
     SortedKeyValueIterator<Key,Value> iter = new ChunkCombiner();
     iter.init(src, null, null);
@@ -248,7 +254,8 @@ private void runTest(boolean reseek, TreeMap<Key,Value> source, TreeMap<Key,Valu
       seen.put(new Key(iter.getTopKey()), new Value(iter.getTopValue()));
 
       if (reseek)
-        iter.seek(new Range(iter.getTopKey().followingKey(PartialKey.ROW_COLFAM_COLQUAL), true, null, true), cols, true);
+        iter.seek(new Range(iter.getTopKey().followingKey(PartialKey.ROW_COLFAM_COLQUAL), true,
+            null, true), cols, true);
       else
         iter.next();
     }
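
runTest above drives a ChunkCombiner by hand through seek, hasTop, getTopKey/getTopValue, and next; that loop is the canonical way any SortedKeyValueIterator is consumed. A pared-down version without the reseek branch (a sketch; it assumes the iterator has already been init-ed over a source, as iter.init(src, null, null) does above):

    import java.io.IOException;
    import java.util.Collections;
    import org.apache.accumulo.core.data.ByteSequence;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.iterators.SortedKeyValueIterator;

    // Sketch: the standard read loop over a SortedKeyValueIterator.
    static void drain(SortedKeyValueIterator<Key,Value> iter) throws IOException {
      iter.seek(new Range(), Collections.<ByteSequence>emptySet(), false);
      while (iter.hasTop()) {
        Key k = iter.getTopKey();
        Value v = iter.getTopValue();
        System.out.println(k + " -> " + v);
        iter.next();
      }
    }
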
diff --git a/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputFormatIT.java b/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputFormatIT.java
index e8e87a9..5d4b8a5 100644
--- a/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputFormatIT.java
+++ b/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputFormatIT.java
@@ -60,8 +60,9 @@ public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration hadoo
     cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, "false");
   }
 
-  // track errors in the map reduce job; jobs insert a dummy error for the map and cleanup tasks (to ensure test correctness),
-  // so error tests should check to see if there is at least one error (could be more depending on the test) rather than zero
+  // track errors in the map reduce job; jobs insert a dummy error for the map and cleanup tasks (to
+  // ensure test correctness), so error tests should check to see if there is at least one error
+  // (could be more depending on the test) rather than zero
   private static Multimap<String,AssertionError> assertionErrors = ArrayListMultimap.create();
 
   private static final Authorizations AUTHS = new Authorizations("A", "B", "C", "D");
@@ -106,11 +107,13 @@ public static void entryEquals(Entry<Key,Value> e1, Entry<Key,Value> e2) {
   }
 
   public static class CIFTester extends Configured implements Tool {
-    public static class TestMapper extends Mapper<List<Entry<Key,Value>>,InputStream,List<Entry<Key,Value>>,InputStream> {
+    public static class TestMapper
+        extends Mapper<List<Entry<Key,Value>>,InputStream,List<Entry<Key,Value>>,InputStream> {
       int count = 0;
 
       @Override
-      protected void map(List<Entry<Key,Value>> key, InputStream value, Context context) throws IOException, InterruptedException {
+      protected void map(List<Entry<Key,Value>> key, InputStream value, Context context)
+          throws IOException, InterruptedException {
         String table = context.getConfiguration().get("MRTester_tableName");
         assertNotNull(table);
 
@@ -158,11 +161,13 @@ protected void cleanup(Context context) throws IOException, InterruptedException
       }
     }
 
-    public static class TestNoClose extends Mapper<List<Entry<Key,Value>>,InputStream,List<Entry<Key,Value>>,InputStream> {
+    public static class TestNoClose
+        extends Mapper<List<Entry<Key,Value>>,InputStream,List<Entry<Key,Value>>,InputStream> {
       int count = 0;
 
       @Override
-      protected void map(List<Entry<Key,Value>> key, InputStream value, Context context) throws IOException, InterruptedException {
+      protected void map(List<Entry<Key,Value>> key, InputStream value, Context context)
+          throws IOException, InterruptedException {
         String table = context.getConfiguration().get("MRTester_tableName");
         assertNotNull(table);
 
@@ -190,9 +195,11 @@ protected void map(List<Entry<Key,Value>> key, InputStream value, Context contex
       }
     }
 
-    public static class TestBadData extends Mapper<List<Entry<Key,Value>>,InputStream,List<Entry<Key,Value>>,InputStream> {
+    public static class TestBadData
+        extends Mapper<List<Entry<Key,Value>>,InputStream,List<Entry<Key,Value>>,InputStream> {
       @Override
-      protected void map(List<Entry<Key,Value>> key, InputStream value, Context context) throws IOException, InterruptedException {
+      protected void map(List<Entry<Key,Value>> key, InputStream value, Context context)
+          throws IOException, InterruptedException {
         String table = context.getConfiguration().get("MRTester_tableName");
         assertNotNull(table);
 
@@ -230,7 +237,8 @@ protected void map(List<Entry<Key,Value>> key, InputStream value, Context contex
     @Override
     public int run(String[] args) throws Exception {
       if (args.length != 2) {
-        throw new IllegalArgumentException("Usage : " + CIFTester.class.getName() + " <table> <mapperClass>");
+        throw new IllegalArgumentException(
+            "Usage : " + CIFTester.class.getName() + " <table> <mapperClass>");
       }
 
       String table = args[0];
@@ -250,7 +258,8 @@ public int run(String[] args) throws Exception {
       ChunkInputFormat.setScanAuthorizations(job, AUTHS);
 
       @SuppressWarnings("unchecked")
-      Class<? extends Mapper<?,?,?,?>> forName = (Class<? extends Mapper<?,?,?,?>>) Class.forName(args[1]);
+      Class<? extends Mapper<?,?,?,?>> forName = (Class<? extends Mapper<?,?,?,?>>) Class
+          .forName(args[1]);
       job.setMapperClass(forName);
       job.setMapOutputKeyClass(Key.class);
       job.setMapOutputValueClass(Value.class);
@@ -266,7 +275,8 @@ public int run(String[] args) throws Exception {
     public static int main(String... args) throws Exception {
       Configuration conf = new Configuration();
       conf.set("mapreduce.framework.name", "local");
-      conf.set("mapreduce.cluster.local.dir", new File(System.getProperty("user.dir"), "target/mapreduce-tmp").getAbsolutePath());
+      conf.set("mapreduce.cluster.local.dir",
+          new File(System.getProperty("user.dir"), "target/mapreduce-tmp").getAbsolutePath());
       return ToolRunner.run(conf, new CIFTester(), args);
     }
   }
@@ -279,7 +289,8 @@ public void test() throws Exception {
     for (Entry<Key,Value> e : data) {
       Key k = e.getKey();
       Mutation m = new Mutation(k.getRow());
-      m.put(k.getColumnFamily(), k.getColumnQualifier(), new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp(), e.getValue());
+      m.put(k.getColumnFamily(), k.getColumnQualifier(),
+          new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp(), e.getValue());
       bw.addMutation(m);
     }
     bw.close();
@@ -296,7 +307,8 @@ public void testErrorOnNextWithoutClose() throws Exception {
     for (Entry<Key,Value> e : data) {
       Key k = e.getKey();
       Mutation m = new Mutation(k.getRow());
-      m.put(k.getColumnFamily(), k.getColumnQualifier(), new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp(), e.getValue());
+      m.put(k.getColumnFamily(), k.getColumnQualifier(),
+          new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp(), e.getValue());
       bw.addMutation(m);
     }
     bw.close();
@@ -314,7 +326,8 @@ public void testInfoWithoutChunks() throws Exception {
     for (Entry<Key,Value> e : baddata) {
       Key k = e.getKey();
       Mutation m = new Mutation(k.getRow());
-      m.put(k.getColumnFamily(), k.getColumnQualifier(), new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp(), e.getValue());
+      m.put(k.getColumnFamily(), k.getColumnQualifier(),
+          new ColumnVisibility(k.getColumnVisibility()), k.getTimestamp(), e.getValue());
       bw.addMutation(m);
     }
     bw.close();
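
The mappers in CIFTester are all parameterized as Mapper<List<Entry<Key,Value>>,InputStream,...>: ChunkInputFormat delivers one map() call per stored file, with the file's metadata entries as the key and a stream over its reassembled chunks as the value. A stripped-down mapper showing just that contract (the counter group and name are illustrative; the test's error tracking and table checks are omitted):

    import java.io.IOException;
    import java.io.InputStream;
    import java.util.List;
    import java.util.Map.Entry;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.hadoop.mapreduce.Mapper;

    // Sketch: count the bytes of each file handed to the mapper.
    public class ByteCountMapper
        extends Mapper<List<Entry<Key,Value>>,InputStream,Key,Value> {
      @Override
      protected void map(List<Entry<Key,Value>> key, InputStream value, Context context)
          throws IOException, InterruptedException {
        long bytes = 0;
        byte[] buf = new byte[4096];
        int n;
        while ((n = value.read(buf)) != -1)
          bytes += n;
        value.close(); // closing matters; see the TestNoClose mapper above
        context.getCounter("chunks", "bytes").increment(bytes);
      }
    }
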
diff --git a/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamIT.java b/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamIT.java
index 5720276..98572ab 100644
--- a/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamIT.java
+++ b/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamIT.java
@@ -115,25 +115,31 @@ public void setupData() {
     addData(multidata, "c", "~chunk", 100, 1, "B&C", "");
   }
 
-  static void addData(List<Entry<Key,Value>> data, String row, String cf, String cq, String vis, String value) {
-    data.add(new KeyValue(new Key(new Text(row), new Text(cf), new Text(cq), new Text(vis)), value.getBytes()));
+  static void addData(List<Entry<Key,Value>> data, String row, String cf, String cq, String vis,
+      String value) {
+    data.add(new KeyValue(new Key(new Text(row), new Text(cf), new Text(cq), new Text(vis)),
+        value.getBytes()));
   }
 
-  static void addData(List<Entry<Key,Value>> data, String row, String cf, int chunkSize, int chunkCount, String vis, String value) {
+  static void addData(List<Entry<Key,Value>> data, String row, String cf, int chunkSize,
+      int chunkCount, String vis, String value) {
     Text chunkCQ = new Text(FileDataIngest.intToBytes(chunkSize));
     chunkCQ.append(FileDataIngest.intToBytes(chunkCount), 0, 4);
-    data.add(new KeyValue(new Key(new Text(row), new Text(cf), chunkCQ, new Text(vis)), value.getBytes()));
+    data.add(new KeyValue(new Key(new Text(row), new Text(cf), chunkCQ, new Text(vis)),
+        value.getBytes()));
   }
 
   @Test
-  public void testWithAccumulo() throws AccumuloException, AccumuloSecurityException, TableExistsException, TableNotFoundException, IOException {
+  public void testWithAccumulo() throws AccumuloException, AccumuloSecurityException,
+      TableExistsException, TableNotFoundException, IOException {
     client.tableOperations().create(tableName);
     BatchWriter bw = client.createBatchWriter(tableName, new BatchWriterConfig());
 
     for (Entry<Key,Value> e : data) {
       Key k = e.getKey();
       Mutation m = new Mutation(k.getRow());
-      m.put(k.getColumnFamily(), k.getColumnQualifier(), new ColumnVisibility(k.getColumnVisibility()), e.getValue());
+      m.put(k.getColumnFamily(), k.getColumnQualifier(),
+          new ColumnVisibility(k.getColumnVisibility()), e.getValue());
       bw.addMutation(m);
     }
     bw.close();
diff --git a/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamTest.java b/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamTest.java
index d36e5ce..8096d79 100644
--- a/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamTest.java
+++ b/src/test/java/org/apache/accumulo/examples/filedata/ChunkInputStreamTest.java
@@ -86,14 +86,18 @@ public void setupData() {
     addData(multidata, "c", "~chunk", 100, 1, "B&C", "");
   }
 
-  private static void addData(List<Entry<Key,Value>> data, String row, String cf, String cq, String vis, String value) {
-    data.add(new KeyValue(new Key(new Text(row), new Text(cf), new Text(cq), new Text(vis)), value.getBytes()));
+  private static void addData(List<Entry<Key,Value>> data, String row, String cf, String cq,
+      String vis, String value) {
+    data.add(new KeyValue(new Key(new Text(row), new Text(cf), new Text(cq), new Text(vis)),
+        value.getBytes()));
   }
 
-  private static void addData(List<Entry<Key,Value>> data, String row, String cf, int chunkSize, int chunkCount, String vis, String value) {
+  private static void addData(List<Entry<Key,Value>> data, String row, String cf, int chunkSize,
+      int chunkCount, String vis, String value) {
     Text chunkCQ = new Text(FileDataIngest.intToBytes(chunkSize));
     chunkCQ.append(FileDataIngest.intToBytes(chunkCount), 0, 4);
-    data.add(new KeyValue(new Key(new Text(row), new Text(cf), chunkCQ, new Text(vis)), value.getBytes()));
+    data.add(new KeyValue(new Key(new Text(row), new Text(cf), chunkCQ, new Text(vis)),
+        value.getBytes()));
   }
 
   @Test
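
Both addData helpers above build the chunk column qualifier by concatenating FileDataIngest.intToBytes(chunkSize) with intToBytes(chunkCount), four bytes each. Assuming intToBytes is a 4-byte big-endian encoding (the fixed-length append implies 4 bytes; the byte order is an assumption), the layout can be reproduced like this:

    import org.apache.hadoop.io.Text;

    // Sketch: the 8-byte chunk column qualifier used in these tests.
    static byte[] intToBytes(int i) { // assumed big-endian, 4 bytes
      return new byte[] {(byte) (i >>> 24), (byte) (i >>> 16), (byte) (i >>> 8), (byte) i};
    }

    static Text chunkQualifier(int chunkSize, int chunkIndex) {
      Text cq = new Text(intToBytes(chunkSize)); // bytes 0-3: chunk size
      cq.append(intToBytes(chunkIndex), 0, 4);   // bytes 4-7: chunk index
      return cq;
    }
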
diff --git a/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java b/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java
index 7bf9c86..78d3572 100644
--- a/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java
+++ b/src/test/java/org/apache/accumulo/examples/mapreduce/MapReduceIT.java
@@ -59,7 +59,8 @@ protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSit
     cfg.setProperty(Property.TSERV_NATIVEMAP_ENABLED, "false");
   }
 
-  public static final String hadoopTmpDirArg = "-Dhadoop.tmp.dir=" + System.getProperty("user.dir") + "/target/hadoop-tmp";
+  public static final String hadoopTmpDirArg = "-Dhadoop.tmp.dir=" + System.getProperty("user.dir")
+      + "/target/hadoop-tmp";
 
   static final String tablename = "mapredf";
   static final String input_cf = "cf-HASHTYPE";
@@ -70,15 +71,17 @@ protected void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSit
 
   @Test
   public void test() throws Exception {
-    String confFile = System.getProperty("user.dir")+"/target/examples.conf";
+    String confFile = System.getProperty("user.dir") + "/target/examples.conf";
     String instance = getClientInfo().getInstanceName();
     String keepers = getClientInfo().getZooKeepers();
     ExamplesIT.writeClientPropsFile(confFile, instance, keepers, "root", ROOT_PASSWORD);
     runTest(confFile, getClient(), getCluster());
   }
 
-  static void runTest(String confFile, AccumuloClient c, MiniAccumuloClusterImpl cluster) throws AccumuloException, AccumuloSecurityException, TableExistsException,
-      TableNotFoundException, MutationsRejectedException, IOException, InterruptedException, NoSuchAlgorithmException {
+  static void runTest(String confFile, AccumuloClient c, MiniAccumuloClusterImpl cluster)
+      throws AccumuloException, AccumuloSecurityException, TableExistsException,
+      TableNotFoundException, MutationsRejectedException, IOException, InterruptedException,
+      NoSuchAlgorithmException {
     c.tableOperations().create(tablename);
     BatchWriter bw = c.createBatchWriter(tablename, new BatchWriterConfig());
     for (int i = 0; i < 10; i++) {
@@ -87,7 +90,8 @@ static void runTest(String confFile, AccumuloClient c, MiniAccumuloClusterImpl c
       bw.addMutation(m);
     }
     bw.close();
-    Process hash = cluster.exec(RowHash.class, Collections.singletonList(hadoopTmpDirArg), "-c", confFile, "-t", tablename, "--column", input_cfcq);
+    Process hash = cluster.exec(RowHash.class, Collections.singletonList(hadoopTmpDirArg), "-c",
+        confFile, "-t", tablename, "--column", input_cfcq);
     assertEquals(0, hash.waitFor());
 
     Scanner s = c.createScanner(tablename, Authorizations.EMPTY);


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services