Posted to commits@hbase.apache.org by st...@apache.org on 2017/06/30 17:23:23 UTC

[4/4] hbase git commit: HBASE-18239 rubocop autocorrect for shell

HBASE-18239 rubocop autocorrect for shell

Signed-off-by: Michael Stack <st...@apache.org>
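
For context before the hunks: this patch was generated by RuboCop's auto-correct mode (the commit message does not record the exact invocation; a run along the lines of `rubocop --auto-correct` over hbase-shell/src/main/ruby would be the usual way to produce it), and the diff is dominated by a handful of mechanical style corrections. Below is a minimal, self-contained plain-Ruby sketch of the recurring before/after patterns seen in the hunks; the cop names in the comments are the standard RuboCop cops these corrections typically map to, not something listed in the commit itself.

    # Illustration only: the idioms below mirror the corrections applied
    # throughout hbase-shell in this patch, using stand-in data.

    names = %w[t1 t2 t3]

    # was: names.map { |t| t.upcase }                     (Style/SymbolProc)
    upper = names.map(&:upcase)

    # was: greeting = "hello"                             (Style/StringLiterals: single quotes when no interpolation)
    greeting = 'hello'

    # was: ... unless names.kind_of?(Array)               (Style/ClassCheck: is_a? over kind_of?)
    raise ArgumentError, 'expected an Array' unless names.is_a?(Array)

    # was: opts.has_key?(:limit)                          (Style/PreferredHashMethods: key? over has_key?)
    opts = { limit: 2 }
    limit = opts[:limit] if opts.key?(:limit)

    # was: unless limit.nil? ... end block                (Style/IfUnlessModifier: fold into modifier form)
    upper = upper.take(limit) unless limit.nil?

    # was: idx = idx + 1                                  (Style/SelfAssignment)
    idx = 0
    idx += 1

    # was: puts("%d names, %s" % [ upper.size, greeting ]) (Style/FormatString: prefer format())
    puts(format('%d names (%s): %s', upper.size, greeting, upper.join(', ')))

The same categories account for most of the churn below: dropping empty parentheses on no-arg calls (e.g. isBalancerEnabled), converting double-quoted literals to single quotes, and replacing String#% with format().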


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2115d4b5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2115d4b5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2115d4b5

Branch: refs/heads/master
Commit: 2115d4b50ecf7a17028d88c9bccef076e516a3e2
Parents: 90fee69
Author: Mike Drob <md...@apache.org>
Authored: Tue Jun 20 15:24:29 2017 -0500
Committer: Michael Stack <st...@apache.org>
Committed: Fri Jun 30 10:23:12 2017 -0700

----------------------------------------------------------------------
 hbase-shell/src/main/ruby/hbase/admin.rb        | 612 +++++++++----------
 hbase-shell/src/main/ruby/hbase/hbase.rb        |  20 +-
 hbase-shell/src/main/ruby/hbase/quotas.rb       | 180 +++---
 .../src/main/ruby/hbase/replication_admin.rb    |  54 +-
 .../src/main/ruby/hbase/rsgroup_admin.rb        |  39 +-
 hbase-shell/src/main/ruby/hbase/security.rb     | 138 +++--
 hbase-shell/src/main/ruby/hbase/table.rb        | 334 +++++-----
 hbase-shell/src/main/ruby/hbase/taskmonitor.rb  | 131 ++--
 .../src/main/ruby/hbase/visibility_labels.rb    |  81 +--
 hbase-shell/src/main/ruby/hbase_constants.rb    |  98 +--
 hbase-shell/src/main/ruby/irb/hirb.rb           |  29 +-
 hbase-shell/src/main/ruby/shell.rb              | 122 ++--
 hbase-shell/src/main/ruby/shell/commands.rb     |  35 +-
 .../main/ruby/shell/commands/abort_procedure.rb |   6 +-
 .../src/main/ruby/shell/commands/add_labels.rb  |   2 +-
 .../src/main/ruby/shell/commands/add_peer.rb    |   4 +-
 .../src/main/ruby/shell/commands/add_rsgroup.rb |   2 +-
 .../src/main/ruby/shell/commands/alter.rb       |   2 +-
 .../src/main/ruby/shell/commands/alter_async.rb |   2 +-
 .../main/ruby/shell/commands/alter_namespace.rb |   2 +-
 .../main/ruby/shell/commands/alter_status.rb    |   3 +-
 .../src/main/ruby/shell/commands/append.rb      |  10 +-
 .../shell/commands/append_peer_namespaces.rb    |   4 +-
 .../ruby/shell/commands/append_peer_tableCFs.rb |   4 +-
 .../src/main/ruby/shell/commands/assign.rb      |   2 +-
 .../main/ruby/shell/commands/balance_rsgroup.rb |   4 +-
 .../main/ruby/shell/commands/balance_switch.rb  |   4 +-
 .../src/main/ruby/shell/commands/balancer.rb    |   8 +-
 .../ruby/shell/commands/balancer_enabled.rb     |   4 +-
 .../shell/commands/catalogjanitor_enabled.rb    |   6 +-
 .../ruby/shell/commands/catalogjanitor_run.rb   |   7 +-
 .../shell/commands/catalogjanitor_switch.rb     |   4 +-
 .../shell/commands/cleaner_chore_enabled.rb     |   8 +-
 .../ruby/shell/commands/cleaner_chore_run.rb    |   9 +-
 .../ruby/shell/commands/cleaner_chore_switch.rb |   6 +-
 .../src/main/ruby/shell/commands/clear_auths.rb |   2 +-
 .../shell/commands/clear_compaction_queues.rb   |   4 +-
 .../main/ruby/shell/commands/clone_snapshot.rb  |   8 +-
 .../main/ruby/shell/commands/close_region.rb    |   6 +-
 .../src/main/ruby/shell/commands/compact.rb     |   4 +-
 .../src/main/ruby/shell/commands/compact_rs.rb  |   2 +-
 .../ruby/shell/commands/compaction_state.rb     |   2 +-
 .../src/main/ruby/shell/commands/count.rb       |  12 +-
 .../src/main/ruby/shell/commands/create.rb      |   6 +-
 .../ruby/shell/commands/create_namespace.rb     |   2 +-
 .../src/main/ruby/shell/commands/delete.rb      |   6 +-
 .../ruby/shell/commands/delete_all_snapshot.rb  |  16 +-
 .../main/ruby/shell/commands/delete_snapshot.rb |   2 +-
 .../shell/commands/delete_table_snapshots.rb    |  14 +-
 .../src/main/ruby/shell/commands/deleteall.rb   |   6 +-
 .../src/main/ruby/shell/commands/describe.rb    |  10 +-
 .../ruby/shell/commands/describe_namespace.rb   |   6 +-
 .../src/main/ruby/shell/commands/disable.rb     |   2 +-
 .../src/main/ruby/shell/commands/disable_all.rb |   8 +-
 .../main/ruby/shell/commands/disable_peer.rb    |   4 +-
 .../shell/commands/disable_table_replication.rb |   4 +-
 .../src/main/ruby/shell/commands/drop.rb        |   2 +-
 .../src/main/ruby/shell/commands/drop_all.rb    |   8 +-
 .../main/ruby/shell/commands/drop_namespace.rb  |   2 +-
 .../src/main/ruby/shell/commands/enable.rb      |   2 +-
 .../src/main/ruby/shell/commands/enable_all.rb  |   8 +-
 .../src/main/ruby/shell/commands/enable_peer.rb |   4 +-
 .../shell/commands/enable_table_replication.rb  |   4 +-
 .../src/main/ruby/shell/commands/exists.rb      |   6 +-
 .../src/main/ruby/shell/commands/flush.rb       |   2 +-
 hbase-shell/src/main/ruby/shell/commands/get.rb |  16 +-
 .../src/main/ruby/shell/commands/get_auths.rb   |   4 +-
 .../src/main/ruby/shell/commands/get_counter.rb |   4 +-
 .../main/ruby/shell/commands/get_peer_config.rb |  13 +-
 .../src/main/ruby/shell/commands/get_rsgroup.rb |   4 +-
 .../ruby/shell/commands/get_server_rsgroup.rb   |   2 +-
 .../src/main/ruby/shell/commands/get_splits.rb  |   6 +-
 .../src/main/ruby/shell/commands/get_table.rb   |   4 +-
 .../ruby/shell/commands/get_table_rsgroup.rb    |   4 +-
 .../src/main/ruby/shell/commands/grant.rb       |  47 +-
 .../src/main/ruby/shell/commands/incr.rb        |  10 +-
 .../src/main/ruby/shell/commands/is_disabled.rb |   4 +-
 .../src/main/ruby/shell/commands/is_enabled.rb  |   4 +-
 .../src/main/ruby/shell/commands/list.rb        |  10 +-
 .../src/main/ruby/shell/commands/list_labels.rb |   6 +-
 .../src/main/ruby/shell/commands/list_locks.rb  |  14 +-
 .../main/ruby/shell/commands/list_namespace.rb  |   8 +-
 .../shell/commands/list_namespace_tables.rb     |   6 +-
 .../ruby/shell/commands/list_peer_configs.rb    |   6 +-
 .../src/main/ruby/shell/commands/list_peers.rb  |  20 +-
 .../main/ruby/shell/commands/list_procedures.rb |  10 +-
 .../ruby/shell/commands/list_quota_snapshots.rb |  24 +-
 .../shell/commands/list_quota_table_sizes.rb    |   8 +-
 .../src/main/ruby/shell/commands/list_quotas.rb |   8 +-
 .../main/ruby/shell/commands/list_regions.rb    | 169 +++--
 .../shell/commands/list_replicated_tables.rb    |  22 +-
 .../main/ruby/shell/commands/list_rsgroups.rb   |   2 +-
 .../commands/list_security_capabilities.rb      |  26 +-
 .../ruby/shell/commands/list_snapshot_sizes.rb  |   8 +-
 .../main/ruby/shell/commands/list_snapshots.rb  |  12 +-
 .../ruby/shell/commands/list_table_snapshots.rb |  12 +-
 .../main/ruby/shell/commands/locate_region.rb   |   8 +-
 .../main/ruby/shell/commands/major_compact.rb   |   4 +-
 .../main/ruby/shell/commands/merge_region.rb    |   2 +-
 .../src/main/ruby/shell/commands/move.rb        |   2 +-
 .../ruby/shell/commands/move_servers_rsgroup.rb |   2 +-
 .../commands/move_servers_tables_rsgroup.rb     |   2 +-
 .../ruby/shell/commands/move_tables_rsgroup.rb  |   2 +-
 .../src/main/ruby/shell/commands/normalize.rb   |   7 +-
 .../ruby/shell/commands/normalizer_enabled.rb   |   4 +-
 .../ruby/shell/commands/normalizer_switch.rb    |   4 +-
 .../src/main/ruby/shell/commands/processlist.rb |  19 +-
 hbase-shell/src/main/ruby/shell/commands/put.rb |   8 +-
 .../src/main/ruby/shell/commands/remove_peer.rb |   4 +-
 .../shell/commands/remove_peer_namespaces.rb    |   4 +-
 .../ruby/shell/commands/remove_peer_tableCFs.rb |   2 +-
 .../main/ruby/shell/commands/remove_rsgroup.rb  |   2 +-
 .../ruby/shell/commands/restore_snapshot.rb     |   4 +-
 .../src/main/ruby/shell/commands/revoke.rb      |   8 +-
 .../src/main/ruby/shell/commands/scan.rb        |  28 +-
 .../src/main/ruby/shell/commands/set_auths.rb   |   2 +-
 .../ruby/shell/commands/set_peer_bandwidth.rb   |   4 +-
 .../ruby/shell/commands/set_peer_namespaces.rb  |   4 +-
 .../ruby/shell/commands/set_peer_tableCFs.rb    |   4 +-
 .../src/main/ruby/shell/commands/set_quota.rb   |  44 +-
 .../main/ruby/shell/commands/set_visibility.rb  |   3 +-
 .../main/ruby/shell/commands/show_filters.rb    |   4 +-
 .../ruby/shell/commands/show_peer_tableCFs.rb   |   4 +-
 .../src/main/ruby/shell/commands/snapshot.rb    |   2 +-
 .../src/main/ruby/shell/commands/split.rb       |   6 +-
 .../src/main/ruby/shell/commands/status.rb      |   4 +-
 .../src/main/ruby/shell/commands/table_help.rb  |   4 +-
 .../src/main/ruby/shell/commands/trace.rb       |  19 +-
 .../src/main/ruby/shell/commands/truncate.rb    |   3 +-
 .../ruby/shell/commands/truncate_preserve.rb    |   3 +-
 .../src/main/ruby/shell/commands/unassign.rb    |   2 +-
 .../ruby/shell/commands/update_all_config.rb    |   6 +-
 .../main/ruby/shell/commands/update_config.rb   |   2 +-
 .../ruby/shell/commands/update_peer_config.rb   |   4 +-
 .../main/ruby/shell/commands/user_permission.rb |  10 +-
 .../src/main/ruby/shell/commands/version.rb     |   8 +-
 .../src/main/ruby/shell/commands/wal_roll.rb    |   4 +-
 .../src/main/ruby/shell/commands/whoami.rb      |  12 +-
 .../src/main/ruby/shell/commands/zk_dump.rb     |   2 +-
 hbase-shell/src/main/ruby/shell/formatter.rb    |  65 +-
 140 files changed, 1448 insertions(+), 1526 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/2115d4b5/hbase-shell/src/main/ruby/hbase/admin.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 634ad3b..5eee26c 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -44,35 +44,31 @@ module Hbase
 
     #----------------------------------------------------------------------------------------------
     # Returns a list of tables in hbase
-    def list(regex = ".*")
-      @admin.listTableNames(regex).map { |t| t.getNameAsString }
+    def list(regex = '.*')
+      @admin.listTableNames(regex).map(&:getNameAsString)
     end
 
     #----------------------------------------------------------------------------------------------
     # Requests a table or region flush
     def flush(table_or_region_name)
-      begin
-        @admin.flushRegion(table_or_region_name.to_java_bytes);
-      rescue java.lang.IllegalArgumentException => e
-        # Unknown region. Try table.
-        @admin.flush(TableName.valueOf(table_or_region_name));
-      end
+      @admin.flushRegion(table_or_region_name.to_java_bytes)
+    rescue java.lang.IllegalArgumentException => e
+      # Unknown region. Try table.
+      @admin.flush(TableName.valueOf(table_or_region_name))
     end
 
     #----------------------------------------------------------------------------------------------
     # Requests a table or region or column family compaction
-    def compact(table_or_region_name, family = nil, type = "NORMAL")
+    def compact(table_or_region_name, family = nil, type = 'NORMAL')
       family_bytes = nil
-      unless family.nil?
-        family_bytes = family.to_java_bytes
-      end
+      family_bytes = family.to_java_bytes unless family.nil?
       compact_type = nil
-      if type == "NORMAL"
+      if type == 'NORMAL'
         compact_type = org.apache.hadoop.hbase.client.CompactType::NORMAL
-      elsif type == "MOB"
+      elsif type == 'MOB'
         compact_type = org.apache.hadoop.hbase.client.CompactType::MOB
       else
-        raise ArgumentError, "only NORMAL or MOB accepted for type!"
+        raise ArgumentError, 'only NORMAL or MOB accepted for type!'
       end
 
       begin
@@ -85,7 +81,7 @@ module Hbase
     #----------------------------------------------------------------------------------------------
     # Gets compaction state for specified table
     def getCompactionState(table_name)
-      @admin.getCompactionState(TableName.valueOf(table_name)).name()
+      @admin.getCompactionState(TableName.valueOf(table_name)).name
     end
 
     # Requests to compact all regions on the regionserver
@@ -95,18 +91,16 @@ module Hbase
 
     #----------------------------------------------------------------------------------------------
     # Requests a table or region or column family major compaction
-    def major_compact(table_or_region_name, family = nil, type = "NORMAL")
+    def major_compact(table_or_region_name, family = nil, type = 'NORMAL')
       family_bytes = nil
-      unless family.nil?
-        family_bytes = family.to_java_bytes
-      end
+      family_bytes = family.to_java_bytes unless family.nil?
       compact_type = nil
-      if type == "NORMAL"
+      if type == 'NORMAL'
         compact_type = org.apache.hadoop.hbase.client.CompactType::NORMAL
-      elsif type == "MOB"
+      elsif type == 'MOB'
         compact_type = org.apache.hadoop.hbase.client.CompactType::MOB
       else
-        raise ArgumentError, "only NORMAL or MOB accepted for type!"
+        raise ArgumentError, 'only NORMAL or MOB accepted for type!'
       end
 
       begin
@@ -121,16 +115,14 @@ module Hbase
     def wal_roll(server_name)
       @admin.rollWALWriter(ServerName.valueOf(server_name))
     end
-    # TODO remove older hlog_roll version
-    alias :hlog_roll :wal_roll
+    # TODO: remove older hlog_roll version
+    alias hlog_roll wal_roll
 
     #----------------------------------------------------------------------------------------------
     # Requests a table or region split
     def split(table_or_region_name, split_point = nil)
       split_point_bytes = nil
-      unless split_point.nil?
-        split_point_bytes = split_point.to_java_bytes
-      end
+      split_point_bytes = split_point.to_java_bytes unless split_point.nil?
       begin
         @admin.splitRegion(table_or_region_name.to_java_bytes, split_point_bytes)
       rescue java.lang.IllegalArgumentException => e
@@ -152,7 +144,8 @@ module Hbase
       end
       @admin.setSplitOrMergeEnabled(
         java.lang.Boolean.valueOf(enabled), java.lang.Boolean.valueOf(false),
-        switch_type)[0]
+        switch_type
+      )[0]
     end
 
     #----------------------------------------------------------------------------------------------
@@ -175,7 +168,7 @@ module Hbase
       begin
         return locator.getRegionLocation(Bytes.toBytesBinary(row_key))
       ensure
-        locator.close()
+        locator.close
       end
     end
 
@@ -183,7 +176,7 @@ module Hbase
     # Requests a cluster balance
     # Returns true if balancer ran
     def balancer(force)
-      @admin.balancer(java.lang.Boolean::valueOf(force))
+      @admin.balancer(java.lang.Boolean.valueOf(force))
     end
 
     #----------------------------------------------------------------------------------------------
@@ -191,76 +184,77 @@ module Hbase
     # Returns previous balancer switch setting.
     def balance_switch(enableDisable)
       @admin.setBalancerRunning(
-        java.lang.Boolean::valueOf(enableDisable), java.lang.Boolean::valueOf(false))
+        java.lang.Boolean.valueOf(enableDisable), java.lang.Boolean.valueOf(false)
+      )
     end
 
     #----------------------------------------------------------------------------------------------
     # Query the current state of the LoadBalancer.
     # Returns the balancer's state (true is enabled).
-    def balancer_enabled?()
-      @admin.isBalancerEnabled()
+    def balancer_enabled?
+      @admin.isBalancerEnabled
     end
 
     #----------------------------------------------------------------------------------------------
     # Requests region normalization for all configured tables in the cluster
     # Returns true if normalizer ran successfully
-    def normalize()
-      @admin.normalize()
+    def normalize
+      @admin.normalize
     end
 
     #----------------------------------------------------------------------------------------------
     # Enable/disable region normalizer
     # Returns previous normalizer switch setting.
     def normalizer_switch(enableDisable)
-      @admin.setNormalizerRunning(java.lang.Boolean::valueOf(enableDisable))
+      @admin.setNormalizerRunning(java.lang.Boolean.valueOf(enableDisable))
     end
 
     #----------------------------------------------------------------------------------------------
     # Query the current state of region normalizer.
     # Returns the state of region normalizer (true is enabled).
-    def normalizer_enabled?()
-      @admin.isNormalizerEnabled()
+    def normalizer_enabled?
+      @admin.isNormalizerEnabled
     end
 
     #----------------------------------------------------------------------------------------------
     # Request a scan of the catalog table (for garbage collection)
     # Returns an int signifying the number of entries cleaned
-    def catalogjanitor_run()
-      @admin.runCatalogScan()
+    def catalogjanitor_run
+      @admin.runCatalogScan
     end
 
     #----------------------------------------------------------------------------------------------
     # Enable/disable the catalog janitor
     # Returns previous catalog janitor switch setting.
     def catalogjanitor_switch(enableDisable)
-      @admin.enableCatalogJanitor(java.lang.Boolean::valueOf(enableDisable))
+      @admin.enableCatalogJanitor(java.lang.Boolean.valueOf(enableDisable))
     end
 
     #----------------------------------------------------------------------------------------------
     # Query on the catalog janitor state (enabled/disabled?)
     # Returns catalog janitor state (true signifies enabled).
-    def catalogjanitor_enabled()
-      @admin.isCatalogJanitorEnabled()
+    def catalogjanitor_enabled
+      @admin.isCatalogJanitorEnabled
     end
 
     #----------------------------------------------------------------------------------------------
     # Request cleaner chore to run (for garbage collection of HFiles and WAL files)
-    def cleaner_chore_run()
-      @admin.runCleanerChore()
+    def cleaner_chore_run
+      @admin.runCleanerChore
     end
 
     #----------------------------------------------------------------------------------------------
     # Enable/disable the cleaner chore
     # Returns previous cleaner switch setting.
     def cleaner_chore_switch(enableDisable)
-      @admin.setCleanerChoreRunning(java.lang.Boolean::valueOf(enableDisable))
+      @admin.setCleanerChoreRunning(java.lang.Boolean.valueOf(enableDisable))
     end
 
     #----------------------------------------------------------------------------------------------
     # Query on the cleaner chore state (enabled/disabled?)
     # Returns cleaner state (true signifies enabled).
-    def cleaner_chore_enabled()
-      @admin.isCleanerChoreEnabled()
+    def cleaner_chore_enabled
+      @admin.isCleanerChoreEnabled
     end
 
     #----------------------------------------------------------------------------------------------
@@ -290,7 +284,7 @@ module Hbase
     # Disables all tables matching the given regex
     def disable_all(regex)
       regex = regex.to_s
-      @admin.disableTables(regex).map { |t| t.getTableName().getNameAsString }
+      @admin.disableTables(regex).map { |t| t.getTableName.getNameAsString }
     end
 
     #---------------------------------------------------------------------------------------------
@@ -310,7 +304,8 @@ module Hbase
     def drop(table_name)
       tableExists(table_name)
       raise ArgumentError, "Table #{table_name} is enabled. Disable it first." if enabled?(
-          table_name)
+        table_name
+      )
 
       @admin.deleteTable(org.apache.hadoop.hbase.TableName.valueOf(table_name))
     end
@@ -319,27 +314,28 @@ module Hbase
     # Drops a table
     def drop_all(regex)
       regex = regex.to_s
-      failed  = @admin.deleteTables(regex).map { |t| t.getTableName().getNameAsString }
-      return failed
+      failed = @admin.deleteTables(regex).map { |t| t.getTableName.getNameAsString }
+      failed
     end
 
     #----------------------------------------------------------------------------------------------
     # Returns ZooKeeper status dump
     def zk_dump
       @zk_wrapper = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(
-        @admin.getConfiguration(),
-       "admin",
-        nil)
-      zk = @zk_wrapper.getRecoverableZooKeeper().getZooKeeper()
+        @admin.getConfiguration,
+        'admin',
+        nil
+      )
+      zk = @zk_wrapper.getRecoverableZooKeeper.getZooKeeper
       @zk_main = org.apache.zookeeper.ZooKeeperMain.new(zk)
-      org.apache.hadoop.hbase.zookeeper.ZKUtil::dump(@zk_wrapper)
+      org.apache.hadoop.hbase.zookeeper.ZKUtil.dump(@zk_wrapper)
     end
 
     #----------------------------------------------------------------------------------------------
     # Creates a table
     def create(table_name, *args)
       # Fail if table name is not a string
-      raise(ArgumentError, "Table name must be of type String") unless table_name.kind_of?(String)
+      raise(ArgumentError, 'Table name must be of type String') unless table_name.is_a?(String)
 
       # Flatten params array
       args = args.flatten.compact
@@ -351,18 +347,18 @@ module Hbase
       # Args are either columns or splits, add them to the table definition
       # TODO: add table options support
       args.each do |arg|
-        unless arg.kind_of?(String) || arg.kind_of?(Hash)
+        unless arg.is_a?(String) || arg.is_a?(Hash)
           raise(ArgumentError, "#{arg.class} of #{arg.inspect} is not of Hash or String type")
         end
 
         # First, handle all the cases where arg is a column family.
-        if arg.kind_of?(String) or arg.has_key?(NAME)
+        if arg.is_a?(String) || arg.key?(NAME)
           # If the arg is a string, default action is to add a column to the table.
           # If arg has a name, it must also be a column descriptor.
-          descriptor = hcd(arg, htd);
+          descriptor = hcd(arg, htd)
           # Warn if duplicate columns are added
           if htd.hasFamily(descriptor.getName)
-            puts "Family '" + descriptor.getNameAsString() + "' already exists, the old one will be replaced"
+            puts "Family '" + descriptor.getNameAsString + "' already exists, the old one will be replaced"
             htd.modifyFamily(descriptor)
           else
             htd.addFamily(descriptor)
@@ -370,7 +366,7 @@ module Hbase
           has_columns = true
           next
         end
-        if arg.has_key?(REGION_REPLICATION)
+        if arg.key?(REGION_REPLICATION)
           region_replication = JInteger.valueOf(arg.delete(REGION_REPLICATION))
           htd.setRegionReplication(region_replication)
         end
@@ -378,35 +374,35 @@ module Hbase
         # Get rid of the "METHOD", which is deprecated for create.
         # We'll do whatever it used to do below if it's table_att.
         if (method = arg.delete(METHOD))
-            raise(ArgumentError, "table_att is currently the only supported method") unless method == 'table_att'
+          raise(ArgumentError, 'table_att is currently the only supported method') unless method == 'table_att'
         end
 
         # The hash is not a column family. Figure out what's in it.
         # First, handle splits.
-        if arg.has_key?(SPLITS_FILE)
+        if arg.key?(SPLITS_FILE)
           splits_file = arg.delete(SPLITS_FILE)
           unless File.exist?(splits_file)
             raise(ArgumentError, "Splits file #{splits_file} doesn't exist")
           end
           arg[SPLITS] = []
           File.foreach(splits_file) do |line|
-            arg[SPLITS].push(line.strip())
+            arg[SPLITS].push(line.strip)
           end
           htd.setValue(SPLITS_FILE, arg[SPLITS_FILE])
         end
 
-        if arg.has_key?(SPLITS)
+        if arg.key?(SPLITS)
           splits = Java::byte[][arg[SPLITS].size].new
           idx = 0
           arg.delete(SPLITS).each do |split|
             splits[idx] = org.apache.hadoop.hbase.util.Bytes.toBytesBinary(split)
-            idx = idx + 1
+            idx += 1
           end
-        elsif arg.has_key?(NUMREGIONS) or arg.has_key?(SPLITALGO)
+        elsif arg.key?(NUMREGIONS) || arg.key?(SPLITALGO)
           # deprecated region pre-split API; if one of the above is specified, will be ignored.
-          raise(ArgumentError, "Number of regions must be specified") unless arg.has_key?(NUMREGIONS)
-          raise(ArgumentError, "Split algorithm must be specified") unless arg.has_key?(SPLITALGO)
-          raise(ArgumentError, "Number of regions must be greater than 1") unless arg[NUMREGIONS] > 1
+          raise(ArgumentError, 'Number of regions must be specified') unless arg.key?(NUMREGIONS)
+          raise(ArgumentError, 'Split algorithm must be specified') unless arg.key?(SPLITALGO)
+          raise(ArgumentError, 'Number of regions must be greater than 1') unless arg[NUMREGIONS] > 1
           num_regions = arg.delete(NUMREGIONS)
           split_algo = RegionSplitter.newSplitAlgoInstance(@conf, arg.delete(SPLITALGO))
           splits = split_algo.split(JInteger.valueOf(num_regions))
@@ -416,12 +412,12 @@ module Hbase
         update_htd_from_arg(htd, arg)
 
         arg.each_key do |ignored_key|
-          puts("An argument ignored (unknown or overridden): %s" % [ ignored_key ])
+          puts(format('An argument ignored (unknown or overridden): %s', ignored_key))
         end
       end
 
       # Fail if no column families defined
-      raise(ArgumentError, "Table must have at least one column family") if !has_columns
+      raise(ArgumentError, 'Table must have at least one column family') unless has_columns
 
       if splits.nil?
         # Perform the create table call
@@ -437,7 +433,7 @@ module Hbase
     # If server name is nil, we presume region_name is full region name (HRegionInfo.getRegionName).
     # If server name is not nil, we presume it is the region's encoded name (HRegionInfo.getEncodedName)
     def close_region(region_name, server)
-      if (region_name.end_with? ".")
+      if region_name.end_with? '.'
         @admin.closeRegion(region_name, server)
       else
         closeEncodedRegion?(region_name, server)
@@ -454,19 +450,19 @@ module Hbase
     #----------------------------------------------------------------------------------------------
     # Unassign a region
     def unassign(region_name, force)
-      @admin.unassign(region_name.to_java_bytes, java.lang.Boolean::valueOf(force))
+      @admin.unassign(region_name.to_java_bytes, java.lang.Boolean.valueOf(force))
     end
 
     #----------------------------------------------------------------------------------------------
     # Move a region
     def move(encoded_region_name, server = nil)
-      @admin.move(encoded_region_name.to_java_bytes, server ? server.to_java_bytes: nil)
+      @admin.move(encoded_region_name.to_java_bytes, server ? server.to_java_bytes : nil)
     end
 
     #----------------------------------------------------------------------------------------------
     # Merge two regions
     def merge_region(encoded_region_a_name, encoded_region_b_name, force)
-      @admin.mergeRegions(encoded_region_a_name.to_java_bytes, encoded_region_b_name.to_java_bytes, java.lang.Boolean::valueOf(force))
+      @admin.mergeRegions(encoded_region_a_name.to_java_bytes, encoded_region_b_name.to_java_bytes, java.lang.Boolean.valueOf(force))
     end
 
     #----------------------------------------------------------------------------------------------
@@ -478,7 +474,7 @@ module Hbase
 
     def get_column_families(table_name)
       tableExists(table_name)
-      @admin.getTableDescriptor(TableName.valueOf(table_name)).getColumnFamilies()
+      @admin.getTableDescriptor(TableName.valueOf(table_name)).getColumnFamilies
     end
 
     def get_table_attributes(table_name)
@@ -502,9 +498,9 @@ module Hbase
         @admin.truncateTable(table_name, false)
       rescue => e
         # Handle the compatibility case, where the truncate method doesn't exists on the Master
-        raise e unless e.respond_to?(:cause) && e.cause != nil
+        raise e unless e.respond_to?(:cause) && !e.cause.nil?
         rootCause = e.cause
-        if rootCause.kind_of?(org.apache.hadoop.hbase.DoNotRetryIOException) then
+        if rootCause.is_a?(org.apache.hadoop.hbase.DoNotRetryIOException)
           # Handle the compatibility case, where the truncate method doesn't exists on the Master
           puts 'Dropping table...'
           @admin.deleteTable(table_name)
@@ -524,12 +520,12 @@ module Hbase
       table_name = TableName.valueOf(table_name_str)
       locator = @connection.getRegionLocator(table_name)
       begin
-        splits = locator.getAllRegionLocations().
-            map{|i| Bytes.toStringBinary(i.getRegionInfo().getStartKey)}.
-            delete_if{|k| k == ""}.to_java :String
+        splits = locator.getAllRegionLocations
+                        .map { |i| Bytes.toStringBinary(i.getRegionInfo.getStartKey) }
+                        .delete_if { |k| k == '' }.to_java :String
         splits = org.apache.hadoop.hbase.util.Bytes.toBinaryByteArrays(splits)
       ensure
-        locator.close()
+        locator.close
       end
 
       table_description = @admin.getTableDescriptor(table_name)
@@ -538,16 +534,16 @@ module Hbase
 
       begin
         puts 'Truncating table...'
-        #just for test
-        unless conf.getBoolean("hbase.client.truncatetable.support", true)
-          raise UnsupportedMethodException.new('truncateTable')
+        # just for test
+        unless conf.getBoolean('hbase.client.truncatetable.support', true)
+          raise UnsupportedMethodException, 'truncateTable'
         end
         @admin.truncateTable(table_name, true)
       rescue => e
         # Handle the compatibility case, where the truncate method doesn't exists on the Master
-        raise e unless e.respond_to?(:cause) && e.cause != nil
+        raise e unless e.respond_to?(:cause) && !e.cause.nil?
         rootCause = e.cause
-        if rootCause.kind_of?(org.apache.hadoop.hbase.DoNotRetryIOException) then
+        if rootCause.is_a?(org.apache.hadoop.hbase.DoNotRetryIOException)
           # Handle the compatibility case, where the truncate method doesn't exists on the Master
           puts 'Dropping table...'
           @admin.deleteTable(table_name)
@@ -566,7 +562,7 @@ module Hbase
       end
 
       def cause
-        return org.apache.hadoop.hbase.DoNotRetryIOException.new("#@method_name is not support")
+        org.apache.hadoop.hbase.DoNotRetryIOException.new("#{@method_name} is not support")
       end
     end
 
@@ -574,36 +570,36 @@ module Hbase
     # Check the status of alter command (number of regions reopened)
     def alter_status(table_name)
       # Table name should be a string
-      raise(ArgumentError, "Table name must be of type String") unless table_name.kind_of?(String)
+      raise(ArgumentError, 'Table name must be of type String') unless table_name.is_a?(String)
 
       # Table should exist
       raise(ArgumentError, "Can't find a table: #{table_name}") unless exists?(table_name)
 
-      status = Pair.new()
+      status = Pair.new
       begin
         status = @admin.getAlterStatus(org.apache.hadoop.hbase.TableName.valueOf(table_name))
-        if status.getSecond() != 0
-          puts "#{status.getSecond() - status.getFirst()}/#{status.getSecond()} regions updated."
+        if status.getSecond != 0
+          puts "#{status.getSecond - status.getFirst}/#{status.getSecond} regions updated."
         else
-          puts "All regions updated."
+          puts 'All regions updated.'
         end
-	      sleep 1
-      end while status != nil && status.getFirst() != 0
-      puts "Done."
+        sleep 1
+      end while !status.nil? && status.getFirst != 0
+      puts 'Done.'
     end
 
     #----------------------------------------------------------------------------------------------
     # Change table structure or table options
     def alter(table_name_str, wait = true, *args)
       # Table name should be a string
-      raise(ArgumentError, "Table name must be of type String") unless
-          table_name_str.kind_of?(String)
+      raise(ArgumentError, 'Table name must be of type String') unless
+          table_name_str.is_a?(String)
 
       # Table should exist
       raise(ArgumentError, "Can't find a table: #{table_name_str}") unless exists?(table_name_str)
 
       # There should be at least one argument
-      raise(ArgumentError, "There should be at least one argument but the table name") if args.empty?
+      raise(ArgumentError, 'There should be at least one argument but the table name') if args.empty?
 
       table_name = TableName.valueOf(table_name_str)
 
@@ -613,10 +609,8 @@ module Hbase
 
       # Process all args
       args.each do |arg|
-
-
         # Normalize args to support column name only alter specs
-        arg = { NAME => arg } if arg.kind_of?(String)
+        arg = { NAME => arg } if arg.is_a?(String)
 
         # Normalize args to support shortcut delete syntax
         arg = { METHOD => 'delete', NAME => arg['delete'] } if arg['delete']
@@ -624,7 +618,7 @@ module Hbase
         # There are 3 possible options.
         # 1) Column family spec. Distinguished by having a NAME and no METHOD.
         method = arg.delete(METHOD)
-        if method == nil and arg.has_key?(NAME)
+        if method.nil? && arg.key?(NAME)
           descriptor = hcd(arg, htd)
           column_name = descriptor.getNameAsString
 
@@ -640,24 +634,24 @@ module Hbase
 
         # 2) Method other than table_att, with some args.
         name = arg.delete(NAME)
-        if method != nil and method != "table_att"
+        if !method.nil? && method != 'table_att'
           # Delete column family
-          if method == "delete"
-            raise(ArgumentError, "NAME parameter missing for delete method") unless name
+          if method == 'delete'
+            raise(ArgumentError, 'NAME parameter missing for delete method') unless name
             htd.removeFamily(name.to_java_bytes)
             hasTableUpdate = true
           # Unset table attributes
-          elsif method == "table_att_unset"
-            raise(ArgumentError, "NAME parameter missing for table_att_unset method") unless name
-            if name.kind_of?(Array)
+          elsif method == 'table_att_unset'
+            raise(ArgumentError, 'NAME parameter missing for table_att_unset method') unless name
+            if name.is_a?(Array)
               name.each do |key|
-                if (htd.getValue(key) == nil)
+                if htd.getValue(key).nil?
                   raise ArgumentError, "Could not find attribute: #{key}"
                 end
                 htd.remove(key)
               end
             else
-              if (htd.getValue(name) == nil)
+              if htd.getValue(name).nil?
                 raise ArgumentError, "Could not find attribute: #{name}"
               end
               htd.remove(name)
@@ -669,7 +663,7 @@ module Hbase
           end
 
           arg.each_key do |unknown_key|
-            puts("Unknown argument ignored: %s" % [unknown_key])
+            puts(format('Unknown argument ignored: %s', unknown_key))
           end
 
           next
@@ -680,31 +674,29 @@ module Hbase
 
         # set a coprocessor attribute
         valid_coproc_keys = []
-        if arg.kind_of?(Hash)
-          arg.each do |key, value|
-            k = String.new(key) # prepare to strip
-            k.strip!
-
-            if (k =~ /coprocessor/i)
-              v = String.new(value)
-              v.strip!
-              htd.addCoprocessorWithSpec(v)
-              valid_coproc_keys << key
-            end
-          end
-
-          valid_coproc_keys.each do |key|
-            arg.delete(key)
-          end
+        next unless arg.is_a?(Hash)
+        arg.each do |key, value|
+          k = String.new(key) # prepare to strip
+          k.strip!
+
+          next unless k =~ /coprocessor/i
+          v = String.new(value)
+          v.strip!
+          htd.addCoprocessorWithSpec(v)
+          valid_coproc_keys << key
+        end
 
-          hasTableUpdate = true
+        valid_coproc_keys.each do |key|
+          arg.delete(key)
+        end
 
-          arg.each_key do |unknown_key|
-            puts("Unknown argument ignored: %s" % [unknown_key])
-          end
+        hasTableUpdate = true
 
-          next
+        arg.each_key do |unknown_key|
+          puts(format('Unknown argument ignored: %s', unknown_key))
         end
+
+        next
       end
 
       # Bulk apply all table modifications.
@@ -712,107 +704,102 @@ module Hbase
         @admin.modifyTable(table_name, htd)
 
         if wait == true
-          puts "Updating all regions with the new schema..."
+          puts 'Updating all regions with the new schema...'
           alter_status(table_name_str)
         end
       end
     end
 
     def status(format, type)
-      status = @admin.getClusterStatus()
-      if format == "detailed"
-        puts("version %s" % [ status.getHBaseVersion() ])
+      status = @admin.getClusterStatus
+      if format == 'detailed'
+        puts(format('version %s', status.getHBaseVersion))
         # Put regions in transition first because usually empty
-        puts("%d regionsInTransition" % status.getRegionsInTransition().size())
-        for v in status.getRegionsInTransition()
-          puts("    %s" % [v])
+        puts(format('%d regionsInTransition', status.getRegionsInTransition.size))
+        for v in status.getRegionsInTransition
+          puts(format('    %s', v))
         end
-        master = status.getMaster()
-        puts("active master:  %s:%d %d" % [master.getHostname(), master.getPort(), master.getStartcode()])
-        puts("%d backup masters" % [ status.getBackupMastersSize() ])
-        for server in status.getBackupMasters()
-          puts("    %s:%d %d" % \
-            [ server.getHostname(), server.getPort(), server.getStartcode() ])
+        master = status.getMaster
+        puts(format('active master:  %s:%d %d', master.getHostname, master.getPort, master.getStartcode))
+        puts(format('%d backup masters', status.getBackupMastersSize))
+        for server in status.getBackupMasters
+          puts(format('    %s:%d %d', server.getHostname, server.getPort, server.getStartcode))
         end
 
-        master_coprocs = java.util.Arrays.toString(@admin.getMasterCoprocessors())
-        if master_coprocs != nil
-          puts("master coprocessors: %s" % master_coprocs)
+        master_coprocs = java.util.Arrays.toString(@admin.getMasterCoprocessors)
+        unless master_coprocs.nil?
+          puts(format('master coprocessors: %s', master_coprocs))
         end
-        puts("%d live servers" % [ status.getServersSize() ])
-        for server in status.getServers()
-          puts("    %s:%d %d" % \
-            [ server.getHostname(), server.getPort(), server.getStartcode() ])
-          puts("        %s" % [ status.getLoad(server).toString() ])
-          for name, region in status.getLoad(server).getRegionsLoad()
-            puts("        %s" % [ region.getNameAsString().dump ])
-            puts("            %s" % [ region.toString() ])
+        puts(format('%d live servers', status.getServersSize))
+        for server in status.getServers
+          puts(format('    %s:%d %d', server.getHostname, server.getPort, server.getStartcode))
+          puts(format('        %s', status.getLoad(server).toString))
+          for name, region in status.getLoad(server).getRegionsLoad
+            puts(format('        %s', region.getNameAsString.dump))
+            puts(format('            %s', region.toString))
           end
         end
-        puts("%d dead servers" % [ status.getDeadServersSize() ])
-        for server in status.getDeadServerNames()
-          puts("    %s" % [ server ])
+        puts(format('%d dead servers', status.getDeadServersSize))
+        for server in status.getDeadServerNames
+          puts(format('    %s', server))
         end
-      elsif format == "replication"
-        puts("version %s" % [ status.getHBaseVersion() ])
-        puts("%d live servers" % [ status.getServersSize() ])
-        for server in status.getServers()
+      elsif format == 'replication'
+        puts(format('version %s', status.getHBaseVersion))
+        puts(format('%d live servers', status.getServersSize))
+        for server in status.getServers
           sl = status.getLoad(server)
-          rSinkString   = "       SINK  :"
-          rSourceString = "       SOURCE:"
-          rLoadSink = sl.getReplicationLoadSink()
-          next if rLoadSink == nil
-          rSinkString << " AgeOfLastAppliedOp=" + rLoadSink.getAgeOfLastAppliedOp().to_s
-          rSinkString << ", TimeStampsOfLastAppliedOp=" +
-              (java.util.Date.new(rLoadSink.getTimeStampsOfLastAppliedOp())).toString()
-          rLoadSourceList = sl.getReplicationLoadSourceList()
+          rSinkString   = '       SINK  :'
+          rSourceString = '       SOURCE:'
+          rLoadSink = sl.getReplicationLoadSink
+          next if rLoadSink.nil?
+          rSinkString << ' AgeOfLastAppliedOp=' + rLoadSink.getAgeOfLastAppliedOp.to_s
+          rSinkString << ', TimeStampsOfLastAppliedOp=' +
+                         java.util.Date.new(rLoadSink.getTimeStampsOfLastAppliedOp).toString
+          rLoadSourceList = sl.getReplicationLoadSourceList
           index = 0
-          while index < rLoadSourceList.size()
+          while index < rLoadSourceList.size
             rLoadSource = rLoadSourceList.get(index)
-            rSourceString << " PeerID=" + rLoadSource.getPeerID()
-            rSourceString << ", AgeOfLastShippedOp=" + rLoadSource.getAgeOfLastShippedOp().to_s
-            rSourceString << ", SizeOfLogQueue=" + rLoadSource.getSizeOfLogQueue().to_s
-            rSourceString << ", TimeStampsOfLastShippedOp=" +
-                (java.util.Date.new(rLoadSource.getTimeStampOfLastShippedOp())).toString()
-            rSourceString << ", Replication Lag=" + rLoadSource.getReplicationLag().to_s
-            index = index + 1
+            rSourceString << ' PeerID=' + rLoadSource.getPeerID
+            rSourceString << ', AgeOfLastShippedOp=' + rLoadSource.getAgeOfLastShippedOp.to_s
+            rSourceString << ', SizeOfLogQueue=' + rLoadSource.getSizeOfLogQueue.to_s
+            rSourceString << ', TimeStampsOfLastShippedOp=' +
+                             java.util.Date.new(rLoadSource.getTimeStampOfLastShippedOp).toString
+            rSourceString << ', Replication Lag=' + rLoadSource.getReplicationLag.to_s
+            index += 1
           end
-          puts("    %s:" %
-          [ server.getHostname() ])
-          if type.casecmp("SOURCE") == 0
-            puts("%s" % rSourceString)
-          elsif type.casecmp("SINK") == 0
-            puts("%s" % rSinkString)
+          puts(format('    %s:', server.getHostname))
+          if type.casecmp('SOURCE') == 0
+            puts(format('%s', rSourceString))
+          elsif type.casecmp('SINK') == 0
+            puts(format('%s', rSinkString))
           else
-            puts("%s" % rSourceString)
-            puts("%s" % rSinkString)
+            puts(format('%s', rSourceString))
+            puts(format('%s', rSinkString))
           end
         end
-      elsif format == "simple"
+      elsif format == 'simple'
         load = 0
         regions = 0
-        master = status.getMaster()
-        puts("active master:  %s:%d %d" % [master.getHostname(), master.getPort(), master.getStartcode()])
-        puts("%d backup masters" % [ status.getBackupMastersSize() ])
-        for server in status.getBackupMasters()
-          puts("    %s:%d %d" % \
-            [ server.getHostname(), server.getPort(), server.getStartcode() ])
+        master = status.getMaster
+        puts(format('active master:  %s:%d %d', master.getHostname, master.getPort, master.getStartcode))
+        puts(format('%d backup masters', status.getBackupMastersSize))
+        for server in status.getBackupMasters
+          puts(format('    %s:%d %d', server.getHostname, server.getPort, server.getStartcode))
         end
-        puts("%d live servers" % [ status.getServersSize() ])
-        for server in status.getServers()
-          puts("    %s:%d %d" % \
-            [ server.getHostname(), server.getPort(), server.getStartcode() ])
-          puts("        %s" % [ status.getLoad(server).toString() ])
-          load += status.getLoad(server).getNumberOfRequests()
-          regions += status.getLoad(server).getNumberOfRegions()
+        puts(format('%d live servers', status.getServersSize))
+        for server in status.getServers
+          puts(format('    %s:%d %d', server.getHostname, server.getPort, server.getStartcode))
+          puts(format('        %s', status.getLoad(server).toString))
+          load += status.getLoad(server).getNumberOfRequests
+          regions += status.getLoad(server).getNumberOfRegions
         end
-        puts("%d dead servers" % [ status.getDeadServers() ])
-        for server in status.getDeadServerNames()
-          puts("    %s" % [ server ])
+        puts(format('%d dead servers', status.getDeadServers))
+        for server in status.getDeadServerNames
+          puts(format('    %s', server))
         end
-        puts("Aggregate load: %d, regions: %d" % [ load , regions ] )
+        puts(format('Aggregate load: %d, regions: %d', load, regions))
       else
-        puts "1 active master, #{status.getBackupMastersSize} backup masters, #{status.getServersSize} servers, #{status.getDeadServers} dead, #{'%.4f' % status.getAverageLoad} average load"
+        puts "1 active master, #{status.getBackupMastersSize} backup masters, #{status.getServersSize} servers, #{status.getDeadServers} dead, #{format('%.4f', status.getAverageLoad)} average load"
       end
     end
 
@@ -833,16 +820,16 @@ module Hbase
     end
 
     #----------------------------------------------------------------------------------------------
-    #Is supplied region name is encoded region name
+    # Is supplied region name is encoded region name
     def closeEncodedRegion?(region_name, server)
-       @admin.closeRegionWithEncodedRegionName(region_name, server)
+      @admin.closeRegionWithEncodedRegionName(region_name, server)
     end
 
     #----------------------------------------------------------------------------------------------
     # Return a new HColumnDescriptor made of passed args
     def hcd(arg, htd)
       # String arg, single parameter constructor
-      return org.apache.hadoop.hbase.HColumnDescriptor.new(arg) if arg.kind_of?(String)
+      return org.apache.hadoop.hbase.HColumnDescriptor.new(arg) if arg.is_a?(String)
 
       raise(ArgumentError, "Column family #{arg} must have a name") unless name = arg.delete(NAME)
 
@@ -858,8 +845,11 @@ module Hbase
       family.setEvictBlocksOnClose(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::EVICT_BLOCKS_ON_CLOSE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::EVICT_BLOCKS_ON_CLOSE)
       family.setCacheDataInL1(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_IN_L1))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_IN_L1)
       family.setInMemory(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY)
-      family.setInMemoryCompaction(
-          org.apache.hadoop.hbase.MemoryCompactionPolicy.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION)
+      if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION)
+        family.setInMemoryCompaction(
+          org.apache.hadoop.hbase.MemoryCompactionPolicy.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION))
+        )
+      end
       family.setTimeToLive(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::TTL)) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL)
       family.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING)
       family.setBlocksize(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE)
@@ -872,18 +862,18 @@ module Hbase
       family.setMobThreshold(JLong.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::MOB_THRESHOLD))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::MOB_THRESHOLD)
       if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOOMFILTER)
         bloomtype = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOOMFILTER).upcase.to_sym
-        unless org.apache.hadoop.hbase.regionserver.BloomType.constants.include?(bloomtype)
-          raise(ArgumentError, "BloomFilter type #{bloomtype} is not supported. Use one of " + org.apache.hadoop.hbase.regionserver.StoreFile::BloomType.constants.join(" "))
-        else
+        if org.apache.hadoop.hbase.regionserver.BloomType.constants.include?(bloomtype)
           family.setBloomFilterType(org.apache.hadoop.hbase.regionserver.BloomType.valueOf(bloomtype))
+        else
+          raise(ArgumentError, "BloomFilter type #{bloomtype} is not supported. Use one of " + org.apache.hadoop.hbase.regionserver.StoreFile::BloomType.constants.join(' '))
         end
       end
       if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION)
         compression = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION).upcase.to_sym
-        unless org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.include?(compression)
-          raise(ArgumentError, "Compression #{compression} is not supported. Use one of " + org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.join(" "))
-        else
+        if org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.include?(compression)
           family.setCompressionType(org.apache.hadoop.hbase.io.compress.Compression::Algorithm.valueOf(compression))
+        else
+          raise(ArgumentError, "Compression #{compression} is not supported. Use one of " + org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.join(' '))
         end
       end
       if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::ENCRYPTION)
@@ -891,44 +881,46 @@ module Hbase
         family.setEncryptionType(algorithm)
         if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::ENCRYPTION_KEY)
           key = org.apache.hadoop.hbase.io.crypto.Encryption.pbkdf128(
-            arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::ENCRYPTION_KEY))
+            arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::ENCRYPTION_KEY)
+          )
           family.setEncryptionKey(org.apache.hadoop.hbase.security.EncryptionUtil.wrapKey(@conf, key,
-            algorithm))
+                                                                                          algorithm))
         end
       end
       if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION_COMPACT)
         compression = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION_COMPACT).upcase.to_sym
-        unless org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.include?(compression)
-          raise(ArgumentError, "Compression #{compression} is not supported. Use one of " + org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.join(" "))
-        else
+        if org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.include?(compression)
           family.setCompactionCompressionType(org.apache.hadoop.hbase.io.compress.Compression::Algorithm.valueOf(compression))
+        else
+          raise(ArgumentError, "Compression #{compression} is not supported. Use one of " + org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.join(' '))
         end
       end
       if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::STORAGE_POLICY)
-          storage_policy = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::STORAGE_POLICY).upcase
-          family.setStoragePolicy(storage_policy)
+        storage_policy = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::STORAGE_POLICY).upcase
+        family.setStoragePolicy(storage_policy)
       end
       if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::MOB_COMPACT_PARTITION_POLICY)
         mob_partition_policy = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::MOB_COMPACT_PARTITION_POLICY).upcase.to_sym
-        unless org.apache.hadoop.hbase.client.MobCompactPartitionPolicy.constants.include?(mob_partition_policy)
-          raise(ArgumentError, "MOB_COMPACT_PARTITION_POLICY #{mob_partition_policy} is not supported. Use one of " + org.apache.hadoop.hbase.client.MobCompactPartitionPolicy.constants.join(" "))
-        else
+        if org.apache.hadoop.hbase.client.MobCompactPartitionPolicy.constants.include?(mob_partition_policy)
           family.setMobCompactPartitionPolicy(org.apache.hadoop.hbase.client.MobCompactPartitionPolicy.valueOf(mob_partition_policy))
+        else
+          raise(ArgumentError, "MOB_COMPACT_PARTITION_POLICY #{mob_partition_policy} is not supported. Use one of " + org.apache.hadoop.hbase.client.MobCompactPartitionPolicy.constants.join(' '))
         end
       end
 
-
       set_user_metadata(family, arg.delete(METADATA)) if arg[METADATA]
       set_descriptor_config(family, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]
-      family.setDFSReplication(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.
-        HColumnDescriptor::DFS_REPLICATION))) if arg.include?(org.apache.hadoop.hbase.
-        HColumnDescriptor::DFS_REPLICATION)
+      if arg.include?(org.apache.hadoop.hbase
+        .HColumnDescriptor::DFS_REPLICATION)
+        family.setDFSReplication(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase
+          .HColumnDescriptor::DFS_REPLICATION)))
+      end
 
       arg.each_key do |unknown_key|
-        puts("Unknown argument ignored for column family %s: %s" % [name, unknown_key])
+        puts(format('Unknown argument ignored for column family %s: %s', name, unknown_key))
       end
 
-      return family
+      family
     end
 
     #----------------------------------------------------------------------------------------------
@@ -951,41 +943,42 @@ module Hbase
       # Write it back
       put = org.apache.hadoop.hbase.client.Put.new(region_bytes)
       put.addColumn(org.apache.hadoop.hbase.HConstants::CATALOG_FAMILY,
-        org.apache.hadoop.hbase.HConstants::REGIONINFO_QUALIFIER,
-        org.apache.hadoop.hbase.util.Writables.getBytes(hri))
+                    org.apache.hadoop.hbase.HConstants::REGIONINFO_QUALIFIER,
+                    org.apache.hadoop.hbase.util.Writables.getBytes(hri))
       meta.put(put)
     end
+
     # Apply user metadata to table/column descriptor
     def set_user_metadata(descriptor, metadata)
-      raise(ArgumentError, "#{METADATA} must be a Hash type") unless metadata.kind_of?(Hash)
-        for k,v in metadata
-          v = v.to_s unless v.nil?
-          descriptor.setValue(k, v)
-        end
+      raise(ArgumentError, "#{METADATA} must be a Hash type") unless metadata.is_a?(Hash)
+      for k, v in metadata
+        v = v.to_s unless v.nil?
+        descriptor.setValue(k, v)
+      end
     end
 
     #----------------------------------------------------------------------------------------------
     # Take a snapshot of specified table
     def snapshot(table, snapshot_name, *args)
       # Table name should be a string
-      raise(ArgumentError, "Table name must be of type String") unless table.kind_of?(String)
+      raise(ArgumentError, 'Table name must be of type String') unless table.is_a?(String)
 
       # Snapshot name should be a string
-      raise(ArgumentError, "Snapshot name must be of type String") unless
-          snapshot_name.kind_of?(String)
+      raise(ArgumentError, 'Snapshot name must be of type String') unless
+          snapshot_name.is_a?(String)
 
       table_name = TableName.valueOf(table)
       if args.empty?
-         @admin.snapshot(snapshot_name, table_name)
+        @admin.snapshot(snapshot_name, table_name)
       else
-         args.each do |arg|
-            if arg[SKIP_FLUSH] == true
-              @admin.snapshot(snapshot_name, table_name,
-                              org.apache.hadoop.hbase.client.SnapshotType::SKIPFLUSH)
-            else
-               @admin.snapshot(snapshot_name, table_name)
-            end
-         end
+        args.each do |arg|
+          if arg[SKIP_FLUSH] == true
+            @admin.snapshot(snapshot_name, table_name,
+                            org.apache.hadoop.hbase.client.SnapshotType::SKIPFLUSH)
+          else
+            @admin.snapshot(snapshot_name, table_name)
+          end
+        end
       end
     end
 
@@ -993,7 +986,7 @@ module Hbase
     # Restore the specified snapshot
     def restore_snapshot(snapshot_name, restore_acl = false)
       conf = @connection.getConfiguration
-      take_fail_safe_snapshot = conf.getBoolean("hbase.snapshot.restore.take.failsafe.snapshot", false)
+      take_fail_safe_snapshot = conf.getBoolean('hbase.snapshot.restore.take.failsafe.snapshot', false)
       @admin.restoreSnapshot(snapshot_name, take_fail_safe_snapshot, restore_acl)
     end
 
@@ -1017,51 +1010,51 @@ module Hbase
 
     #----------------------------------------------------------------------------------------------
     # Deletes the table snapshots matching the given regex
-    def delete_table_snapshots(tableNameRegex, snapshotNameRegex = ".*")
+    def delete_table_snapshots(tableNameRegex, snapshotNameRegex = '.*')
       @admin.deleteTableSnapshots(tableNameRegex, snapshotNameRegex).to_a
     end
 
     #----------------------------------------------------------------------------------------------
     # Returns a list of snapshots
-    def list_snapshot(regex = ".*")
+    def list_snapshot(regex = '.*')
       @admin.listSnapshots(regex).to_a
     end
 
     #----------------------------------------------------------------------------------------------
     # Returns a list of table snapshots
-    def list_table_snapshots(tableNameRegex, snapshotNameRegex = ".*")
+    def list_table_snapshots(tableNameRegex, snapshotNameRegex = '.*')
       @admin.listTableSnapshots(tableNameRegex, snapshotNameRegex).to_a
     end
 
     #----------------------------------------------------------------------------------------------
     # Returns a list of regionservers
-    def getRegionServers()
-      return @admin.getClusterStatus.getServers.map { |serverName| serverName }
+    def getRegionServers
+      @admin.getClusterStatus.getServers.map { |serverName| serverName }
     end
 
     #----------------------------------------------------------------------------------------------
     # Returns a list of servernames
     def getServerNames(servers)
-      regionservers = getRegionServers()
+      regionservers = getRegionServers
       servernames = []
 
-      if servers.length == 0
+      if servers.empty?
         # if no servers were specified as arguments, get a list of all servers
         servernames = regionservers
       else
         # Replace String entries with ServerName objects in the servers array
         i = 0
-        while (i < servers.length)
+        while i < servers.length
           server = servers[i]
 
           if ServerName.isFullServerName(server)
             servernames.push(ServerName.valueOf(server))
           else
-            name_list = server.split(",")
+            name_list = server.split(',')
             j = 0
-            while (j < regionservers.length)
+            while j < regionservers.length
               sn = regionservers[j]
-              if name_list[0] == sn.hostname and (name_list[1] == nil ? true : (name_list[1] == sn.port.to_s) )
+              if name_list[0] == sn.hostname && (name_list[1].nil? ? true : (name_list[1] == sn.port.to_s))
                 servernames.push(sn)
               end
               j += 1
@@ -1071,28 +1064,28 @@ module Hbase
         end
       end
 
-      return servernames
-    end 
+      servernames
+    end
 
     # Apply config specific to a table/column to its descriptor
     def set_descriptor_config(descriptor, config)
-      raise(ArgumentError, "#{CONFIGURATION} must be a Hash type") unless config.kind_of?(Hash)
-        for k,v in config
-          v = v.to_s unless v.nil?
-          descriptor.setConfiguration(k, v)
-        end
+      raise(ArgumentError, "#{CONFIGURATION} must be a Hash type") unless config.is_a?(Hash)
+      for k, v in config
+        v = v.to_s unless v.nil?
+        descriptor.setConfiguration(k, v)
+      end
     end
 
     #----------------------------------------------------------------------------------------------
     # Updates the configuration of one regionserver.
     def update_config(serverName)
-      @admin.updateConfiguration(ServerName.valueOf(serverName));
+      @admin.updateConfiguration(ServerName.valueOf(serverName))
     end
 
     #----------------------------------------------------------------------------------------------
     # Updates the configuration of all the regionservers.
-    def update_all_config()
-      @admin.updateConfiguration();
+    def update_all_config
+      @admin.updateConfiguration
     end
 
     #----------------------------------------------------------------------------------------------
@@ -1100,26 +1093,24 @@ module Hbase
     def describe_namespace(namespace_name)
       namespace = @admin.getNamespaceDescriptor(namespace_name)
 
-      unless namespace.nil?
-        return namespace.to_s
-      end
+      return namespace.to_s unless namespace.nil?
 
       raise(ArgumentError, "Failed to find namespace named #{namespace_name}")
     end
 
     #----------------------------------------------------------------------------------------------
     # Returns a list of namespaces in hbase
-    def list_namespace(regex = ".*")
+    def list_namespace(regex = '.*')
       pattern = java.util.regex.Pattern.compile(regex)
-      list = @admin.listNamespaceDescriptors.map { |ns| ns.getName }
-      list.select {|s| pattern.match(s) }
+      list = @admin.listNamespaceDescriptors.map(&:getName)
+      list.select { |s| pattern.match(s) }
     end
 
     #----------------------------------------------------------------------------------------------
     # Returns a list of tables in namespace
     def list_namespace_tables(namespace_name)
       unless namespace_name.nil?
-        return @admin.listTableNamesByNamespace(namespace_name).map { |t| t.getQualifierAsString() }
+        return @admin.listTableNamesByNamespace(namespace_name).map(&:getQualifierAsString)
       end
 
       raise(ArgumentError, "Failed to find namespace named #{namespace_name}")
@@ -1129,52 +1120,50 @@ module Hbase
     # Creates a namespace
     def create_namespace(namespace_name, *args)
       # Fail if namespace name is not a string
-      raise(ArgumentError, "Namespace name must be of type String") unless namespace_name.kind_of?(String)
+      raise(ArgumentError, 'Namespace name must be of type String') unless namespace_name.is_a?(String)
 
       # Flatten params array
       args = args.flatten.compact
 
       # Start defining the namespace
-      nsb = org.apache.hadoop.hbase.NamespaceDescriptor::create(namespace_name)
+      nsb = org.apache.hadoop.hbase.NamespaceDescriptor.create(namespace_name)
       args.each do |arg|
-        unless arg.kind_of?(Hash)
+        unless arg.is_a?(Hash)
           raise(ArgumentError, "#{arg.class} of #{arg.inspect} is not of Hash or String type")
         end
-        for k,v in arg
+        for k, v in arg
           v = v.to_s unless v.nil?
           nsb.addConfiguration(k, v)
         end
       end
-      @admin.createNamespace(nsb.build());
+      @admin.createNamespace(nsb.build)
     end
 
     #----------------------------------------------------------------------------------------------
     # modify a namespace
     def alter_namespace(namespace_name, *args)
       # Fail if namespace name is not a string
-      raise(ArgumentError, "Namespace name must be of type String") unless namespace_name.kind_of?(String)
+      raise(ArgumentError, 'Namespace name must be of type String') unless namespace_name.is_a?(String)
 
       nsd = @admin.getNamespaceDescriptor(namespace_name)
 
-      unless nsd
-        raise(ArgumentError, "Namespace does not exist")
-      end
-      nsb = org.apache.hadoop.hbase.NamespaceDescriptor::create(nsd)
+      raise(ArgumentError, 'Namespace does not exist') unless nsd
+      nsb = org.apache.hadoop.hbase.NamespaceDescriptor.create(nsd)
 
       # Flatten params array
       args = args.flatten.compact
 
       # Apply the requested namespace configuration changes
       args.each do |arg|
-        unless arg.kind_of?(Hash)
+        unless arg.is_a?(Hash)
           raise(ArgumentError, "#{arg.class} of #{arg.inspect} is not of Hash type")
         end
         method = arg[METHOD]
-        if method == "unset"
+        if method == 'unset'
           nsb.removeConfiguration(arg[NAME])
-        elsif  method == "set"
+        elsif method == 'set'
           arg.delete(METHOD)
-          for k,v in arg
+          for k, v in arg
             v = v.to_s unless v.nil?
 
             nsb.addConfiguration(k, v)
@@ -1183,10 +1172,9 @@ module Hbase
           raise(ArgumentError, "Unknown method #{method}")
         end
       end
-      @admin.modifyNamespace(nsb.build());
+      @admin.modifyNamespace(nsb.build)
     end
 
-
     #----------------------------------------------------------------------------------------------
     # Drops a namespace
     def drop_namespace(namespace_name)
@@ -1200,7 +1188,7 @@ module Hbase
     end
 
     # Abort a procedure
-    def abort_procedure?(proc_id, may_interrupt_if_running=nil)
+    def abort_procedure?(proc_id, may_interrupt_if_running = nil)
       if may_interrupt_if_running.nil?
         @admin.abortProcedure(proc_id, true)
       else
@@ -1209,13 +1197,13 @@ module Hbase
     end
 
     # List all procedures
-    def list_procedures()
-      @admin.listProcedures()
+    def list_procedures
+      @admin.listProcedures
     end
 
     # List all locks
-    def list_locks()
-      @admin.listLocks();
+    def list_locks
+      @admin.listLocks
     end
 
     # Parse arguments and update HTableDescriptor accordingly
@@ -1239,20 +1227,20 @@ module Hbase
     #----------------------------------------------------------------------------------------------
     # clear compaction queues
     def clear_compaction_queues(server_name, queue_name = nil)
-      names = ['long', 'short']
+      names = %w[long short]
       queues = java.util.HashSet.new
       if queue_name.nil?
         queues.add('long')
         queues.add('short')
-      elsif queue_name.kind_of?(String)
+      elsif queue_name.is_a?(String)
         queues.add(queue_name)
-        if !(names.include?(queue_name))
+        unless names.include?(queue_name)
           raise(ArgumentError, "Unknown queue name #{queue_name}")
         end
-      elsif queue_name.kind_of?(Array)
+      elsif queue_name.is_a?(Array)
         queue_name.each do |s|
           queues.add(s)
-          if !(names.include?(s))
+          unless names.include?(s)
             raise(ArgumentError, "Unknown queue name #{s}")
           end
         end
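
The hostname/port matching that the cleaned-up getServerNames loop performs (above) is easier to follow outside the JRuby/Java glue. A minimal plain-Ruby sketch of the same rule follows; ServerStub, match_servers and the sample host names are hypothetical stand-ins for the Java ServerName objects, not part of this commit.

    # Minimal stand-in for the Java ServerName objects used in admin.rb.
    ServerStub = Struct.new(:hostname, :port)

    # Region servers as the cluster might report them (hypothetical sample data).
    regionservers = [ServerStub.new('rs1.example.com', 16020),
                     ServerStub.new('rs2.example.com', 16030)]

    # Match bare "hostname" or "hostname,port" entries against the known
    # region servers, mirroring the matching rule in getServerNames.
    def match_servers(servers, regionservers)
      servers.map do |server|
        host, port = server.split(',')
        regionservers.find do |sn|
          host == sn.hostname && (port.nil? || port == sn.port.to_s)
        end
      end.compact
    end

    p match_servers(['rs1.example.com', 'rs2.example.com,16030'], regionservers)
    # => both stub servers are found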

http://git-wip-us.apache.org/repos/asf/hbase/blob/2115d4b5/hbase-shell/src/main/ruby/hbase/hbase.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/hbase.rb b/hbase-shell/src/main/ruby/hbase/hbase.rb
index bc5a31d..1f37f99 100644
--- a/hbase-shell/src/main/ruby/hbase/hbase.rb
+++ b/hbase-shell/src/main/ruby/hbase/hbase.rb
@@ -39,22 +39,22 @@ module Hbase
       else
         self.configuration = HBaseConfiguration.create
         # Lower retry counts in hbase and ipc; a human doesn't want to wait on N retries.
-        configuration.setInt("hbase.client.retries.number", 7)
-        configuration.setInt("hbase.ipc.client.connect.max.retries", 3)
+        configuration.setInt('hbase.client.retries.number', 7)
+        configuration.setInt('hbase.ipc.client.connect.max.retries', 3)
       end
-      @connection = ConnectionFactory.createConnection(self.configuration)
+      @connection = ConnectionFactory.createConnection(configuration)
     end
 
     # Returns ruby's Admin class from admin.rb
-    def admin()
+    def admin
       ::Hbase::Admin.new(@connection)
     end
 
-    def rsgroup_admin()
+    def rsgroup_admin
       ::Hbase::RSGroupAdmin.new(@connection)
     end
 
-    def taskmonitor()
+    def taskmonitor
       ::Hbase::TaskMonitor.new(configuration)
     end
 
@@ -63,19 +63,19 @@ module Hbase
       ::Hbase::Table.new(@connection.getTable(TableName.valueOf(table)), shell)
     end
 
-    def replication_admin()
+    def replication_admin
       ::Hbase::RepAdmin.new(configuration)
     end
 
-    def security_admin()
+    def security_admin
       ::Hbase::SecurityAdmin.new(@connection.getAdmin)
     end
 
-    def visibility_labels_admin()
+    def visibility_labels_admin
       ::Hbase::VisibilityLabelsAdmin.new(@connection.getAdmin)
     end
 
-    def quotas_admin()
+    def quotas_admin
       ::Hbase::QuotasAdmin.new(@connection.getAdmin)
     end
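
For orientation, the parenthesis-free factory methods above are invoked exactly as before. A minimal sketch, assuming a JRuby session launched with the HBase shell's classpath and load path (for example via hbase org.jruby.Main) and a reachable cluster; only the class and method names come from hbase.rb, everything else is assumed:

    # Hypothetical session script; paths and environment are assumed.
    require 'hbase/hbase'

    hbase  = ::Hbase::Hbase.new      # builds a default HBaseConfiguration
    admin  = hbase.admin             # ::Hbase::Admin wrapper from admin.rb
    quotas = hbase.quotas_admin      # ::Hbase::QuotasAdmin wrapper from quotas.rb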
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/2115d4b5/hbase-shell/src/main/ruby/hbase/quotas.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/quotas.rb b/hbase-shell/src/main/ruby/hbase/quotas.rb
index a8a8e6b..1ea8d28 100644
--- a/hbase-shell/src/main/ruby/hbase/quotas.rb
+++ b/hbase-shell/src/main/ruby/hbase/quotas.rb
@@ -30,18 +30,18 @@ java_import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy
 
 module HBaseQuotasConstants
   # RPC Quota constants
-  GLOBAL_BYPASS = 'GLOBAL_BYPASS'
-  THROTTLE_TYPE = 'THROTTLE_TYPE'
-  THROTTLE = 'THROTTLE'
-  REQUEST = 'REQUEST'
-  WRITE = 'WRITE'
-  READ = 'READ'
+  GLOBAL_BYPASS = 'GLOBAL_BYPASS'.freeze
+  THROTTLE_TYPE = 'THROTTLE_TYPE'.freeze
+  THROTTLE = 'THROTTLE'.freeze
+  REQUEST = 'REQUEST'.freeze
+  WRITE = 'WRITE'.freeze
+  READ = 'READ'.freeze
   # Space quota constants
-  SPACE = 'SPACE'
-  NO_INSERTS = 'NO_INSERTS'
-  NO_WRITES = 'NO_WRITES'
-  NO_WRITES_COMPACTIONS = 'NO_WRITES_COMPACTIONS'
-  DISABLE = 'DISABLE'
+  SPACE = 'SPACE'.freeze
+  NO_INSERTS = 'NO_INSERTS'.freeze
+  NO_WRITES = 'NO_WRITES'.freeze
+  NO_WRITES_COMPACTIONS = 'NO_WRITES_COMPACTIONS'.freeze
+  DISABLE = 'DISABLE'.freeze
 end
 
 module Hbase
@@ -55,88 +55,88 @@ module Hbase
     end
 
     def throttle(args)
-      raise(ArgumentError, "Arguments should be a Hash") unless args.kind_of?(Hash)
+      raise(ArgumentError, 'Arguments should be a Hash') unless args.is_a?(Hash)
       type = args.fetch(THROTTLE_TYPE, REQUEST)
       args.delete(THROTTLE_TYPE)
       type, limit, time_unit = _parse_limit(args.delete(LIMIT), ThrottleType, type)
-      if args.has_key?(USER)
+      if args.key?(USER)
         user = args.delete(USER)
-        if args.has_key?(TABLE)
+        if args.key?(TABLE)
           table = TableName.valueOf(args.delete(TABLE))
-          raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+          raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
           settings = QuotaSettingsFactory.throttleUser(user, table, type, limit, time_unit)
-        elsif args.has_key?(NAMESPACE)
+        elsif args.key?(NAMESPACE)
           namespace = args.delete(NAMESPACE)
-          raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+          raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
           settings = QuotaSettingsFactory.throttleUser(user, namespace, type, limit, time_unit)
         else
-          raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+          raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
           settings = QuotaSettingsFactory.throttleUser(user, type, limit, time_unit)
         end
-      elsif args.has_key?(TABLE)
+      elsif args.key?(TABLE)
         table = TableName.valueOf(args.delete(TABLE))
-        raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+        raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
         settings = QuotaSettingsFactory.throttleTable(table, type, limit, time_unit)
-      elsif args.has_key?(NAMESPACE)
+      elsif args.key?(NAMESPACE)
         namespace = args.delete(NAMESPACE)
-        raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+        raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
         settings = QuotaSettingsFactory.throttleNamespace(namespace, type, limit, time_unit)
       else
-        raise "One of USER, TABLE or NAMESPACE must be specified"
+        raise 'One of USER, TABLE or NAMESPACE must be specified'
       end
       @admin.setQuota(settings)
     end
 
     def unthrottle(args)
-      raise(ArgumentError, "Arguments should be a Hash") unless args.kind_of?(Hash)
-      if args.has_key?(USER)
+      raise(ArgumentError, 'Arguments should be a Hash') unless args.is_a?(Hash)
+      if args.key?(USER)
         user = args.delete(USER)
-        if args.has_key?(TABLE)
+        if args.key?(TABLE)
           table = TableName.valueOf(args.delete(TABLE))
-          raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+          raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
           settings = QuotaSettingsFactory.unthrottleUser(user, table)
-        elsif args.has_key?(NAMESPACE)
+        elsif args.key?(NAMESPACE)
           namespace = args.delete(NAMESPACE)
-          raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+          raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
           settings = QuotaSettingsFactory.unthrottleUser(user, namespace)
         else
-          raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+          raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
           settings = QuotaSettingsFactory.unthrottleUser(user)
         end
-      elsif args.has_key?(TABLE)
+      elsif args.key?(TABLE)
         table = TableName.valueOf(args.delete(TABLE))
-        raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+        raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
         settings = QuotaSettingsFactory.unthrottleTable(table)
-      elsif args.has_key?(NAMESPACE)
+      elsif args.key?(NAMESPACE)
         namespace = args.delete(NAMESPACE)
-        raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+        raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
         settings = QuotaSettingsFactory.unthrottleNamespace(namespace)
       else
-        raise "One of USER, TABLE or NAMESPACE must be specified"
+        raise 'One of USER, TABLE or NAMESPACE must be specified'
       end
       @admin.setQuota(settings)
     end
 
     def limit_space(args)
-      raise(ArgumentError, 'Argument should be a Hash') unless (not args.nil? and args.kind_of?(Hash))
+      raise(ArgumentError, 'Argument should be a Hash') unless !args.nil? && args.is_a?(Hash)
       # Let the user provide a raw number
-      if args[LIMIT].is_a?(Numeric)
-        limit = args[LIMIT]
-      else
-        # Parse a string like 1K, 2G, etc.
-        limit = _parse_size(args[LIMIT])
-      end
+      limit = if args[LIMIT].is_a?(Numeric)
+                args[LIMIT]
+              else
+                # Parse a string like 1K, 2G, etc.
+                _parse_size(args[LIMIT])
+              end
       # Extract the policy, failing if something bogus was provided
       policy = SpaceViolationPolicy.valueOf(args[POLICY])
       # Create a table or namespace quota
       if args.key?(TABLE)
         if args.key?(NAMESPACE)
-          raise(ArgumentError, "Only one of TABLE or NAMESPACE can be specified.")
+          raise(ArgumentError, 'Only one of TABLE or NAMESPACE can be specified.')
         end
         settings = QuotaSettingsFactory.limitTableSpace(TableName.valueOf(args.delete(TABLE)), limit, policy)
       elsif args.key?(NAMESPACE)
         if args.key?(TABLE)
-          raise(ArgumentError, "Only one of TABLE or NAMESPACE can be specified.")
+          raise(ArgumentError, 'Only one of TABLE or NAMESPACE can be specified.')
         end
         settings = QuotaSettingsFactory.limitNamespaceSpace(args.delete(NAMESPACE), limit, policy)
       else
@@ -147,16 +147,16 @@ module Hbase
     end
 
     def remove_space_limit(args)
-      raise(ArgumentError, 'Argument should be a Hash') unless (not args.nil? and args.kind_of?(Hash))
+      raise(ArgumentError, 'Argument should be a Hash') unless !args.nil? && args.is_a?(Hash)
       if args.key?(TABLE)
         if args.key?(NAMESPACE)
-          raise(ArgumentError, "Only one of TABLE or NAMESPACE can be specified.")
+          raise(ArgumentError, 'Only one of TABLE or NAMESPACE can be specified.')
         end
         table = TableName.valueOf(args.delete(TABLE))
         settings = QuotaSettingsFactory.removeTableSpaceLimit(table)
       elsif args.key?(NAMESPACE)
         if args.key?(TABLE)
-          raise(ArgumentError, "Only one of TABLE or NAMESPACE can be specified.")
+          raise(ArgumentError, 'Only one of TABLE or NAMESPACE can be specified.')
         end
         settings = QuotaSettingsFactory.removeNamespaceSpaceLimit(args.delete(NAMESPACE))
       else
@@ -165,52 +165,52 @@ module Hbase
       @admin.setQuota(settings)
     end
 
-    def get_master_table_sizes()
-      QuotaTableUtil.getMasterReportedTableSizes(@admin.getConnection())
+    def get_master_table_sizes
+      QuotaTableUtil.getMasterReportedTableSizes(@admin.getConnection)
     end
 
-    def get_quota_snapshots(regionserver=nil)
+    def get_quota_snapshots(regionserver = nil)
       # Ask a regionserver if we were given one
       return get_rs_quota_snapshots(regionserver) if regionserver
       # Otherwise, read from the quota table
       get_quota_snapshots_from_table
     end
 
-    def get_quota_snapshots_from_table()
+    def get_quota_snapshots_from_table
       # Reads the snapshots from the hbase:quota table
-      QuotaTableUtil.getSnapshots(@admin.getConnection())
+      QuotaTableUtil.getSnapshots(@admin.getConnection)
     end
 
     def get_rs_quota_snapshots(rs)
       # Reads the snapshots from a specific regionserver
-      QuotaTableUtil.getRegionServerQuotaSnapshots(@admin.getConnection(),
-          ServerName.valueOf(rs))
+      QuotaTableUtil.getRegionServerQuotaSnapshots(@admin.getConnection,
+                                                   ServerName.valueOf(rs))
     end
 
     def set_global_bypass(bypass, args)
-      raise(ArgumentError, "Arguments should be a Hash") unless args.kind_of?(Hash)
+      raise(ArgumentError, 'Arguments should be a Hash') unless args.is_a?(Hash)
 
-      if args.has_key?(USER)
+      if args.key?(USER)
         user = args.delete(USER)
-        raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+        raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
         settings = QuotaSettingsFactory.bypassGlobals(user, bypass)
       else
-        raise "Expected USER"
+        raise 'Expected USER'
       end
       @admin.setQuota(settings)
     end
 
     def list_quotas(args = {})
-      raise(ArgumentError, "Arguments should be a Hash") unless args.kind_of?(Hash)
+      raise(ArgumentError, 'Arguments should be a Hash') unless args.is_a?(Hash)
 
-      limit = args.delete("LIMIT") || -1
+      limit = args.delete('LIMIT') || -1
       count = 0
 
-      filter = QuotaFilter.new()
-      filter.setUserFilter(args.delete(USER)) if args.has_key?(USER)
-      filter.setTableFilter(args.delete(TABLE)) if args.has_key?(TABLE)
-      filter.setNamespaceFilter(args.delete(NAMESPACE)) if args.has_key?(NAMESPACE)
-      raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
+      filter = QuotaFilter.new
+      filter.setUserFilter(args.delete(USER)) if args.key?(USER)
+      filter.setTableFilter(args.delete(TABLE)) if args.key?(TABLE)
+      filter.setNamespaceFilter(args.delete(NAMESPACE)) if args.key?(NAMESPACE)
+      raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
 
       # Start the scanner
       scanner = @admin.getQuotaRetriever(filter)
@@ -219,30 +219,28 @@ module Hbase
 
         # Iterate results
         while iter.hasNext
-          if limit > 0 && count >= limit
-            break
-          end
+          break if limit > 0 && count >= limit
 
           settings = iter.next
           owner = {
-            USER => settings.getUserName(),
-            TABLE => settings.getTableName(),
-            NAMESPACE => settings.getNamespace(),
-          }.delete_if { |k, v| v.nil? }.map {|k, v| k.to_s + " => " + v.to_s} * ', '
+            USER => settings.getUserName,
+            TABLE => settings.getTableName,
+            NAMESPACE => settings.getNamespace
+          }.delete_if { |_k, v| v.nil? }.map { |k, v| k.to_s + ' => ' + v.to_s } * ', '
 
           yield owner, settings.to_s
 
           count += 1
         end
       ensure
-        scanner.close()
+        scanner.close
       end
 
-      return count
+      count
     end
 
-    def list_snapshot_sizes()
-      QuotaTableUtil.getObservedSnapshotSizes(@admin.getConnection())
+    def list_snapshot_sizes
+      QuotaTableUtil.getObservedSnapshotSizes(@admin.getConnection)
     end
 
     def _parse_size(str_limit)
@@ -255,7 +253,7 @@ module Hbase
           return _size_from_str(match[1].to_i, match[2])
         end
       else
-        raise(ArgumentError, "Invalid size limit syntax")
+        raise(ArgumentError, 'Invalid size limit syntax')
       end
     end
 
@@ -265,38 +263,38 @@ module Hbase
       if match
         if match[2] == 'req'
           limit = match[1].to_i
-          type = type_cls.valueOf(type + "_NUMBER")
+          type = type_cls.valueOf(type + '_NUMBER')
         else
           limit = _size_from_str(match[1].to_i, match[2])
-          type = type_cls.valueOf(type + "_SIZE")
+          type = type_cls.valueOf(type + '_SIZE')
         end
 
         if limit <= 0
-          raise(ArgumentError, "Invalid throttle limit, must be greater than 0")
+          raise(ArgumentError, 'Invalid throttle limit, must be greater than 0')
         end
 
         case match[3]
-          when 'sec'  then time_unit = TimeUnit::SECONDS
-          when 'min'  then time_unit = TimeUnit::MINUTES
-          when 'hour' then time_unit = TimeUnit::HOURS
-          when 'day'  then time_unit = TimeUnit::DAYS
+        when 'sec'  then time_unit = TimeUnit::SECONDS
+        when 'min'  then time_unit = TimeUnit::MINUTES
+        when 'hour' then time_unit = TimeUnit::HOURS
+        when 'day'  then time_unit = TimeUnit::DAYS
         end
 
         return type, limit, time_unit
       else
-        raise(ArgumentError, "Invalid throttle limit syntax")
+        raise(ArgumentError, 'Invalid throttle limit syntax')
       end
     end
 
     def _size_from_str(value, suffix)
       case suffix
-        when 'k' then value <<= 10
-        when 'm' then value <<= 20
-        when 'g' then value <<= 30
-        when 't' then value <<= 40
-        when 'p' then value <<= 50
+      when 'k' then value <<= 10
+      when 'm' then value <<= 20
+      when 'g' then value <<= 30
+      when 't' then value <<= 40
+      when 'p' then value <<= 50
       end
-      return value
+      value
     end
   end
 end
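
The case/when reindentation at the end of quotas.rb leaves the throttle-limit arithmetic unchanged: a limit string resolves to either a request count or a byte size shifted by powers of 1024. A rough standalone illustration in plain Ruby follows; the regex is an approximation of the accepted syntax, and size_from_str/parse_limit are illustrative names, not the shell's own methods.

    # Each size suffix shifts the value by another power of 1024.
    def size_from_str(value, suffix)
      case suffix
      when 'k' then value << 10
      when 'm' then value << 20
      when 'g' then value << 30
      when 't' then value << 40
      when 'p' then value << 50
      else value
      end
    end

    # Approximate grammar: "<number><req|b|k|m|g|t|p>/<sec|min|hour|day>".
    def parse_limit(str)
      match = /^(\d+)(req|[bkmgtp])\/(sec|min|hour|day)$/i.match(str)
      raise(ArgumentError, 'Invalid throttle limit syntax') unless match
      if match[2].downcase == 'req'
        [match[1].to_i, :requests, match[3]]
      else
        [size_from_str(match[1].to_i, match[2].downcase), :bytes, match[3]]
      end
    end

    p parse_limit('10req/sec')   # => [10, :requests, "sec"]
    p parse_limit('2G/hour')     # => [2147483648, :bytes, "hour"]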