Posted to commits@hbase.apache.org by ja...@apache.org on 2019/08/06 17:20:28 UTC

[hbase] branch master updated: HBASE-22783 shell with removal of status and load classes

This is an automated email from the ASF dual-hosted git repository.

janh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/master by this push:
     new f02f741  HBASE-22783 shell with removal of status and load classes
f02f741 is described below

commit f02f741f99188ba0f3a176f20107a663951e3662
Author: Viraj Jasani <vi...@gmail.com>
AuthorDate: Tue Aug 6 22:50:22 2019 +0530

    HBASE-22783 shell with removal of status and load classes
    
    Signed-off-by: stack <st...@apache.org>
    Signed-off-by: Andrew Purtell <ap...@apache.org>
    Signed-off-by: Jan Hentschel <ja...@ultratendency.com>
---
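For context, a minimal sketch (not part of the patch itself) of how the shell-side calls change, using only the accessors the diff below switches to; `@admin` is assumed to be the Java Admin handle held by hbase-shell's Hbase::Admin:

    # Old: wrap ClusterMetrics back into the deprecated ClusterStatus
    #   status  = org.apache.hadoop.hbase.ClusterStatus.new(@admin.getClusterMetrics)
    #   servers = status.getServers
    #   load    = status.getLoad(server)
    #
    # New: read ClusterMetrics directly; live servers are keyed by ServerName
    cluster_metrics = @admin.getClusterMetrics
    cluster_metrics.getLiveServerMetrics.keySet.each do |server|
      server_metrics = cluster_metrics.getLiveServerMetrics.get(server)
      puts format('%s:%d requests/sec=%d regions=%d',
                  server.getHostname, server.getPort,
                  server_metrics.getRequestCountPerSecond,  # replaces getNumberOfRequests
                  server_metrics.getRegionMetrics.size)     # replaces getNumberOfRegions
    end
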
 bin/draining_servers.rb                            |  2 +-
 bin/region_status.rb                               |  2 +-
 hbase-shell/src/main/ruby/hbase/admin.rb           | 86 +++++++++++++---------
 hbase-shell/src/main/ruby/hbase/taskmonitor.rb     |  2 +-
 .../src/main/ruby/shell/commands/list_regions.rb   | 29 ++++----
 hbase-shell/src/main/ruby/shell/commands/rit.rb    |  2 +-
 6 files changed, 69 insertions(+), 54 deletions(-)

diff --git a/bin/draining_servers.rb b/bin/draining_servers.rb
index 12f0ba4..99d957e 100644
--- a/bin/draining_servers.rb
+++ b/bin/draining_servers.rb
@@ -49,7 +49,7 @@ optparse.parse!
 # Return array of servernames where servername is hostname+port+startcode
 # comma-delimited
 def getServers(admin)
-  serverInfos = admin.getClusterStatus.getServers
+  serverInfos = admin.getClusterMetrics.getLiveServerMetrics.keySet
   servers = []
   serverInfos.each do |server|
     servers << server.getServerName
diff --git a/bin/region_status.rb b/bin/region_status.rb
index f8c203b..9307b9f 100644
--- a/bin/region_status.rb
+++ b/bin/region_status.rb
@@ -132,7 +132,7 @@ meta_count += 1 if $tablename.nil?
 $TableName = TableName.valueOf($tablename.to_java_bytes) unless $tablename.nil?
 loop do
   if $tablename.nil?
-    server_count = admin.getClusterStatus.getRegionsCount
+    server_count = admin.getClusterMetrics.getRegionCount
   else
     connection = ConnectionFactory.createConnection(config)
     server_count = MetaTableAccessor.allTableRegions(connection, $TableName).size
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index d830874..8d85edd 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -802,18 +802,19 @@ module Hbase
     end
 
     def status(format, type)
-      status = org.apache.hadoop.hbase.ClusterStatus.new(@admin.getClusterMetrics)
+      cluster_metrics = @admin.getClusterMetrics
       if format == 'detailed'
-        puts(format('version %s', status.getHBaseVersion))
+        puts(format('version %s', cluster_metrics.getHBaseVersion))
         # Put regions in transition first because usually empty
-        puts(format('%d regionsInTransition', status.getRegionStatesInTransition.size))
-        for v in status.getRegionStatesInTransition
+        puts(format('%d regionsInTransition', cluster_metrics.getRegionStatesInTransition.size))
+        for v in cluster_metrics.getRegionStatesInTransition
           puts(format('    %s', v))
         end
-        master = status.getMaster
-        puts(format('active master:  %s:%d %d', master.getHostname, master.getPort, master.getStartcode))
-        puts(format('%d backup masters', status.getBackupMastersSize))
-        for server in status.getBackupMasters
+        master = cluster_metrics.getMasterName
+        puts(format('active master:  %s:%d %d', master.getHostname, master.getPort,
+                    master.getStartcode))
+        puts(format('%d backup masters', cluster_metrics.getBackupMasterNames.size))
+        for server in cluster_metrics.getBackupMasterNames
           puts(format('    %s:%d %d', server.getHostname, server.getPort, server.getStartcode))
         end
 
@@ -821,24 +822,25 @@ module Hbase
         unless master_coprocs.nil?
           puts(format('master coprocessors: %s', master_coprocs))
         end
-        puts(format('%d live servers', status.getServersSize))
-        for server in status.getServers
+        puts(format('%d live servers', cluster_metrics.getLiveServerMetrics.size))
+        for server in cluster_metrics.getLiveServerMetrics.keySet
           puts(format('    %s:%d %d', server.getHostname, server.getPort, server.getStartcode))
-          puts(format('        %s', status.getLoad(server).toString))
-          for name, region in status.getLoad(server).getRegionsLoad
+          puts(format('        %s', cluster_metrics.getLiveServerMetrics.get(server).toString))
+          for name, region in cluster_metrics.getLiveServerMetrics.get(server).getRegionMetrics
             puts(format('        %s', region.getNameAsString.dump))
             puts(format('            %s', region.toString))
           end
         end
-        puts(format('%d dead servers', status.getDeadServersSize))
-        for server in status.getDeadServerNames
+        puts(format('%d dead servers', cluster_metrics.getDeadServerNames.size))
+        for server in cluster_metrics.getDeadServerNames
           puts(format('    %s', server))
         end
       elsif format == 'replication'
-        puts(format('version %<version>s', version: status.getHBaseVersion))
-        puts(format('%<servers>d live servers', servers: status.getServersSize))
-        status.getServers.each do |server_status|
-          sl = status.getLoad(server_status)
+        puts(format('version %<version>s', version: cluster_metrics.getHBaseVersion))
+        puts(format('%<servers>d live servers',
+                    servers: cluster_metrics.getLiveServerMetrics.size))
+        cluster_metrics.getLiveServerMetrics.keySet.each do |server_name|
+          sl = cluster_metrics.getLiveServerMetrics.get(server_name)
           r_sink_string   = '      SINK:'
           r_source_string = '       SOURCE:'
           r_load_sink = sl.getReplicationLoadSink
@@ -851,7 +853,7 @@ module Hbase
                              .getTimestampsOfLastAppliedOp).toString
           r_load_source_map = sl.getReplicationLoadSourceMap
           build_source_string(r_load_source_map, r_source_string)
-          puts(format('    %<host>s:', host: server_status.getHostname))
+          puts(format('    %<host>s:', host: server_name.getHostname))
           if type.casecmp('SOURCE').zero?
             puts(format('%<source>s', source: r_source_string))
           elsif type.casecmp('SINK').zero?
@@ -864,26 +866,30 @@ module Hbase
       elsif format == 'simple'
         load = 0
         regions = 0
-        master = status.getMaster
-        puts(format('active master:  %s:%d %d', master.getHostname, master.getPort, master.getStartcode))
-        puts(format('%d backup masters', status.getBackupMastersSize))
-        for server in status.getBackupMasters
+        master = cluster_metrics.getMasterName
+        puts(format('active master:  %s:%d %d', master.getHostname, master.getPort,
+                    master.getStartcode))
+        puts(format('%d backup masters', cluster_metrics.getBackupMasterNames.size))
+        for server in cluster_metrics.getBackupMasterNames
           puts(format('    %s:%d %d', server.getHostname, server.getPort, server.getStartcode))
         end
-        puts(format('%d live servers', status.getServersSize))
-        for server in status.getServers
+        puts(format('%d live servers', cluster_metrics.getLiveServerMetrics.size))
+        for server in cluster_metrics.getLiveServerMetrics.keySet
           puts(format('    %s:%d %d', server.getHostname, server.getPort, server.getStartcode))
-          puts(format('        %s', status.getLoad(server).toString))
-          load += status.getLoad(server).getNumberOfRequests
-          regions += status.getLoad(server).getNumberOfRegions
+          puts(format('        %s', cluster_metrics.getLiveServerMetrics.get(server).toString))
+          load += cluster_metrics.getLiveServerMetrics.get(server).getRequestCountPerSecond
+          regions += cluster_metrics.getLiveServerMetrics.get(server).getRegionMetrics.size
         end
-        puts(format('%d dead servers', status.getDeadServers))
-        for server in status.getDeadServerNames
+        puts(format('%d dead servers', cluster_metrics.getDeadServerNames.size))
+        for server in cluster_metrics.getDeadServerNames
           puts(format('    %s', server))
         end
         puts(format('Aggregate load: %d, regions: %d', load, regions))
       else
-        puts "1 active master, #{status.getBackupMastersSize} backup masters, #{status.getServersSize} servers, #{status.getDeadServers} dead, #{format('%.4f', status.getAverageLoad)} average load"
+        puts "1 active master, #{cluster_metrics.getBackupMasterNames.size} backup masters,
+              #{cluster_metrics.getLiveServerMetrics.size} servers,
+              #{cluster_metrics.getDeadServerNames.size} dead,
+              #{format('%.4f', cluster_metrics.getAverageLoad)} average load"
       end
     end
 
@@ -1176,15 +1182,23 @@ module Hbase
     end
 
     #----------------------------------------------------------------------------------------------
-    # Returns the ClusterStatus of the cluster
-    def getClusterStatus
-      org.apache.hadoop.hbase.ClusterStatus.new(@admin.getClusterMetrics)
+    # Returns the whole ClusterMetrics containing details:
+    #
+    # hbase version
+    # cluster id
+    # primary/backup master(s)
+    # master's coprocessors
+    # live/dead regionservers
+    # balancer
+    # regions in transition
+    def getClusterMetrics
+      @admin.getClusterMetrics
     end
 
     #----------------------------------------------------------------------------------------------
     # Returns a list of regionservers
     def getRegionServers
-      org.apache.hadoop.hbase.ClusterStatus.new(@admin.getClusterMetrics).getServers.map { |serverName| serverName }
+      @admin.getClusterMetrics.getLiveServerMetrics.keySet.map { |server_name| server_name }
     end
 
     #----------------------------------------------------------------------------------------------
@@ -1447,7 +1461,7 @@ module Hbase
     #----------------------------------------------------------------------------------------------
     # List live region servers
     def list_liveservers
-      org.apache.hadoop.hbase.ClusterStatus.new(@admin.getClusterMetrics).getServers.to_a
+      @admin.getClusterMetrics.getLiveServerMetrics.keySet.to_a
     end
 
     #---------------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/taskmonitor.rb b/hbase-shell/src/main/ruby/hbase/taskmonitor.rb
index ca4180a..1ee441d 100644
--- a/hbase-shell/src/main/ruby/hbase/taskmonitor.rb
+++ b/hbase-shell/src/main/ruby/hbase/taskmonitor.rb
@@ -79,7 +79,7 @@ module Hbase
       java_import 'java.io.InputStreamReader'
       java_import 'org.apache.hbase.thirdparty.com.google.gson.JsonParser'
 
-      infoport = org.apache.hadoop.hbase.ClusterStatus.new(@admin.getClusterMetrics).getLoad(host).getInfoServerPort.to_s
+      infoport = @admin.getClusterMetrics.getLiveServerMetrics.get(host).getInfoServerPort.to_s
 
       # Note: This condition use constants from hbase-server
       # if (!@admin.getConfiguration().getBoolean(org.apache.hadoop.hbase.http.ServerConfigurationKeys::HBASE_SSL_ENABLED_KEY,
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_regions.rb b/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
index dc77ae7..f748066 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_regions.rb
@@ -81,7 +81,7 @@ EOF
 
         admin_instance = admin.instance_variable_get('@admin')
         conn_instance = admin_instance.getConnection
-        cluster_status = org.apache.hadoop.hbase.ClusterStatus.new(admin_instance.getClusterMetrics)
+        cluster_metrics = admin_instance.getClusterMetrics
         hregion_locator_instance = conn_instance.getRegionLocator(TableName.valueOf(table_name))
         hregion_locator_list = hregion_locator_instance.getAllRegionLocations.to_a
         results = []
@@ -104,22 +104,23 @@ EOF
           regions.each do |hregion|
             hregion_info = hregion.getRegion
             server_name = hregion.getServerName
-            server_load = cluster_status.getLoad(server_name)
-            if server_load.nil?
-              region_load_map = java.util.HashMap.new
+            server_metrics_map = cluster_metrics.getLiveServerMetrics
+            server_metrics = server_metrics_map.get(server_name)
+            if server_metrics.nil?
+              region_metrics_map = java.util.HashMap.new
             else
-              region_load_map = server_load.getRegionsLoad
+              region_metrics_map = server_metrics.getRegionMetrics
             end
             region_name = hregion_info.getRegionNameAsString
-            region_load = region_load_map.get(hregion_info.getRegionName)
+            region_metrics = region_metrics_map.get(hregion_info.getRegionName)
 
-            if region_load.nil?
+            if region_metrics.nil?
               puts "Can not find all details for region: " \
                    "#{region_name.strip} ," \
                    " it may be disabled or in transition\n"
             else
               # Ignore regions which exceed our locality threshold
-              next unless accept_region_for_locality? region_load.getDataLocality,
+              next unless accept_region_for_locality? region_metrics.getDataLocality,
                                                       locality_threshold
             end
             result_hash = {}
@@ -147,30 +148,30 @@ EOF
             end
 
             if size_hash.key?('SIZE')
-              if region_load.nil?
+              if region_metrics.nil?
                 region_store_file_size = ''
               else
-                region_store_file_size = region_load.getStorefileSizeMB.to_s.strip
+                region_store_file_size = region_metrics.getStoreFileSize.to_s.strip
               end
               result_hash.store('SIZE', region_store_file_size)
               size_hash['SIZE'] = [size_hash['SIZE'], region_store_file_size.length].max
             end
 
             if size_hash.key?('REQ')
-              if region_load.nil?
+              if region_metrics.nil?
                 region_requests = ''
               else
-                region_requests = region_load.getRequestsCount.to_s.strip
+                region_requests = region_metrics.getRequestCount.to_s.strip
               end
               result_hash.store('REQ', region_requests)
               size_hash['REQ'] = [size_hash['REQ'], region_requests.length].max
             end
 
             if size_hash.key?('LOCALITY')
-              if region_load.nil?
+              if region_metrics.nil?
                 locality = ''
               else
-                locality = region_load.getDataLocality.to_s.strip
+                locality = region_metrics.getDataLocality.to_s.strip
               end
               result_hash.store('LOCALITY', locality)
               size_hash['LOCALITY'] = [size_hash['LOCALITY'], locality.length].max
diff --git a/hbase-shell/src/main/ruby/shell/commands/rit.rb b/hbase-shell/src/main/ruby/shell/commands/rit.rb
index cdb49cf..9d1ee27 100644
--- a/hbase-shell/src/main/ruby/shell/commands/rit.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/rit.rb
@@ -29,7 +29,7 @@ Examples:
       end
 
       def command
-        rit = admin.getClusterStatus.getRegionStatesInTransition
+        rit = admin.getClusterMetrics.getRegionStatesInTransition
         rit.each do |v|
           formatter.row([v.toDescriptiveString])
         end
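
For completeness, an illustrative pairing of the per-region accessors renamed above; this is a sketch, not code from the commit, and the variables (cluster_metrics, server_name, hregion_info) are assumed to be set up as in list_regions.rb:

    server_metrics = cluster_metrics.getLiveServerMetrics.get(server_name)
    region_metrics = server_metrics.getRegionMetrics.get(hregion_info.getRegionName) unless server_metrics.nil?
    unless region_metrics.nil?
      region_metrics.getStoreFileSize   # replaces region_load.getStorefileSizeMB
      region_metrics.getRequestCount    # replaces region_load.getRequestsCount
      region_metrics.getDataLocality    # same getter name on the new metrics object
    end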