Posted to commits@chukwa.apache.org by ey...@apache.org on 2011/01/23 21:57:09 UTC

svn commit: r1062530 - in /incubator/chukwa/trunk: CHANGES.txt conf/hbase.schema script/pig/ClusterSummary.pig

Author: eyang
Date: Sun Jan 23 20:57:09 2011
New Revision: 1062530

URL: http://svn.apache.org/viewvc?rev=1062530&view=rev
Log:
CHUKWA-575. Added cluster summary pig script. (Eric Yang)

Added:
    incubator/chukwa/trunk/script/pig/ClusterSummary.pig
Modified:
    incubator/chukwa/trunk/CHANGES.txt
    incubator/chukwa/trunk/conf/hbase.schema

Modified: incubator/chukwa/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/incubator/chukwa/trunk/CHANGES.txt?rev=1062530&r1=1062529&r2=1062530&view=diff
==============================================================================
--- incubator/chukwa/trunk/CHANGES.txt (original)
+++ incubator/chukwa/trunk/CHANGES.txt Sun Jan 23 20:57:09 2011
@@ -4,6 +4,8 @@ Trunk (unreleased changes)
 
   NEW FEATURES
 
+    CHUKWA-575. Added cluster summary pig script. (Eric Yang)
+
     CHUKWA-571. Add digested system metrics to SystemMetrics demux parser. (Eric Yang)
 
     CHUKWA-563. Added configuration to skip HBase schema validation for HBaseWriter. (Eric Yang)

Modified: incubator/chukwa/trunk/conf/hbase.schema
URL: http://svn.apache.org/viewvc/incubator/chukwa/trunk/conf/hbase.schema?rev=1062530&r1=1062529&r2=1062530&view=diff
==============================================================================
--- incubator/chukwa/trunk/conf/hbase.schema (original)
+++ incubator/chukwa/trunk/conf/hbase.schema Sun Jan 23 20:57:09 2011
@@ -1,42 +1,30 @@
 create "Hadoop",{NAME => "ClientTrace", VERSIONS => 65535},
-{NAME => "jvm_metrics", VERSIONS => 65535},
-{NAME => "mapred_metrics", VERSIONS => 65535},
-{NAME => "dfs_metrics", VERSIONS => 65535},
 {NAME => "dfs_namenode", VERSIONS => 65535},
 {NAME => "dfs_FSDirectory", VERSIONS => 65535},
 {NAME => "dfs_FSNamesystem", VERSIONS => 65535},
 {NAME => "dfs_datanode", VERSIONS => 65535},
 {NAME => "mapred_jobtracker", VERSIONS => 65535},
-{NAME => "mapred_shuffleInput", VERSIONS => 65535},
 {NAME => "mapred_shuffleOutput", VERSIONS => 65535},
 {NAME => "mapred_tasktracker", VERSIONS => 65535},
 {NAME => "mapred_job", VERSIONS => 65535},
+{NAME => "jvm_metrics", VERSIONS => 65535},
 {NAME => "rpc_metrics", VERSIONS => 65535}
 create "HadoopLog", {NAME => "NameNode", VERSIONS => 65535},
 {NAME => "DataNode", VERSIONS => 65535},
 {NAME => "Audit", VERSIONS => 65535},
-{NAME => "HadoopLog", VERSIONS => 65535},
-{NAME => "HadoopLogInError", VERSIONS => 65535}
-create "SystemMetrics", {NAME => "Disk", VERSIONS => 65535},
-{NAME => "SystemMetrics", VERSIONS => 65535},
-{NAME => "Ps", VERSIONS => 65535},
-{NAME => "SysLog", VERSIONS => 65535},
-{NAME => "Top", VERSIONS => 65535},
-{NAME => "Df", VERSIONS => 65535},
-{NAME => "org.apache.hadoop.chukwa.extraction.demux.processor.mapper.Top", VERSIONS => 65535},
-{NAME => "TopInError", VERSIONS => 65535},
-{NAME => "DfInError", VERSIONS => 65535},
+{NAME => "HadoopLog", VERSIONS => 65535}
+create "SystemMetrics", 
 {NAME => "cpu", VERSIONS => 65535},
 {NAME => "system", VERSION => 65535},
 {NAME => "disk", VERSION => 65535},
 {NAME => "memory", VERSION => 65535},
-{NAME => "network", VERSION => 65535}
-create "Mapreduce",{NAME => "JobData", VERSIONS => 65535},
-{NAME => "JobConfData", VERSIONS => 65535},
-{NAME => "JobLogHistory", VERSIONS => 65535},
-{NAME => "TaskData", VERSIONS => 65535},
-{NAME => "TaskLogInError", VERSIONS => 65535}
-create "TsProcessor",{NAME => "log", VERSIONS => 65535}
-create "chukwa", {NAME => "chukwaAgent_chunkQueue", VERSIONS => 65535},
-{NAME => "chukwaAgent_metrics", VERSIONS => 65535},
-{NAME => "chukwaAgent_httpSender", VERSIONS => 65535}
+{NAME => "network", VERSION => 65535},
+{NAME => "tags", VERSION => 65535}
+create "ClusterSummary", {NAME=> "cpu", VERSIONS => 65535},
+{NAME => "system", VERSION => 65535},
+{NAME => "disk", VERSION => 65535},
+{NAME => "memory", VERSION => 65535},
+{NAME => "network", VERSION => 65535},
+{NAME => "hdfs", VERSION => 65535},
+{NAME => "mapreduce", VERSION => 65535}
+

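The updated schema replaces the per-processor column families with lowercase metric groups (cpu, system, disk, memory, network, tags) and adds a ClusterSummary table to hold the aggregates produced by the new pig script. The tables can be recreated by piping the schema file into the HBase shell. A minimal sketch, assuming HBASE_HOME points at a running HBase installation (any tables that already exist must be dropped or altered by hand first):

    # create the Hadoop, HadoopLog, SystemMetrics, and ClusterSummary tables
    $HBASE_HOME/bin/hbase shell < conf/hbase.schema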
Added: incubator/chukwa/trunk/script/pig/ClusterSummary.pig
URL: http://svn.apache.org/viewvc/incubator/chukwa/trunk/script/pig/ClusterSummary.pig?rev=1062530&view=auto
==============================================================================
--- incubator/chukwa/trunk/script/pig/ClusterSummary.pig (added)
+++ incubator/chukwa/trunk/script/pig/ClusterSummary.pig Sun Jan 23 20:57:09 2011
@@ -0,0 +1,17 @@
+%default START '1234567890';
+SystemMetrics = load 'hbase://SystemMetrics' USING org.apache.pig.backend.hadoop.hbase.HBaseStorage('tags:cluster cpu:combined cpu:idle cpu:sys cpu:user disk:ReadBytes disk:Reads disk:WriteBytes disk:Writes system:LoadAverage.1 memory:FreePercent memory:UsedPercent network:RxBytes network:RxDropped network:RxErrors network:RxPackets network:TxBytes network:TxCollisions network:TxErrors network:TxPackets','-loadKey -gt $START -caster Utf8StorageConverter') AS (rowKey, cluster, cpuCombined, cpuIdle, cpuSys, cpuUser, diskReadBytes, diskReads, diskWriteBytes, diskWrites, LoadAverage, memoryFreePercent, memoryUsedPercent, networkRxBytes, networkRxDropped, networkRxErrors, networkRxPackets, networkTxBytes, networkTxCollisions, networkTxErrors, networkTxPackets);
+CleanseBuffer = foreach SystemMetrics generate REGEX_EXTRACT($0,'^\\d+',0) as time, cluster, cpuCombined, cpuIdle, cpuSys, cpuUser, diskReadBytes, diskReads, diskWriteBytes, diskWrites, LoadAverage, memoryFreePercent, memoryUsedPercent, networkRxBytes, networkRxDropped, networkRxErrors, networkRxPackets, networkTxBytes, networkTxCollisions, networkTxErrors, networkTxPackets;
+ConcatBuffer = foreach CleanseBuffer generate CONCAT(CONCAT($0, '-'), $1) as rowId, cpuCombined, cpuIdle, cpuSys, cpuUser, diskReadBytes, diskReads, diskWriteBytes, diskWrites, LoadAverage, memoryFreePercent, memoryUsedPercent, networkRxBytes, networkRxDropped, networkRxErrors, networkRxPackets, networkTxBytes, networkTxCollisions, networkTxErrors, networkTxPackets;
+TimeSeries = GROUP ConcatBuffer BY rowId;
+ComputeBuffer = FOREACH TimeSeries GENERATE group, AVG(ConcatBuffer.cpuCombined), AVG(ConcatBuffer.cpuIdle), AVG(ConcatBuffer.cpuSys), AVG(ConcatBuffer.cpuUser), AVG(ConcatBuffer.diskReadBytes), AVG(ConcatBuffer.diskReads), AVG(ConcatBuffer.diskWriteBytes), AVG(ConcatBuffer.diskWrites), AVG(ConcatBuffer.LoadAverage), AVG(ConcatBuffer.memoryFreePercent), AVG(ConcatBuffer.memoryUsedPercent), AVG(ConcatBuffer.networkRxBytes), AVG(ConcatBuffer.networkRxDropped), AVG(ConcatBuffer.networkRxErrors), AVG(ConcatBuffer.networkRxPackets), AVG(ConcatBuffer.networkTxBytes), AVG(ConcatBuffer.networkTxCollisions), AVG(ConcatBuffer.networkTxErrors), AVG(ConcatBuffer.networkTxPackets);
+STORE ComputeBuffer INTO 'ClusterSummary' USING org.apache.pig.backend.hadoop.hbase.HBaseStorage('cpu:Combined cpu:Idle cpu:Sys cpu:User disk:ReadBytes disk:Reads disk:WriteBytes disk:Writes system:LoadAverage memory:FreePercent memory:UsedPercent network:RxBytes network:RxDropped network:RxErrors network:RxPackets network:TxBytes network:TxCollisions network:TxErrors network:TxPackets');
+HDFSMetrics = load 'hbase://Hadoop' USING org.apache.pig.backend.hadoop.hbase.HBaseStorage('dfs_FSNamesystem:cluster dfs_FSNamesystem:CapacityTotalGB dfs_FSNamesystem:CapacityUsedGB dfs_FSNamesystem:CapacityRemainingGB dfs_FSNamesystem:BlockCapacity dfs_FSNamesystem:BlocksTotal dfs_FSNamesystem:MissingBlocks dfs_FSNamesystem:CorruptBlocks dfs_FSNamesystem:UnderReplicatedBlocks','-loadKey -gt $START -caster Utf8StorageConverter') AS (rowKey, cluster, CapacityTotalGB, CapacityUsedGB, CapacityRemainingGB, BlockCapacity, BlocksTotal, MissingBlocks, CorruptBlocks, UnderReplicatedBlocks);
+CleanseBuffer = foreach HDFSMetrics generate REGEX_EXTRACT($0,'^\\d+',0) as time, cluster, CapacityTotalGB, CapacityUsedGB, CapacityRemainingGB, BlockCapacity, BlocksTotal, MissingBlocks, CorruptBlocks, UnderReplicatedBlocks;
+ConcatBuffer = foreach CleanseBuffer generate CONCAT(CONCAT($0, '-'), $1) as rowId, CapacityTotalGB, CapacityUsedGB, CapacityRemainingGB, BlockCapacity, BlocksTotal, MissingBlocks, CorruptBlocks, UnderReplicatedBlocks;
+STORE ConcatBuffer INTO 'ClusterSummary' USING org.apache.pig.backend.hadoop.hbase.HBaseStorage('hdfs:CapacityTotalGB hdfs:CapacityUsedGB hdfs:CapacityRemainingGB hdfs:BlockCapacity hdfs:BlocksTotal hdfs:MissingBlocks hdfs:CorruptBlocks hdfs:UnderReplicatedBlocks');
+MapReduceMetrics = load 'hbase://Hadoop' USING org.apache.pig.backend.hadoop.hbase.HBaseStorage('mapred_tasktracker:cluster mapred_tasktracker:mapTaskSlots mapred_tasktracker:maps_running mapred_tasktracker:reduceTaskSlots mapred_tasktracker:reduces_running mapred_tasktracker:tasks_completed mapred_tasktracker:tasks_failed_ping mapred_tasktracker:tasks_failed_timeout','-loadKey -gt $START -caster Utf8StorageConverter') AS (rowKey, cluster, mapTaskSlots, mapsRunning, reduceTaskSlots, reduceRunning, tasksCompleted, tasksFailedPing, tasksFailedTimeout);
+CleanseBuffer = foreach MapReduceMetrics generate REGEX_EXTRACT($0,'^\\d+',0) as time, cluster, mapTaskSlots, mapsRunning, reduceTaskSlots, reduceRunning, tasksCompleted, tasksFailedPing, tasksFailedTimeout;
+GroupBuffer = foreach CleanseBuffer generate CONCAT(CONCAT($0, '-'), $1) as rowId, mapTaskSlots, mapsRunning, reduceTaskSlots, reduceRunning, tasksCompleted, tasksFailedPing, tasksFailedTimeout;
+TimeSeries = GROUP GroupBuffer BY rowId;
+MapReduceSummary = FOREACH TimeSeries GENERATE group, SUM(GroupBuffer.mapTaskSlots), SUM(GroupBuffer.mapsRunning), SUM(GroupBuffer.reduceTaskSlots), SUM(GroupBuffer.reduceRunning), SUM(GroupBuffer.tasksCompleted), SUM(GroupBuffer.tasksFailedPing), SUM(GroupBuffer.tasksFailedTimeout);
+STORE MapReduceSummary INTO 'ClusterSummary' USING org.apache.pig.backend.hadoop.hbase.HBaseStorage('mapreduce:mapTaskSlots mapreduce:mapsRunning mapreduce:reduceTaskSlots mapreduce:reduceRunning mapreduce:tasksCompleted mapreduce:tasksFailedPing mapreduce:tasksFailedTimeout');
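The script loads SystemMetrics and Hadoop rows with keys newer than $START, then averages the system metrics, copies the FSNamesystem capacity counters, and sums the tasktracker slot and task counts per <timestamp>-<cluster> row key before storing the results into the ClusterSummary column families defined above. START defaults to '1234567890' via %default and can be overridden with Pig's -param flag. A minimal sketch of a run, assuming the HBase and ZooKeeper configuration is on Pig's classpath via a HBASE_CONF_DIR variable (the timestamp value is illustrative):

    # summarize metrics collected after the given epoch timestamp
    export PIG_CLASSPATH=$HBASE_CONF_DIR
    pig -param START=1295812800 script/pig/ClusterSummary.pig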