Posted to notifications@apisix.apache.org by me...@apache.org on 2020/05/12 07:47:59 UTC

[incubator-apisix] branch master updated: feature(prometheus): support to collect metric `overhead` (#1576)

This is an automated email from the ASF dual-hosted git repository.

membphis pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-apisix.git


The following commit(s) were added to refs/heads/master by this push:
     new 463c521  feature(prometheus): support to collect metric `overhead` (#1576)
463c521 is described below

commit 463c5219297eba98412713f9293f385c93f10715
Author: YuanSheng Wang <me...@gmail.com>
AuthorDate: Tue May 12 15:47:49 2020 +0800

    feature(prometheus): support to collect metric `overhead` (#1576)
    
    Fix #1534.
---
 apisix/plugins/prometheus/exporter.lua | 14 +++++++++++++-
 t/plugin/prometheus.t                  | 10 ++++++++++
 2 files changed, 23 insertions(+), 1 deletion(-)

diff --git a/apisix/plugins/prometheus/exporter.lua b/apisix/plugins/prometheus/exporter.lua
index 538deab..91ef22d 100644
--- a/apisix/plugins/prometheus/exporter.lua
+++ b/apisix/plugins/prometheus/exporter.lua
@@ -20,12 +20,14 @@ local ipairs    = ipairs
 local ngx       = ngx
 local ngx_capture = ngx.location.capture
 local re_gmatch = ngx.re.gmatch
+local tonumber = tonumber
 local prometheus
 
 -- Default set of latency buckets, 1ms to 60s:
 local DEFAULT_BUCKETS = { 1, 2, 5, 7, 10, 15, 20, 25, 30, 40, 50, 60, 70,
     80, 90, 100, 200, 300, 400, 500, 1000,
-    2000, 5000, 10000, 30000, 60000 }
+    2000, 5000, 10000, 30000, 60000
+}
 
 local metrics = {}
 
@@ -54,6 +56,10 @@ function _M.init()
         "HTTP request latency per service in APISIX",
         {"type", "service", "node"}, DEFAULT_BUCKETS)
 
+    metrics.overhead = prometheus:histogram("http_overhead",
+        "HTTP request overhead per service in APISIX",
+        {"type", "service", "node"}, DEFAULT_BUCKETS)
+
     metrics.bandwidth = prometheus:counter("bandwidth",
             "Total bandwidth in bytes consumed per service in APISIX",
             {"type", "route", "service", "node"})
@@ -80,6 +86,12 @@ function _M.log(conf, ctx)
     local latency = (ngx.now() - ngx.req.start_time()) * 1000
     metrics.latency:observe(latency, "request", service_id, balancer_ip)
 
+    local overhead = latency
+    if ctx.var.upstream_response_time then
+        overhead = overhead - tonumber(ctx.var.upstream_response_time)
+    end
+    metrics.overhead:observe(overhead, "request", service_id, balancer_ip)
+
     metrics.bandwidth:inc(vars.request_length, "ingress", route_id, service_id,
                           balancer_ip)
 
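The hunk above derives `overhead` by subtracting the upstream's reported response time from the total request latency, so the new histogram tracks time spent inside APISIX itself rather than in the upstream service. Below is a minimal standalone Lua sketch of that calculation, assuming both values are expressed in milliseconds; the function name and the sample values are illustrative only and are not part of the patch:

    -- overhead = total latency minus time spent in the upstream; when no
    -- upstream was involved, the whole latency counts as overhead
    local function request_overhead(latency_ms, upstream_time_ms)
        local overhead = latency_ms
        if upstream_time_ms then
            overhead = overhead - tonumber(upstream_time_ms)
        end
        return overhead
    end

    print(request_overhead(12.5, "3.2"))  -- 9.3
    print(request_overhead(12.5, nil))    -- 12.5 (request never reached an upstream)
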
diff --git a/t/plugin/prometheus.t b/t/plugin/prometheus.t
index 078d479..a5f28f9 100644
--- a/t/plugin/prometheus.t
+++ b/t/plugin/prometheus.t
@@ -524,3 +524,13 @@ GET /apisix/prometheus/metrics
 qr/apisix_http_status\{code="404",route="3",service="",node="127.0.0.1"\} 2/
 --- no_error_log
 [error]
+
+
+
+=== TEST 25: fetch the prometheus metric data with `overhead`
+--- request
+GET /apisix/prometheus/metrics
+--- response_body eval
+qr/.*apisix_http_overhead_bucket.*/
+--- no_error_log
+[error]
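
With this commit in place, a scrape of `/apisix/prometheus/metrics` (the endpoint exercised by TEST 25 above) exposes the new histogram as the usual Prometheus bucket/sum/count series, carrying the same `type`/`service`/`node` labels as the latency metric. The excerpt below only illustrates the shape of that output: the label values and counts are invented, and the exact set and formatting of the `le` buckets is determined by the nginx-lua-prometheus library together with the DEFAULT_BUCKETS table shown in the diff.

    # HELP apisix_http_overhead HTTP request overhead per service in APISIX
    # TYPE apisix_http_overhead histogram
    apisix_http_overhead_bucket{type="request",service="1",node="127.0.0.1",le="1"} 1
    apisix_http_overhead_bucket{type="request",service="1",node="127.0.0.1",le="10"} 4
    apisix_http_overhead_sum{type="request",service="1",node="127.0.0.1"} 24.8
    apisix_http_overhead_count{type="request",service="1",node="127.0.0.1"} 5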