You are viewing a plain text version of this content. The canonical link for it is here.
Posted to notifications@apisix.apache.org by li...@apache.org on 2020/10/21 01:33:03 UTC
[apisix] branch master updated: feat(prometheus): Add consumer
metric data support (#2469)
This is an automated email from the ASF dual-hosted git repository.
liuxiran pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/apisix.git
The following commit(s) were added to refs/heads/master by this push:
new 127dac9 feat(prometheus): Add consumer metric data support (#2469)
127dac9 is described below
commit 127dac9ebb340db59b0fa657c1a4eb5d98bab531
Author: Joey <ma...@gmail.com>
AuthorDate: Wed Oct 21 09:32:52 2020 +0800
feat(prometheus): Add consumer metric data support (#2469)
* feat: Add consumer support for prometheus metrics
Signed-off-by: imjoey <ma...@gmail.com>
* Add more detailed comments for newly added consumer label
Signed-off-by: imjoey <ma...@gmail.com>
---
apisix/plugins/prometheus/exporter.lua | 23 +++---
t/plugin/prometheus.t | 132 +++++++++++++++++++++++++++++++--
2 files changed, 141 insertions(+), 14 deletions(-)
diff --git a/apisix/plugins/prometheus/exporter.lua b/apisix/plugins/prometheus/exporter.lua
index df8b13c..35eeeae 100644
--- a/apisix/plugins/prometheus/exporter.lua
+++ b/apisix/plugins/prometheus/exporter.lua
@@ -96,22 +96,26 @@ function _M.init()
{"key"})
-- per service
+
+ -- The consumer label indicates the name of the consumer corresponding to
+ -- the request to the route/service; it will be an empty string if there is
+ -- no consumer in the request.
metrics.status = prometheus:counter("http_status",
"HTTP status codes per service in APISIX",
- {"code", "route", "service", "node"})
+ {"code", "route", "service", "consumer", "node"})
metrics.latency = prometheus:histogram("http_latency",
"HTTP request latency in milliseconds per service in APISIX",
- {"type", "service", "node"}, DEFAULT_BUCKETS)
+ {"type", "service", "consumer", "node"}, DEFAULT_BUCKETS)
metrics.overhead = prometheus:histogram("http_overhead",
"HTTP request overhead added by APISIX in milliseconds per service " ..
"in APISIX",
- {"type", "service", "node"}, DEFAULT_BUCKETS)
+ {"type", "service", "consumer", "node"}, DEFAULT_BUCKETS)
metrics.bandwidth = prometheus:counter("bandwidth",
"Total bandwidth in bytes consumed per service in APISIX",
- {"type", "route", "service", "node"})
+ {"type", "route", "service", "consumer", "node"})
end
@@ -122,6 +126,7 @@ function _M.log(conf, ctx)
local route_id = ""
local balancer_ip = ctx.balancer_ip or ""
local service_id
+ local consumer_id = ctx.consumer_id or ""
local matched_route = ctx.matched_route and ctx.matched_route.value
if matched_route then
@@ -132,24 +137,24 @@ function _M.log(conf, ctx)
end
metrics.status:inc(1,
- gen_arr(vars.status, route_id, service_id, balancer_ip))
+ gen_arr(vars.status, route_id, service_id, consumer_id, balancer_ip))
local latency = (ngx.now() - ngx.req.start_time()) * 1000
metrics.latency:observe(latency,
- gen_arr("request", service_id, balancer_ip))
+ gen_arr("request", service_id, consumer_id, balancer_ip))
local overhead = latency
if ctx.var.upstream_response_time then
overhead = overhead - tonumber(ctx.var.upstream_response_time) * 1000
end
metrics.overhead:observe(overhead,
- gen_arr("request", service_id, balancer_ip))
+ gen_arr("request", service_id, consumer_id, balancer_ip))
metrics.bandwidth:inc(vars.request_length,
- gen_arr("ingress", route_id, service_id, balancer_ip))
+ gen_arr("ingress", route_id, service_id, consumer_id, balancer_ip))
metrics.bandwidth:inc(vars.bytes_sent,
- gen_arr("egress", route_id, service_id, balancer_ip))
+ gen_arr("egress", route_id, service_id, consumer_id, balancer_ip))
end
diff --git a/t/plugin/prometheus.t b/t/plugin/prometheus.t
index 63c8670..451e9c2 100644
--- a/t/plugin/prometheus.t
+++ b/t/plugin/prometheus.t
@@ -151,7 +151,7 @@ apisix_etcd_reachable 1
--- request
GET /apisix/prometheus/metrics
--- response_body eval
-qr/apisix_bandwidth\{type="egress",route="1",service="",node="127.0.0.1"\} \d+/
+qr/apisix_bandwidth\{type="egress",route="1",service="",consumer="",node="127.0.0.1"\} \d+/
--- no_error_log
[error]
@@ -293,7 +293,7 @@ passed
--- request
GET /apisix/prometheus/metrics
--- response_body eval
-qr/apisix_bandwidth\{type="egress",route="1",service="",node="127.0.0.1"\} \d+/
+qr/apisix_bandwidth\{type="egress",route="1",service="",consumer="",node="127.0.0.1"\} \d+/
--- no_error_log
[error]
@@ -303,7 +303,7 @@ qr/apisix_bandwidth\{type="egress",route="1",service="",node="127.0.0.1"\} \d+/
--- request
GET /apisix/prometheus/metrics
--- response_body eval
-qr/apisix_http_latency_count\{type="request",service="",node="127.0.0.1"\} \d+/
+qr/apisix_http_latency_count\{type="request",service="",consumer="",node="127.0.0.1"\} \d+/
--- no_error_log
[error]
@@ -386,7 +386,7 @@ passed
--- request
GET /apisix/prometheus/metrics
--- response_body eval
-qr/apisix_bandwidth\{type="egress",route="2",service="1",node="127.0.0.1"\} \d+/
+qr/apisix_bandwidth\{type="egress",route="2",service="1",consumer="",node="127.0.0.1"\} \d+/
--- no_error_log
[error]
@@ -521,7 +521,7 @@ passed
--- request
GET /apisix/prometheus/metrics
--- response_body eval
-qr/apisix_http_status\{code="404",route="3",service="",node="127.0.0.1"\} 2/
+qr/apisix_http_status\{code="404",route="3",service="",consumer="",node="127.0.0.1"\} 2/
--- no_error_log
[error]
@@ -809,3 +809,125 @@ qr/apisix_/
qr/etcd/
--- no_error_log
[error]
+
+
+
+=== TEST 45: set route with key-auth enabled for consumer metrics
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/routes/1',
+ ngx.HTTP_PUT,
+ [[{
+ "plugins": {
+ "prometheus": {},
+ "key-auth": {}
+ },
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:1980": 1
+ },
+ "type": "roundrobin"
+ },
+ "uri": "/hello"
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 46: pipeline of client request without api-key
+--- pipelined_requests eval
+["GET /hello", "GET /hello", "GET /hello", "GET /hello"]
+--- error_code eval
+[401, 401, 401, 401]
+--- no_error_log
+[error]
+
+
+
+=== TEST 47: fetch the prometheus metric data: consumer is empty
+--- request
+GET /apisix/prometheus/metrics
+--- response_body eval
+qr/apisix_bandwidth\{type="egress",route="1",service="",consumer="",node="127.0.0.1"\} \d+/
+--- no_error_log
+[error]
+
+
+
+=== TEST 48: set consumer for metrics data collection
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/consumers/1',
+ ngx.HTTP_PUT,
+ [[{
+ "username": "jack",
+ "plugins": {
+ "key-auth": {
+ "key": "auth-one"
+ }
+ }
+ }]],
+ [[{
+ "node": {
+ "value": {
+ "username": "jack",
+ "plugins": {
+ "key-auth": {
+ "key": "auth-one"
+ }
+ }
+ }
+ },
+ "action": "set"
+ }]]
+ )
+
+ ngx.status = code
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 49: pipeline of client requests successfully authorized
+--- pipelined_requests eval
+["GET /hello", "GET /hello", "GET /hello", "GET /hello"]
+--- more_headers
+apikey: auth-one
+--- error_code eval
+[200, 200, 200, 200]
+--- no_error_log
+[error]
+
+
+
+=== TEST 50: fetch the prometheus metric data: consumer is jack
+--- request
+GET /apisix/prometheus/metrics
+--- response_body eval
+qr/apisix_http_status\{code="200",route="1",service="",consumer="jack",node="127.0.0.1"\} \d+/
+--- no_error_log
+[error]