You are viewing a plain text version of this content. The canonical link for it is here.
Posted to notifications@apisix.apache.org by me...@apache.org on 2020/01/08 06:00:38 UTC
[incubator-apisix] branch master updated: plugin(prometheus): added
new field (#1028)
This is an automated email from the ASF dual-hosted git repository.
membphis pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-apisix.git
The following commit(s) were added to refs/heads/master by this push:
new 21c06b3 plugin(prometheus): added new field (#1028)
21c06b3 is described below
commit 21c06b3cc43b62e45f8813f6dd22f24bae702ee1
Author: YuanSheng Wang <me...@gmail.com>
AuthorDate: Wed Jan 8 14:00:31 2020 +0800
plugin(prometheus): added new field (#1028)
feature: added `route` label (populated with the route id, `route_id`) for the HTTP status code metric.
feature: added `hostname` for nginx connection metric.
---
lua/apisix/plugins/prometheus/exporter.lua | 27 ++++---
t/plugin/prometheus.t | 113 ++++++++++++++++++++++++++++-
2 files changed, 125 insertions(+), 15 deletions(-)
diff --git a/lua/apisix/plugins/prometheus/exporter.lua b/lua/apisix/plugins/prometheus/exporter.lua
index 691d7a6..81ff31e 100644
--- a/lua/apisix/plugins/prometheus/exporter.lua
+++ b/lua/apisix/plugins/prometheus/exporter.lua
@@ -47,7 +47,7 @@ function _M.init()
-- per service
metrics.status = prometheus:counter("http_status",
"HTTP status codes per service in APISIX",
- {"code", "service", "node"})
+ {"code", "route", "service", "node"})
metrics.latency = prometheus:histogram("http_latency",
"HTTP request latency per service in APISIX",
@@ -55,31 +55,34 @@ function _M.init()
metrics.bandwidth = prometheus:counter("bandwidth",
"Total bandwidth in bytes consumed per service in APISIX",
- {"type", "service", "node"})
+ {"type", "route", "service", "node"})
end
function _M.log(conf, ctx)
local vars = ctx.var
- local service_name
- if ctx.matched_route and ctx.matched_route.value then
- service_name = ctx.matched_route.value.desc or
- ctx.matched_route.value.id
+ local route_id = ""
+ local balancer_ip = ctx.balancer_ip
+ local service_id
+
+ local matched_route = ctx.matched_route and ctx.matched_route.value
+ if matched_route then
+ service_id = matched_route.service_id or ""
+ route_id = matched_route.id
else
- service_name = vars.host
+ service_id = vars.host
end
- local balancer_ip = ctx.balancer_ip
- metrics.status:inc(1, vars.status, service_name, balancer_ip)
+ metrics.status:inc(1, vars.status, route_id, service_id, balancer_ip)
local latency = (ngx.now() - ngx.req.start_time()) * 1000
- metrics.latency:observe(latency, "request", service_name, balancer_ip)
+ metrics.latency:observe(latency, "request", service_id, balancer_ip)
- metrics.bandwidth:inc(vars.request_length, "ingress", service_name,
+ metrics.bandwidth:inc(vars.request_length, "ingress", route_id, service_id,
balancer_ip)
- metrics.bandwidth:inc(vars.bytes_sent, "egress", service_name,
+ metrics.bandwidth:inc(vars.bytes_sent, "egress", route_id, service_id,
balancer_ip)
end
diff --git a/t/plugin/prometheus.t b/t/plugin/prometheus.t
index 06d9cae..18fa3ed 100644
--- a/t/plugin/prometheus.t
+++ b/t/plugin/prometheus.t
@@ -151,7 +151,7 @@ apisix_etcd_reachable 1
--- request
GET /apisix/prometheus/metrics
--- response_body eval
-qr/apisix_bandwidth\{type="egress",service="1",node="127.0.0.1"\} \d+/
+qr/apisix_bandwidth\{type="egress",route="1",service="",node="127.0.0.1"\} \d+/
--- no_error_log
[error]
@@ -293,7 +293,7 @@ passed
--- request
GET /apisix/prometheus/metrics
--- response_body eval
-qr/apisix_bandwidth\{type="egress",service="1",node="127.0.0.1"\} \d+/
+qr/apisix_bandwidth\{type="egress",route="1",service="",node="127.0.0.1"\} \d+/
--- no_error_log
[error]
@@ -303,6 +303,113 @@ qr/apisix_bandwidth\{type="egress",service="1",node="127.0.0.1"\} \d+/
--- request
GET /apisix/prometheus/metrics
--- response_body eval
-qr/apisix_http_latency_count\{type="request",service="1",node="127.0.0.1"\} \d+/
+qr/apisix_http_latency_count\{type="request",service="",node="127.0.0.1"\} \d+/
+--- no_error_log
+[error]
+
+
+
+=== TEST 15: create service
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/services/1',
+ ngx.HTTP_PUT,
+ [[{
+ "plugins": {
+ "prometheus": {}
+ },
+ "upstream": {
+ "nodes": {
+ "127.0.0.1:1980": 1
+ },
+ "type": "roundrobin"
+ }
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 16: use service 1 in route 1
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/routes/2',
+ ngx.HTTP_PUT,
+ [[{
+ "service_id": 1,
+ "uri": "/hello1"
+ }]]
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 17: pipeline of client request
+--- pipelined_requests eval
+["GET /hello1", "GET /not_found", "GET /hello1", "GET /hello1"]
+--- error_code eval
+[200, 404, 200, 200]
+--- no_error_log
+[error]
+
+
+
+=== TEST 18: fetch the prometheus metric data
+--- request
+GET /apisix/prometheus/metrics
+--- response_body eval
+qr/apisix_bandwidth\{type="egress",route="2",service="1",node="127.0.0.1"\} \d+/
+--- no_error_log
+[error]
+
+
+
+=== TEST 19: delete route 2
+--- config
+ location /t {
+ content_by_lua_block {
+ local t = require("lib.test_admin").test
+ local code, body = t('/apisix/admin/routes/2',
+ ngx.HTTP_DELETE
+ )
+
+ if code >= 300 then
+ ngx.status = code
+ end
+ ngx.say(body)
+ }
+ }
+--- request
+GET /t
+--- response_body
+passed
--- no_error_log
[error]