Posted to notifications@apisix.apache.org by GitBox <gi...@apache.org> on 2022/08/10 14:09:26 UTC

[GitHub] [apisix] ccxhwmy opened a new pull request, #7643: add elasticsearch-logging base function

ccxhwmy opened a new pull request, #7643:
URL: https://github.com/apache/apisix/pull/7643

   ### Description
   
   <!-- Please include a summary of the change and which issue is fixed. -->
   <!-- Please also include relevant motivation and context. -->
   
   Fixes # [issue](https://github.com/apache/apisix/issues/7636)
   
   ### Checklist
   
   - [ ] I have explained the need for this PR and the problem it solves
   - [ ] I have explained the changes or the new features added to this PR
   - [ ] I have added tests corresponding to this change
   - [ ] I have updated the documentation to reflect this change
   - [ ] I have verified that this change is backward compatible (If not, please discuss on the [APISIX mailing list](https://github.com/apache/apisix/tree/master#community) first)
   
   <!--
   
   Note
   
   1. Mark the PR as draft until it's ready to be reviewed.
   2. Always add/update tests for any changes unless you have a good reason.
   3. Always update the documentation to reflect the changes made in the PR.
   4. Make a new commit to resolve conversations instead of `push -f`.
   5. To resolve merge conflicts, merge master instead of rebasing.
   6. Use "request review" to notify the reviewer after making changes.
   7. Only a reviewer can mark a conversation as resolved.
   
   -->
   


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logging

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r945464070


##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx
+local core            = require("apisix.core")
+local ngx_now         = ngx.now
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+
+local DEFAULT_ELASTICSEARCH_SOURCE = "apache-apisix-elasticsearch-logging"
+
+local plugin_name = "elasticsearch-logging"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+local str_format = core.string.format
+local str_sub = string.sub
+
+
+local schema = {
+    type = "object",
+    properties = {
+        endpoint = {
+            type = "object",
+            properties = {
+                uri = core.schema.uri_def,
+                index = { type = "string"},
+                type = { type = "string"},
+                username = { type = "string"},
+                password = { type = "string"},
+                timeout = {
+                    type = "integer",
+                    minimum = 1,
+                    default = 10
+                },
+                ssl_verify = {
+                    type = "boolean",
+                    default = true
+                }
+            },
+            required = { "uri", "index" }
+        },
+    },
+    required = { "endpoint" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf)
+    local entry = log_util.get_full_log(ngx, conf)
+    return core.json.encode({
+            create = {
+                _index = conf.endpoint.index,
+                _type = conf.endpoint.type
+            }
+        }) .. "\n" ..
+        core.json.encode({
+            time = ngx_now(),
+            host = entry.server.hostname,
+            source = DEFAULT_ELASTICSEARCH_SOURCE,
+            request_url = entry.request.url,
+            request_method = entry.request.method,
+            request_headers = entry.request.headers,
+            request_query = entry.request.querystring,
+            request_size = entry.request.size,
+            response_headers = entry.response.headers,
+            response_status = entry.response.status,
+            response_size = entry.response.size,
+            latency = entry.latency,
+            upstream = entry.upstream,
+        }) .. "\n"
+end
+
+
+local function send_to_elasticsearch(conf, entries)
+    local httpc, err = http.new()
+    if not httpc then
+        return false, str_format("create http error: %s", err)
+    end
+
+    local uri = conf.endpoint.uri ..
+        (str_sub(conf.endpoint.uri, -1) == "/" and "_bulk" or "/_bulk")
+    local body = core.table.concat(entries, "")
+    local headers = {["Content-Type"] = "application/json"}
+    if conf.endpoint.username and conf.endpoint.password then
+        local authorization = "Basic " .. ngx.encode_base64(
+            conf.endpoint.username .. ":" .. conf.endpoint.password
+        )
+        headers["Authorization"] = authorization
+    end
+
+    core.log.info("uri: ", uri, ", body: ", body, ", headers: ", core.json.encode(headers))

Review Comment:
   This `headers` table may contain the `username` and `password` in Base64 form, which we should not output in the log for security reasons.
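
   A minimal sketch of one way to address this concern (an editorial illustration based on the diff above, not code from the PR): shallow-copy the headers and mask the credential before encoding them for the log.

   ```lua
   -- Illustrative only: redact the Authorization value so the Base64-encoded
   -- username:password never reaches the error log.
   local safe_headers = {}
   for k, v in pairs(headers) do
       safe_headers[k] = v
   end
   if safe_headers["Authorization"] then
       safe_headers["Authorization"] = "REDACTED"
   end
   core.log.info("uri: ", uri, ", body: ", body,
                 ", headers: ", core.json.encode(safe_headers))
   ```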





[GitHub] [apisix] tzssangglass commented on pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on PR #7643:
URL: https://github.com/apache/apisix/pull/7643#issuecomment-1215102647

   > Thank you for your suggestions, they are helpful to me.
   > Would you offer me some docs about the document specification you are using, so that I can write docs that follow the conventions better next time?
   
   IMO, I learned the document specification from the existing plugin docs.
   




[GitHub] [apisix] spacewander commented on pull request #7643: feat: add elasticsearch-logging

Posted by GitBox <gi...@apache.org>.
spacewander commented on PR #7643:
URL: https://github.com/apache/apisix/pull/7643#issuecomment-1214360852

   @ccxhwmy 
   Could you fix the message so we can close the issue when this PR is merged?




[GitHub] [apisix] ccxhwmy commented on pull request #7643: feat: add elasticsearch-logging

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on PR #7643:
URL: https://github.com/apache/apisix/pull/7643#issuecomment-1214391258

   > @ccxhwmy Could you fix the message so we can close the issue when this PR is merged?
   
   I tried to do it, but it failed; it seems that I don't have permission.




[GitHub] [apisix] spacewander commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
spacewander commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r956983148


##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,181 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {
+            type = "string",
+            default = "default",
+            enum = {"default", "origin"},
+        },
+        endpoint_addr = {
+            type = "string",
+            pattern = "[^/]$",
+        },
+        field = {
+            type = "object",
+            properties = {
+                index = { type = "string"},
+                type = { type = "string"}
+            },
+            required = {"index"}
+        },
+        auth = {
+            type = "object",
+            properties = {
+                username = {
+                    type = "string",
+                    minLength = 1
+                },
+                password = {
+                    type = "string",
+                    minLength = 1
+                },
+            },
+            required = {"username", "password"},
+        },
+        timeout = {
+            type = "integer",
+            minimum = 1,
+            default = 10
+        },
+        ssl_verify = {
+            type = "boolean",
+            default = true
+        }
+    },
+    required = { "endpoint_addr", "field" },
+}
+
+
+local metadata_schema = {
+    type = "object",
+    properties = {
+        log_format = log_util.metadata_schema_log_format,
+    },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+    metadata_schema = metadata_schema,
+}
+
+
+function _M.check_schema(conf, schema_type)
+    if schema_type == core.schema.TYPE_METADATA then
+        return core.schema.check(metadata_schema, conf)
+    end
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf, ctx)
+    local entry
+    local metadata = plugin.plugin_metadata(plugin_name)
+    core.log.info("metadata: ", core.json.delay_encode(metadata))
+    if metadata and metadata.value.log_format
+        and core.table.nkeys(metadata.value.log_format) > 0
+    then
+        entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
+        core.log.info("custom log format entry: ", core.json.delay_encode(entry))
+    else
+        entry = log_util.get_full_log(ngx, conf)
+        core.log.info("full log entry: ", core.json.delay_encode(entry))
+    end
+
+    return core.json.encode({
+            create = {
+                _index = conf.field.index,
+                _type = conf.field.type
+            }
+        }) .. "\n" ..
+        core.json.encode(entry) .. "\n"
+end
+
+
+local function send_to_elasticsearch(conf, entries)
+    local httpc, err = http.new()
+    if not httpc then
+        return false, str_format("create http error: %s", err)
+    end
+
+    local uri = conf.endpoint_addr .. "/_bulk"
+    local body = core.table.concat(entries, "")
+    local headers = {["Content-Type"] = "application/json"}

Review Comment:
   Would it be better to use `application/x-ndjson`?
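
   For illustration, a minimal sketch of what the suggested change could look like in `send_to_elasticsearch` (an editorial sketch, not code from the PR; the bulk payload built above is newline-delimited JSON, which `application/x-ndjson` describes more precisely):

   ```lua
   -- Illustrative only: advertise the bulk body as NDJSON rather than plain JSON.
   local headers = {["Content-Type"] = "application/x-ndjson"}
   ```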



##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,181 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {

Review Comment:
   Where do we use this field?



##########
docs/zh/latest/plugins/elasticsearch-logger.md:
##########
@@ -0,0 +1,278 @@
+---
+title: elasticsearch-logger
+keywords:
+  - APISIX
+  - API 网关
+  - 插件
+  - Elasticsearch-logger
+  - 日志
+description: 本文介绍了 API 网关 Apache APISIX 的 elasticsearch-logger 插件。使用该插件可以将 APISIX 的日志数据推送到 Elasticserach。
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## 描述
+
+`elasticsearch-logger` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储。
+
+启用该插件后 APISIX 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 Elaticsearch 中。更多信息,请参考 [Batch-Processor](./batch-processor.md)。
+
+## 属性
+
+| 名称          | 类型    | 必选项 | 默认值               | 描述                                                         |
+| ------------- | ------- | -------- | -------------------- | ------------------------------------------------------------ |
+| endpoint_addr | string  | 是       |                      | Elasticsearch API。                                           |
+| field         | array   | 是       |                      | Elasticsearch `field`配置信息。                                |
+| field.index   | string  | 是       |                      | Elasticsearch `[_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field)`。 |
+| field.type    | string  | 否       | Elasticsearch 默认值 | Elasticsearch `[_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field)` |
+| auth          | array   | 否       |                      | Elasticsearch `[authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html)` 配置信息 |
+| auth.username | string  | 是       |                      | Elasticsearch `[authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html)` 用户名。 |
+| auth.password | string  | 是       |                      | Elasticsearch `[authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html)` 密码。 |
+| ssl_verify    | boolean | 否       | true                 | 当设置为 `true` 时则启用 SSL 验证。更多信息请参考 [lua-nginx-module](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake)。 |
+| timeout       | integer | 否       | 10                   | 发送给 Elasticsearch 请求超时时间。                            |
+
+本插件支持使用批处理器来聚合并批量处理条目(日志和数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置) 配置部分。
+
+## 启用插件
+
+你可以通过如下命令在指定路由上启用 `elasticsearch-logger` 插件:
+
+### 完整配置示例
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \

Review Comment:
   Would you update the port number to 9180, as in this PR: https://github.com/apache/apisix/pull/7806?





[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r950355186


##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,166 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+local str_byte        = string.byte
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {
+            type = "string",
+            default = "default",
+            enum = {"default", "origin"},
+        },
+        endpoint_addr = core.schema.uri_def,
+        field = {
+            type = "object",
+            properties = {
+                index = { type = "string"},
+                type = { type = "string"}
+            },
+            required = {"index"}
+        },
+        auth = {
+            type = "object",
+            properties = {
+                username = {
+                    type = "string",
+                },
+                password = {
+                    type = "string",
+                },
+            },
+            required = {"username", "password"},
+        },
+        timeout = {
+            type = "integer",
+            minimum = 1,
+            default = 10
+        },
+        ssl_verify = {
+            type = "boolean",
+            default = true
+        }
+    },
+    required = { "endpoint_addr", "field" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf, ctx)
+    local entry
+    local metadata = plugin.plugin_metadata(plugin_name)
+    core.log.info("metadata: ", core.json.delay_encode(metadata))
+    if metadata and metadata.value.log_format
+        and core.table.nkeys(metadata.value.log_format) > 0
+    then
+        entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
+        core.log.info("custom log format entry: ", core.json.delay_encode(entry))
+    else
+        entry = log_util.get_full_log(ngx, conf)
+        core.log.info("full log entry: ", core.json.delay_encode(entry))
+    end
+
+    return core.json.encode({
+            create = {
+                _index = conf.field.index,
+                _type = conf.field.type
+            }
+        }) .. "\n" ..
+        core.json.encode(entry) .. "\n"
+end
+
+
+local function send_to_elasticsearch(conf, entries)
+    local httpc, err = http.new()
+    if not httpc then
+        return false, str_format("create http error: %s", err)
+    end
+
+    local uri = conf.endpoint_addr ..
+        (str_byte(conf.endpoint_addr, -1) == str_byte("/") and "_bulk" or "/_bulk")
+    local body = core.table.concat(entries, "")
+    local headers = {["Content-Type"] = "application/json"}
+    if conf.auth and conf.auth.username and conf.auth.password then

Review Comment:
   The `username` and `password` are not required by the schema.
   If users configure `username` and `password`, `elasticsearch-logger` must set `Authorization` in the request header.
   If they are not configured, then we do not need to set `Authorization` in the request header.
   So `elasticsearch-logger` must check whether `username` and `password` are configured.
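
   A minimal sketch of the conditional described above, reusing the names from this diff (illustrative only; under the later revision of the schema, `auth` itself is optional but requires both fields once present, so checking `conf.auth` alone would suffice there):

   ```lua
   -- Illustrative only: attach Basic auth only when the user configured it.
   if conf.auth and conf.auth.username and conf.auth.password then
       headers["Authorization"] = "Basic " .. ngx.encode_base64(
           conf.auth.username .. ":" .. conf.auth.password)
   end
   ```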





[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r945832191


##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx
+local core            = require("apisix.core")
+local ngx_now         = ngx.now
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+
+local DEFAULT_ELASTICSEARCH_SOURCE = "apache-apisix-elasticsearch-logging"
+
+local plugin_name = "elasticsearch-logging"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+local str_format = core.string.format
+local str_sub = string.sub
+
+
+local schema = {
+    type = "object",
+    properties = {
+        endpoint = {
+            type = "object",
+            properties = {
+                uri = core.schema.uri_def,
+                index = { type = "string"},
+                type = { type = "string"},
+                username = { type = "string"},
+                password = { type = "string"},
+                timeout = {
+                    type = "integer",
+                    minimum = 1,
+                    default = 10
+                },
+                ssl_verify = {
+                    type = "boolean",
+                    default = true
+                }
+            },
+            required = { "uri", "index" }
+        },
+    },
+    required = { "endpoint" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf)
+    local entry = log_util.get_full_log(ngx, conf)

Review Comment:
   yes





[GitHub] [apisix] hf400159 commented on a diff in pull request #7643: feat: add elasticsearch-logging

Posted by GitBox <gi...@apache.org>.
hf400159 commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r945458945


##########
docs/zh/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,136 @@
+---
+title: elasticsearch-logging
+---

Review Comment:
   Can you add some keywords and a description?



##########
docs/zh/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,136 @@
+---
+title: elasticsearch-logging
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## 描述
+
+`elasticsearch-logging` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储,启用该插件后 `Apache APISIX` 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 `Elaticsearch` 中。
+
+有关 `Apache APISIX` 的 `Batch-Processor` 的更多信息,请参考: [Batch-Processor](https://file+.vscode-resource.vscode-cdn.net/y%3A/apisix/docs/zh/latest/batch-processor.md)
+
+## 属性
+
+| 名称                | 是否必需 | 默认值               | 描述                                                         |
+| ------------------- | -------- | -------------------- | ------------------------------------------------------------ |
+| endpoint            | 必选     |                      | Elasticsearch 端点配置信息                                   |
+| endpoint.uri        | 必选     |                      | Elasticsearch API                                            |
+| endpoint.index      | 必选     |                      | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| endpoint.type       | 可选     | Elasticsearch 默认值 | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| endpoint.username   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| endpoint.password   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| endpoint.ssl_verify | 可选     | true                 | 当设置为 `true` 则允许 SSL 验证,参考 [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake) |
+| endpoint.timeout    | 可选     | 10                   | 发送给 Elasticsearch 请求超时时间                            |
+
+本插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](https://file+.vscode-resource.vscode-cdn.net/y%3A/apisix/docs/zh/latest/batch-processor.md#配置) 配置部分。

Review Comment:
   ```suggestion
   本插件支持使用批处理器来聚合并批量处理条目(日志和数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置) 配置部分。
   ```



##########
docs/zh/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,136 @@
+---
+title: elasticsearch-logging
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## 描述
+
+`elasticsearch-logging` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储,启用该插件后 `Apache APISIX` 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 `Elaticsearch` 中。
+
+有关 `Apache APISIX` 的 `Batch-Processor` 的更多信息,请参考: [Batch-Processor](https://file+.vscode-resource.vscode-cdn.net/y%3A/apisix/docs/zh/latest/batch-processor.md)
+
+## 属性
+
+| 名称                | 是否必需 | 默认值               | 描述                                                         |
+| ------------------- | -------- | -------------------- | ------------------------------------------------------------ |
+| endpoint            | 必选     |                      | Elasticsearch 端点配置信息                                   |
+| endpoint.uri        | 必选     |                      | Elasticsearch API                                            |
+| endpoint.index      | 必选     |                      | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| endpoint.type       | 可选     | Elasticsearch 默认值 | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| endpoint.username   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| endpoint.password   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| endpoint.ssl_verify | 可选     | true                 | 当设置为 `true` 则允许 SSL 验证,参考 [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake) |
+| endpoint.timeout    | 可选     | 10                   | 发送给 Elasticsearch 请求超时时间                            |
+
+本插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](https://file+.vscode-resource.vscode-cdn.net/y%3A/apisix/docs/zh/latest/batch-processor.md#配置) 配置部分。
+
+## 如何开启
+
+下面例子展示了如何为指定路由开启 `elasticsearch-logging` 插件。
+
+### 完整配置
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "splunk-hec-logging":{
+            "endpoint":{
+                "uri": "http://127.0.0.1:9200",
+                "index": "services",
+                "type": "collector",
+                "timeout": 60,
+                "username": "elastic",
+                "password": "123456",
+                "ssl_verify": false
+            },
+            "buffer_duration":60,
+            "max_retry_count":0,
+            "retry_delay":1,
+            "inactive_timeout":2,
+            "batch_max_size":10
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+### 最小化配置

Review Comment:
   ```suggestion
   ### 最小化配置示例
   ```



##########
docs/zh/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,136 @@
+---
+title: elasticsearch-logging
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## 描述
+
+`elasticsearch-logging` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储,启用该插件后 `Apache APISIX` 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 `Elaticsearch` 中。
+
+有关 `Apache APISIX` 的 `Batch-Processor` 的更多信息,请参考: [Batch-Processor](https://file+.vscode-resource.vscode-cdn.net/y%3A/apisix/docs/zh/latest/batch-processor.md)
+
+## 属性
+
+| 名称                | 是否必需 | 默认值               | 描述                                                         |
+| ------------------- | -------- | -------------------- | ------------------------------------------------------------ |
+| endpoint            | 必选     |                      | Elasticsearch 端点配置信息                                   |
+| endpoint.uri        | 必选     |                      | Elasticsearch API                                            |
+| endpoint.index      | 必选     |                      | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| endpoint.type       | 可选     | Elasticsearch 默认值 | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| endpoint.username   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| endpoint.password   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| endpoint.ssl_verify | 可选     | true                 | 当设置为 `true` 则允许 SSL 验证,参考 [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake) |
+| endpoint.timeout    | 可选     | 10                   | 发送给 Elasticsearch 请求超时时间                            |
+
+本插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](https://file+.vscode-resource.vscode-cdn.net/y%3A/apisix/docs/zh/latest/batch-processor.md#配置) 配置部分。
+
+## 如何开启
+
+下面例子展示了如何为指定路由开启 `elasticsearch-logging` 插件。
+
+### 完整配置
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "splunk-hec-logging":{
+            "endpoint":{
+                "uri": "http://127.0.0.1:9200",
+                "index": "services",
+                "type": "collector",
+                "timeout": 60,
+                "username": "elastic",
+                "password": "123456",
+                "ssl_verify": false
+            },
+            "buffer_duration":60,
+            "max_retry_count":0,
+            "retry_delay":1,
+            "inactive_timeout":2,
+            "batch_max_size":10
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+### 最小化配置
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "splunk-hec-logging":{
+            "endpoint":{
+                "uri": "http://127.0.0.1:9200",
+                "index": "services"
+            }
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+## 测试插件
+
+- 向配置 `elasticsearch-logging` 插件的路由发送请求
+
+```shell
+$ curl -i http://127.0.0.1:9080/elasticsearch.do?q=hello
+HTTP/1.1 200 OK
+...
+hello, world
+```
+
+- 登录 Kibana 控制台检索查看日志
+
+![kibana search view](../../../assets/images/plugin/elasticsearch-admin-cn.png)
+
+## 禁用插件
+
+禁用 `elasticsearch-logging` 插件非常简单,只需将 `elasticsearch-logging` 对应的 `JSON` 配置移除即可。

Review Comment:
   ```suggestion
   当你需要禁用该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务:
   ```



##########
docs/zh/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,136 @@
+---
+title: elasticsearch-logging
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## 描述
+
+`elasticsearch-logging` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储,启用该插件后 `Apache APISIX` 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 `Elaticsearch` 中。
+
+有关 `Apache APISIX` 的 `Batch-Processor` 的更多信息,请参考: [Batch-Processor](https://file+.vscode-resource.vscode-cdn.net/y%3A/apisix/docs/zh/latest/batch-processor.md)
+
+## 属性
+
+| 名称                | 是否必需 | 默认值               | 描述                                                         |
+| ------------------- | -------- | -------------------- | ------------------------------------------------------------ |
+| endpoint            | 必选     |                      | Elasticsearch 端点配置信息                                   |
+| endpoint.uri        | 必选     |                      | Elasticsearch API                                            |
+| endpoint.index      | 必选     |                      | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| endpoint.type       | 可选     | Elasticsearch 默认值 | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| endpoint.username   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| endpoint.password   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| endpoint.ssl_verify | 可选     | true                 | 当设置为 `true` 则允许 SSL 验证,参考 [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake) |
+| endpoint.timeout    | 可选     | 10                   | 发送给 Elasticsearch 请求超时时间                            |

Review Comment:
   Please refer to the [http-logger](https://github.com/apache/apisix/blob/master/docs/zh/latest/plugins/http-logger.md) doc for the modification.



##########
docs/en/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,143 @@
+---
+title: elasticsearch-logging
+keywords:
+  - APISIX
+  - Plugin
+  - Elasticsearch-logging
+description: This document contains information about the Apache APISIX elasticsearch-logging Plugin.
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## Description
+
+The `elasticsearch-logging` Plugin is used to forward logs to [Elasticsearch](https://www.elastic.co/guide/en/welcome-to-elastic/current/getting-started-general-purpose.html) for analysis and storage.
+
+When the Plugin is enabled, APISIX will serialize the request context information to [Elasticsearch Bulk format](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) and submit it to the batch queue. When the maximum batch size is exceeded, the data in the queue is pushed to Elasticsearch. See [batch processor](../batch-processor.md) for more details.
+
+## Attributes
+
+| Name                | Required | Default                     | Description                                                  |
+| ------------------- | -------- | --------------------------- | ------------------------------------------------------------ |
+| endpoint            | True     |                             | Elasticsearch endpoint configurations.                       |
+| endpoint.uri        | True     |                             | Elasticsearch API endpoint.                                  |
+| endpoint.index      | True     |                             | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| endpoint.type       | False    | Elasticsearch default value | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| endpoint.username   | False    |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| endpoint.password   | False    |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| endpoint.ssl_verify | False    | true                        | When set to `true` enables SSL verification as per [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake). |
+| endpoint.timeout    | False    | 10                          | Elasticsearch send data timeout in seconds.                  |
+
+This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration.
+
+## Enabling the Plugin
+
+### Full configuration
+
+The example below shows a complete configuration of the Plugin on a specific Route:
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "splunk-hec-logging":{
+            "endpoint":{
+                "uri": "http://127.0.0.1:9200",
+                "index": "services",
+                "type": "collector",
+                "timeout": 60,
+                "username": "elastic",
+                "password": "123456",
+                "ssl_verify": false
+            },
+            "buffer_duration":60,
+            "max_retry_count":0,
+            "retry_delay":1,
+            "inactive_timeout":2,
+            "batch_max_size":10
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+### Minimal configuration

Review Comment:
   ```suggestion
   ### Minimal configuration example
   ```



##########
docs/zh/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,136 @@
+---
+title: elasticsearch-logging
+---

Review Comment:
   Please refer to: https://github.com/apache/apisix/blob/master/docs/zh/latest/plugins/http-logger.md



##########
docs/zh/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,136 @@
+---
+title: elasticsearch-logging
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## 描述
+
+`elasticsearch-logging` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储,启用该插件后 `Apache APISIX` 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 `Elaticsearch` 中。
+
+有关 `Apache APISIX` 的 `Batch-Processor` 的更多信息,请参考: [Batch-Processor](https://file+.vscode-resource.vscode-cdn.net/y%3A/apisix/docs/zh/latest/batch-processor.md)
+
+## 属性
+
+| 名称                | 是否必需 | 默认值               | 描述                                                         |
+| ------------------- | -------- | -------------------- | ------------------------------------------------------------ |
+| endpoint            | 必选     |                      | Elasticsearch 端点配置信息                                   |
+| endpoint.uri        | 必选     |                      | Elasticsearch API                                            |
+| endpoint.index      | 必选     |                      | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| endpoint.type       | 可选     | Elasticsearch 默认值 | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| endpoint.username   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| endpoint.password   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| endpoint.ssl_verify | 可选     | true                 | 当设置为 `true` 则允许 SSL 验证,参考 [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake) |
+| endpoint.timeout    | 可选     | 10                   | 发送给 Elasticsearch 请求超时时间                            |
+
+本插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](https://file+.vscode-resource.vscode-cdn.net/y%3A/apisix/docs/zh/latest/batch-processor.md#配置) 配置部分。
+
+## 如何开启
+
+下面例子展示了如何为指定路由开启 `elasticsearch-logging` 插件。

Review Comment:
   ```suggestion
   你可以通过如下命令在指定路由上启用 `elasticsearch-logging` 插件:
   ```



##########
docs/zh/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,136 @@
+---
+title: elasticsearch-logging
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## 描述
+
+`elasticsearch-logging` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储,启用该插件后 `Apache APISIX` 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 `Elaticsearch` 中。
+
+有关 `Apache APISIX` 的 `Batch-Processor` 的更多信息,请参考: [Batch-Processor](https://file+.vscode-resource.vscode-cdn.net/y%3A/apisix/docs/zh/latest/batch-processor.md)
+
+## 属性
+
+| 名称                | 是否必需 | 默认值               | 描述                                                         |
+| ------------------- | -------- | -------------------- | ------------------------------------------------------------ |
+| endpoint            | 必选     |                      | Elasticsearch 端点配置信息                                   |
+| endpoint.uri        | 必选     |                      | Elasticsearch API                                            |
+| endpoint.index      | 必选     |                      | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| endpoint.type       | 可选     | Elasticsearch 默认值 | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| endpoint.username   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| endpoint.password   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| endpoint.ssl_verify | 可选     | true                 | 当设置为 `true` 则允许 SSL 验证,参考 [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake) |
+| endpoint.timeout    | 可选     | 10                   | 发送给 Elasticsearch 请求超时时间                            |
+
+本插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](https://file+.vscode-resource.vscode-cdn.net/y%3A/apisix/docs/zh/latest/batch-processor.md#配置) 配置部分。
+
+## 如何开启

Review Comment:
   ```suggestion
   ## 启用插件
   ```



##########
docs/zh/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,136 @@
+---
+title: elasticsearch-logging
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## 描述
+
+`elasticsearch-logging` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储,启用该插件后 `Apache APISIX` 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 `Elaticsearch` 中。
+
+有关 `Apache APISIX` 的 `Batch-Processor` 的更多信息,请参考: [Batch-Processor](https://file+.vscode-resource.vscode-cdn.net/y%3A/apisix/docs/zh/latest/batch-processor.md)

Review Comment:
   ```suggestion
   ```



##########
docs/zh/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,136 @@
+---
+title: elasticsearch-logging
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## 描述
+
+`elasticsearch-logging` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储,启用该插件后 `Apache APISIX` 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 `Elasticsearch` 中。

Review Comment:
   ```suggestion
   `elasticsearch-logging` 插件用于将 APISIX 的请求日志转发到 Elasticsearch 中进行分析和存储,启用该插件后 Apache APISIX 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 Elasticsearch 中。
   ```
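
As a quick illustration of the Bulk format referenced in the description above, a hand-written Bulk request against the `services` index from the sample configuration could look roughly like the sketch below (the endpoint address and the `elastic:123456` credentials are taken from the example config in this PR, not fixed by the plugin):

```shell
# Each document is an action/metadata line followed by the source line,
# newline-delimited; the Bulk API also requires a trailing newline.
curl -u elastic:123456 -X POST "http://127.0.0.1:9200/_bulk" \
  -H 'Content-Type: application/x-ndjson' \
  --data-binary @- <<'EOF'
{ "index": { "_index": "services" } }
{ "host": "localhost", "client_ip": "127.0.0.1", "route_id": "1" }
EOF
```

The plugin builds a payload of this shape from the request context before handing it to the batch processor.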



##########
docs/zh/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,136 @@
+---
+title: elasticsearch-logging
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## 描述
+
+`elasticsearch-logging` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储,启用该插件后 `Apache APISIX` 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 `Elasticsearch` 中。
+
+有关 `Apache APISIX` 的 `Batch-Processor` 的更多信息,请参考: [Batch-Processor](../batch-processor.md)
+
+## 属性
+
+| 名称                | 是否必需 | 默认值               | 描述                                                         |
+| ------------------- | -------- | -------------------- | ------------------------------------------------------------ |
+| endpoint            | 必选     |                      | Elasticsearch 端点配置信息                                   |
+| endpoint.uri        | 必选     |                      | Elasticsearch API                                            |
+| endpoint.index      | 必选     |                      | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| endpoint.type       | 可选     | Elasticsearch 默认值 | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| endpoint.username   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| endpoint.password   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| endpoint.ssl_verify | 可选     | true                 | 当设置为 `true` 则允许 SSL 验证,参考 [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake) |
+| endpoint.timeout    | 可选     | 10                   | 发送给 Elasticsearch 请求超时时间                            |
+
+本插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置) 配置部分。
+
+## 如何开启
+
+下面例子展示了如何为指定路由开启 `elasticsearch-logging` 插件。
+
+### 完整配置
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '

Review Comment:
   ```suggestion
   curl http://127.0.0.1:9080/apisix/admin/routes/1 \
   -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
   ```



##########
docs/zh/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,136 @@
+---
+title: elasticsearch-logging
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## 描述
+
+`elasticsearch-logging` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储,启用该插件后 `Apache APISIX` 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 `Elasticsearch` 中。
+
+有关 `Apache APISIX` 的 `Batch-Processor` 的更多信息,请参考: [Batch-Processor](../batch-processor.md)
+
+## 属性
+
+| 名称                | 是否必需 | 默认值               | 描述                                                         |
+| ------------------- | -------- | -------------------- | ------------------------------------------------------------ |
+| endpoint            | 必选     |                      | Elasticsearch 端点配置信息                                   |
+| endpoint.uri        | 必选     |                      | Elasticsearch API                                            |
+| endpoint.index      | 必选     |                      | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| endpoint.type       | 可选     | Elasticsearch 默认值 | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| endpoint.username   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| endpoint.password   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| endpoint.ssl_verify | 可选     | true                 | 当设置为 `true` 则允许 SSL 验证,参考 [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake) |
+| endpoint.timeout    | 可选     | 10                   | 发送给 Elasticsearch 请求超时时间                            |
+
+本插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置) 配置部分。
+
+## 如何开启
+
+下面例子展示了如何为指定路由开启 `elasticsearch-logging` 插件。
+
+### 完整配置

Review Comment:
   ```suggestion
   ### 完整配置示例
   ```



##########
docs/zh/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,136 @@
+---
+title: elasticsearch-logging
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## 描述
+
+`elasticsearch-logging` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储,启用该插件后 `Apache APISIX` 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 `Elasticsearch` 中。
+
+有关 `Apache APISIX` 的 `Batch-Processor` 的更多信息,请参考: [Batch-Processor](../batch-processor.md)
+
+## 属性
+
+| 名称                | 是否必需 | 默认值               | 描述                                                         |
+| ------------------- | -------- | -------------------- | ------------------------------------------------------------ |
+| endpoint            | 必选     |                      | Elasticsearch 端点配置信息                                   |
+| endpoint.uri        | 必选     |                      | Elasticsearch API                                            |
+| endpoint.index      | 必选     |                      | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| endpoint.type       | 可选     | Elasticsearch 默认值 | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| endpoint.username   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| endpoint.password   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| endpoint.ssl_verify | 可选     | true                 | 当设置为 `true` 则允许 SSL 验证,参考 [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake) |
+| endpoint.timeout    | 可选     | 10                   | 发送给 Elasticsearch 请求超时时间                            |
+
+本插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置) 配置部分。
+
+## 如何开启
+
+下面例子展示了如何为指定路由开启 `elasticsearch-logging` 插件。
+
+### 完整配置
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logging":{
+            "endpoint":{
+                "uri": "http://127.0.0.1:9200",
+                "index": "services",
+                "type": "collector",
+                "timeout": 60,
+                "username": "elastic",
+                "password": "123456",
+                "ssl_verify": false
+            },
+            "buffer_duration":60,
+            "max_retry_count":0,
+            "retry_delay":1,
+            "inactive_timeout":2,
+            "batch_max_size":10
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+### 最小化配置
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logging":{
+            "endpoint":{
+                "uri": "http://127.0.0.1:9200",
+                "index": "services"
+            }
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+## 测试插件
+
+- 向配置 `elasticsearch-logging` 插件的路由发送请求
+
+```shell
+$ curl -i http://127.0.0.1:9080/elasticsearch.do?q=hello

Review Comment:
   ```suggestion
   curl -i http://127.0.0.1:9080/elasticsearch.do?q=hello
   ```



##########
docs/zh/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,136 @@
+---
+title: elasticsearch-logging
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## 描述
+
+`elasticsearch-logging` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储,启用该插件后 `Apache APISIX` 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 `Elasticsearch` 中。
+
+有关 `Apache APISIX` 的 `Batch-Processor` 的更多信息,请参考: [Batch-Processor](../batch-processor.md)
+
+## 属性
+
+| 名称                | 是否必需 | 默认值               | 描述                                                         |
+| ------------------- | -------- | -------------------- | ------------------------------------------------------------ |
+| endpoint            | 必选     |                      | Elasticsearch 端点配置信息                                   |
+| endpoint.uri        | 必选     |                      | Elasticsearch API                                            |
+| endpoint.index      | 必选     |                      | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| endpoint.type       | 可选     | Elasticsearch 默认值 | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| endpoint.username   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| endpoint.password   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| endpoint.ssl_verify | 可选     | true                 | 当设置为 `true` 则允许 SSL 验证,参考 [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake) |
+| endpoint.timeout    | 可选     | 10                   | 发送给 Elasticsearch 请求超时时间                            |
+
+本插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置) 配置部分。
+
+## 如何开启
+
+下面例子展示了如何为指定路由开启 `elasticsearch-logging` 插件。
+
+### 完整配置
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logging":{
+            "endpoint":{
+                "uri": "http://127.0.0.1:9200",
+                "index": "services",
+                "type": "collector",
+                "timeout": 60,
+                "username": "elastic",
+                "password": "123456",
+                "ssl_verify": false
+            },
+            "buffer_duration":60,
+            "max_retry_count":0,
+            "retry_delay":1,
+            "inactive_timeout":2,
+            "batch_max_size":10
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+### 最小化配置
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '

Review Comment:
   ```suggestion
   curl http://127.0.0.1:9080/apisix/admin/routes/1 \
   -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
   ```
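
Either request can be verified by reading the route back through the Admin API; for example, assuming the default Admin API key used throughout these examples:

```shell
# GET returns the stored route definition, including the plugins block,
# so you can confirm elasticsearch-logging is attached.
curl http://127.0.0.1:9080/apisix/admin/routes/1 \
-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1'
```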



##########
docs/en/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,143 @@
+---
+title: elasticsearch-logging
+keywords:
+  - APISIX
+  - Plugin
+  - Elasticsearch-logging
+description: This document contains information about the Apache APISIX elasticsearch-logging Plugin.
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## Description
+
+The `elasticsearch-logging` Plugin is used to forward logs to [Elasticsearch](https://www.elastic.co/guide/en/welcome-to-elastic/current/getting-started-general-purpose.html) for analysis and storage.
+
+When the Plugin is enabled, APISIX will serialize the request context information to [Elasticsearch Bulk format](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) and submit it to the batch queue. When the maximum batch size is exceeded, the data in the queue is pushed to Elasticsearch. See [batch processor](../batch-processor.md) for more details.
+
+## Attributes
+
+| Name                | Required | Default                     | Description                                                  |
+| ------------------- | -------- | --------------------------- | ------------------------------------------------------------ |
+| endpoint            | True     |                             | Elasticsearch endpoint configurations.                       |
+| endpoint.uri        | True     |                             | Elasticsearch API endpoint.                                  |
+| endpoint.index      | True     |                             | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| endpoint.type       | False    | Elasticsearch default value | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| endpoint.username   | False    |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| endpoint.password   | False    |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| endpoint.ssl_verify | False    | true                        | When set to `true` enables SSL verification as per [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake). |
+| endpoint.timeout    | False    | 10                          | Elasticsearch send data timeout in seconds.                  |
+
+This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration.
+
+## Enabling the Plugin
+
+### Full configuration
+
+The example below shows a complete configuration of the Plugin on a specific Route:
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logging":{
+            "endpoint":{
+                "uri": "http://127.0.0.1:9200",
+                "index": "services",
+                "type": "collector",
+                "timeout": 60,
+                "username": "elastic",
+                "password": "123456",
+                "ssl_verify": false
+            },
+            "buffer_duration":60,
+            "max_retry_count":0,
+            "retry_delay":1,
+            "inactive_timeout":2,
+            "batch_max_size":10
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+### Minimal configuration
+
+The example below shows a bare minimum configuration of the Plugin on a Route:
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logging":{
+            "endpoint":{
+                "uri": "http://127.0.0.1:9200",
+                "index": "services"
+            }
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+## Example usage
+
+Once you have configured the Route to use the Plugin, when you make a request to APISIX, it will be logged in your Elasticsearch server:
+
+```shell
+$ curl -i http://127.0.0.1:9080/elasticsearch.do?q=hello

Review Comment:
   ```suggestion
   curl -i http://127.0.0.1:9080/elasticsearch.do?q=hello
   ```
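
Before opening Kibana, it can help to confirm that the index from the example actually received documents. A minimal check against the sample endpoint (the credentials are the ones from the full configuration above and will differ in a real deployment):

```shell
# Lists the `services` index with its health, document count and size.
curl -u elastic:123456 "http://127.0.0.1:9200/_cat/indices/services?v"
```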



##########
docs/en/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,143 @@
+---
+title: elasticsearch-logging
+keywords:
+  - APISIX
+  - Plugin
+  - Elasticsearch-logging
+description: This document contains information about the Apache APISIX elasticsearch-logging Plugin.
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## Description
+
+The `elasticsearch-logging` Plugin is used to forward logs to [Elasticsearch](https://www.elastic.co/guide/en/welcome-to-elastic/current/getting-started-general-purpose.html) for analysis and storage.
+
+When the Plugin is enabled, APISIX will serialize the request context information to [Elasticsearch Bulk format](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) and submit it to the batch queue. When the maximum batch size is exceeded, the data in the queue is pushed to Elasticsearch. See [batch processor](../batch-processor.md) for more details.
+
+## Attributes
+
+| Name                | Required | Default                     | Description                                                  |
+| ------------------- | -------- | --------------------------- | ------------------------------------------------------------ |
+| endpoint            | True     |                             | Elasticsearch endpoint configurations.                       |
+| endpoint.uri        | True     |                             | Elasticsearch API endpoint.                                  |
+| endpoint.index      | True     |                             | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| endpoint.type       | False    | Elasticsearch default value | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| endpoint.username   | False    |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| endpoint.password   | False    |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| endpoint.ssl_verify | False    | true                        | When set to `true` enables SSL verification as per [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake). |
+| endpoint.timeout    | False    | 10                          | Elasticsearch send data timeout in seconds.                  |
+
+This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration.
+
+## Enabling the Plugin
+
+### Full configuration
+
+The example below shows a complete configuration of the Plugin on a specific Route:
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logging":{
+            "endpoint":{
+                "uri": "http://127.0.0.1:9200",
+                "index": "services",
+                "type": "collector",
+                "timeout": 60,
+                "username": "elastic",
+                "password": "123456",
+                "ssl_verify": false
+            },
+            "buffer_duration":60,
+            "max_retry_count":0,
+            "retry_delay":1,
+            "inactive_timeout":2,
+            "batch_max_size":10
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+### Minimal configuration
+
+The example below shows a bare minimum configuration of the Plugin on a Route:
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '

Review Comment:
   ```suggestion
   curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
   ```
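
One practical pre-check before pointing the plugin at an endpoint is to verify that the APISIX host can reach Elasticsearch at all; a sketch using the sample address from the examples:

```shell
# A reachable endpoint returns the cluster banner (name, version, tagline).
# Add `-u <user>:<password>` if security is enabled on the cluster.
curl "http://127.0.0.1:9200"
```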



##########
docs/zh/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,136 @@
+---
+title: elasticsearch-logging
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## 描述
+
+`elasticsearch-logging` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储,启用该插件后 `Apache APISIX` 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 `Elasticsearch` 中。
+
+有关 `Apache APISIX` 的 `Batch-Processor` 的更多信息,请参考: [Batch-Processor](../batch-processor.md)
+
+## 属性
+
+| 名称                | 是否必需 | 默认值               | 描述                                                         |
+| ------------------- | -------- | -------------------- | ------------------------------------------------------------ |
+| endpoint            | 必选     |                      | Elasticsearch 端点配置信息                                   |
+| endpoint.uri        | 必选     |                      | Elasticsearch API                                            |
+| endpoint.index      | 必选     |                      | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| endpoint.type       | 可选     | Elasticsearch 默认值 | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| endpoint.username   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| endpoint.password   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| endpoint.ssl_verify | 可选     | true                 | 当设置为 `true` 则允许 SSL 验证,参考 [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake) |
+| endpoint.timeout    | 可选     | 10                   | 发送给 Elasticsearch 请求超时时间                            |
+
+本插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置) 配置部分。
+
+## 如何开启
+
+下面例子展示了如何为指定路由开启 `elasticsearch-logging` 插件。
+
+### 完整配置
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logging":{
+            "endpoint":{
+                "uri": "http://127.0.0.1:9200",
+                "index": "services",
+                "type": "collector",
+                "timeout": 60,
+                "username": "elastic",
+                "password": "123456",
+                "ssl_verify": false
+            },
+            "buffer_duration":60,
+            "max_retry_count":0,
+            "retry_delay":1,
+            "inactive_timeout":2,
+            "batch_max_size":10
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+### 最小化配置
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logging":{
+            "endpoint":{
+                "uri": "http://127.0.0.1:9200",
+                "index": "services"
+            }
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+## 测试插件
+
+- 向配置 `elasticsearch-logging` 插件的路由发送请求
+
+```shell
+$ curl -i http://127.0.0.1:9080/elasticsearch.do?q=hello
+HTTP/1.1 200 OK
+...
+hello, world
+```
+
+- 登录 Kibana 控制台检索查看日志

Review Comment:
   ```suggestion
   现在,你可以登录 Kibana 控制台检索查看相关日志。
   ```
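
Querying the index directly through the search API is an alternative way to confirm that the logs arrived, without going through Kibana. The index name and credentials below follow the sample configuration:

```shell
# Returns the documents most recently written to the `services` index.
curl -u elastic:123456 "http://127.0.0.1:9200/services/_search?pretty"
```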



##########
docs/en/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,143 @@
+---
+title: elasticsearch-logging
+keywords:
+  - APISIX
+  - Plugin

Review Comment:
   ```suggestion
     - API Gateway
     - Plugin
   ```



##########
docs/zh/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,136 @@
+---
+title: elasticsearch-logging
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## 描述
+
+`elasticsearch-logging` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储,启用该插件后 `Apache APISIX` 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 `Elasticsearch` 中。
+
+有关 `Apache APISIX` 的 `Batch-Processor` 的更多信息,请参考: [Batch-Processor](../batch-processor.md)
+
+## 属性
+
+| 名称                | 是否必需 | 默认值               | 描述                                                         |
+| ------------------- | -------- | -------------------- | ------------------------------------------------------------ |
+| endpoint            | 必选     |                      | Elasticsearch 端点配置信息                                   |
+| endpoint.uri        | 必选     |                      | Elasticsearch API                                            |
+| endpoint.index      | 必选     |                      | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| endpoint.type       | 可选     | Elasticsearch 默认值 | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| endpoint.username   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| endpoint.password   | 可选     |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| endpoint.ssl_verify | 可选     | true                 | 当设置为 `true` 则允许 SSL 验证,参考 [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake) |
+| endpoint.timeout    | 可选     | 10                   | 发送给 Elasticsearch 请求超时时间                            |
+
+本插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置) 配置部分。
+
+## 如何开启
+
+下面例子展示了如何为指定路由开启 `elasticsearch-logging` 插件。
+
+### 完整配置
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logging":{
+            "endpoint":{
+                "uri": "http://127.0.0.1:9200",
+                "index": "services",
+                "type": "collector",
+                "timeout": 60,
+                "username": "elastic",
+                "password": "123456",
+                "ssl_verify": false
+            },
+            "buffer_duration":60,
+            "max_retry_count":0,
+            "retry_delay":1,
+            "inactive_timeout":2,
+            "batch_max_size":10
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+### 最小化配置
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logging":{
+            "endpoint":{
+                "uri": "http://127.0.0.1:9200",
+                "index": "services"
+            }
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+## 测试插件
+
+- 向配置 `elasticsearch-logging` 插件的路由发送请求
+
+```shell
+$ curl -i http://127.0.0.1:9080/elasticsearch.do?q=hello
+HTTP/1.1 200 OK
+...
+hello, world
+```
+
+- 登录 Kibana 控制台检索查看日志
+
+![kibana search view](../../../assets/images/plugin/elasticsearch-admin-cn.png)
+
+## 禁用插件
+
+禁用 `elasticsearch-logging` 插件非常简单,只需将 `elasticsearch-logging` 对应的 `JSON` 配置移除即可。
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '

Review Comment:
   ```suggestion
   curl http://127.0.0.1:9080/apisix/admin/routes/1 \
   -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
   ```
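
For reference, disabling the plugin amounts to re-PUTting the route without the `plugins` block; a minimal sketch of such a request (route, upstream and Admin API key mirror the examples above):

```shell
# Re-create route 1 with only the uri and upstream, which drops the
# elasticsearch-logging plugin from it.
curl http://127.0.0.1:9080/apisix/admin/routes/1 \
-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
{
    "uri": "/elasticsearch.do",
    "upstream": {
        "type": "roundrobin",
        "nodes": {
            "127.0.0.1:1980": 1
        }
    }
}'
```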



##########
docs/en/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,143 @@
+---
+title: elasticsearch-logging
+keywords:
+  - APISIX
+  - Plugin
+  - Elasticsearch-logging
+description: This document contains information about the Apache APISIX elasticsearch-logging Plugin.
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## Description
+
+The `elasticsearch-logging` Plugin is used to forward logs to [Elasticsearch](https://www.elastic.co/guide/en/welcome-to-elastic/current/getting-started-general-purpose.html) for analysis and storage.
+
+When the Plugin is enabled, APISIX will serialize the request context information to [Elasticsearch Bulk format](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) and submit it to the batch queue. When the maximum batch size is exceeded, the data in the queue is pushed to Elasticsearch. See [batch processor](../batch-processor.md) for more details.
+
+## Attributes
+
+| Name                | Required | Default                     | Description                                                  |
+| ------------------- | -------- | --------------------------- | ------------------------------------------------------------ |
+| endpoint            | True     |                             | Elasticsearch endpoint configurations.                       |
+| endpoint.uri        | True     |                             | Elasticsearch API endpoint.                                  |
+| endpoint.index      | True     |                             | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| endpoint.type       | False    | Elasticsearch default value | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| endpoint.username   | False    |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| endpoint.password   | False    |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| endpoint.ssl_verify | False    | true                        | When set to `true` enables SSL verification as per [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake). |
+| endpoint.timeout    | False    | 10                          | Elasticsearch send data timeout in seconds.                  |
+
+This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration.
+
+## Enabling the Plugin
+
+### Full configuration
+
+The example below shows a complete configuration of the Plugin on a specific Route:
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logging":{
+            "endpoint":{
+                "uri": "http://127.0.0.1:9200",
+                "index": "services",
+                "type": "collector",
+                "timeout": 60,
+                "username": "elastic",
+                "password": "123456",
+                "ssl_verify": false
+            },
+            "buffer_duration":60,
+            "max_retry_count":0,
+            "retry_delay":1,
+            "inactive_timeout":2,
+            "batch_max_size":10
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+### Minimal configuration
+
+The example below shows a bare minimum configuration of the Plugin on a Route:
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logging":{
+            "endpoint":{
+                "uri": "http://127.0.0.1:9200",
+                "index": "services"
+            }
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+## Example usage
+
+Once you have configured the Route to use the Plugin, when you make a request to APISIX, it will be logged in your Elasticsearch server:
+
+```shell
+$ curl -i http://127.0.0.1:9080/elasticsearch.do?q=hello
+HTTP/1.1 200 OK
+...
+hello, world
+```
+
+You should be able to login and search these logs from your Kibana discover:
+
+![kibana search view](../../../assets/images/plugin/elasticsearch-admin-en.png)
+
+## Disable Plugin
+
+To disable the `elasticsearch-logging` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect.
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '

Review Comment:
   ```suggestion
   curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
   ```



##########
docs/en/latest/plugins/elasticsearch-logging.md:
##########
@@ -0,0 +1,143 @@
+---
+title: elasticsearch-logging
+keywords:
+  - APISIX
+  - Plugin
+  - Elasticsearch-logging
+description: This document contains information about the Apache APISIX elasticsearch-logging Plugin.
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## Description
+
+The `elasticsearch-logging` Plugin is used to forward logs to [Elasticsearch](https://www.elastic.co/guide/en/welcome-to-elastic/current/getting-started-general-purpose.html) for analysis and storage.
+
+When the Plugin is enabled, APISIX will serialize the request context information to [Elasticsearch Bulk format](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) and submit it to the batch queue. When the maximum batch size is exceeded, the data in the queue is pushed to Elasticsearch. See [batch processor](../batch-processor.md) for more details.
+
+## Attributes
+
+| Name                | Required | Default                     | Description                                                  |
+| ------------------- | -------- | --------------------------- | ------------------------------------------------------------ |
+| endpoint            | True     |                             | Elasticsearch endpoint configurations.                       |
+| endpoint.uri        | True     |                             | Elasticsearch API endpoint.                                  |
+| endpoint.index      | True     |                             | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| endpoint.type       | False    | Elasticsearch default value | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| endpoint.username   | False    |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| endpoint.password   | False    |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| endpoint.ssl_verify | False    | true                        | When set to `true` enables SSL verification as per [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake). |
+| endpoint.timeout    | False    | 10                          | Elasticsearch send data timeout in seconds.                  |
+
+This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration.
+
+## Enabling the Plugin
+
+### Full configuration
+
+The example below shows a complete configuration of the Plugin on a specific Route:
+
+```shell
+$ curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '

Review Comment:
   ```suggestion
   curl http://127.0.0.1:9080/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r951371097


##########
docs/en/latest/plugins/elasticsearch-logger.md:
##########
@@ -0,0 +1,206 @@
+---
+title: elasticsearch-logger
+keywords:
+  - APISIX
+  - API Gateway
+  - Plugin
+  - Elasticsearch-logger
+description: This document contains information about the Apache APISIX elasticsearch-logger Plugin.
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## Description
+
+The `elasticsearch-logger` Plugin is used to forward logs to [Elasticsearch](https://www.elastic.co/guide/en/welcome-to-elastic/current/getting-started-general-purpose.html) for analysis and storage.
+
+When the Plugin is enabled, APISIX will serialize the request context information to [Elasticsearch Bulk format](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) and submit it to the batch queue. When the maximum batch size is exceeded, the data in the queue is pushed to Elasticsearch. See [batch processor](../batch-processor.md) for more details.
+
+## Attributes
+
+| Name          | Type    | Required | Default                     | Description                                                  |
+| ------------- | ------- | -------- | --------------------------- | ------------------------------------------------------------ |
+| endpoint_addr | string  | True     |                             | Elasticsearch API                                            |
+| field         | array   | True     |                             | Elasticsearch `field` configuration                          |
+| field.index   | string  | True     |                             | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| field.type    | string  | False    | Elasticsearch default value | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| auth          | array   | False    |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) configuration |
+| auth.username | string  | True     |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| auth.password | string  | True     |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| ssl_verify    | boolean | False    | true                        | When set to `true` enables SSL verification as per [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake). |
+| timeout       | integer | False    | 10                          | Elasticsearch send data timeout in seconds.                  |
+
+This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration.
+
+## Enabling the Plugin
+
+### Full configuration
+
+The example below shows a complete configuration of the Plugin on a specific Route:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logger":{
+            "endpoint_addr":"http://127.0.0.1:9200",
+            "field":{
+                "index":"services",
+                "type":"collector"
+            },
+            "auth":{
+                "username":"elastic",
+                "password":"123456"
+            },
+            "ssl_verify":false,
+            "timeout": 60,
+            "retry_delay":1,
+            "buffer_duration":60,
+            "max_retry_count":0,
+            "batch_max_size":1000,
+            "inactive_timeout":5,
+            "name":"elasticsearch-logger"
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+### Minimal configuration example
+
+The example below shows a bare minimum configuration of the Plugin on a Route:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logger":{
+            "endpoint_addr":"http://127.0.0.1:9200",
+            "field":{
+                "index":"services"
+            }
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+## Example usage
+
+Once you have configured the Route to use the Plugin, when you make a request to APISIX, it will be logged in your Elasticsearch server:
+
+```shell
+curl -i http://127.0.0.1:9080/elasticsearch.do?q=hello
+HTTP/1.1 200 OK
+...
+hello, world
+```
+
+You should be able to login and search these logs from your Kibana discover:
+
+![kibana search view](../../../assets/images/plugin/elasticsearch-admin-en.png)
+
+## Metadata
+
+You can also set the format of the logs by configuring the Plugin metadata. The following configurations are available:
+
+| Name       | Type   | Required | Default                                                      | Description                                                  |
+| ---------- | ------ | -------- | ------------------------------------------------------------ | ------------------------------------------------------------ |
+| log_format | object | False    | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](https://github.com/apache/apisix/blob/master/docs/en/latest/apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. |
+
+:::info IMPORTANT
+
+Configuring the Plugin metadata is global in scope. This means that it will take effect on all Routes and Services which use the `elasticsearch-logger` Plugin.
+
+:::
+
+The example below shows how you can configure it through the Admin API:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/elasticsearch-logger \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "log_format": {
+        "host": "$host",
+        "@timestamp": "$time_iso8601",
+        "client_ip": "$remote_addr"
+    }
+}'
+```
+
+With this configuration, your logs would be formatted as shown below:
+
+```shell
+{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"}
+{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"}
+```
+
+Make a request to APISIX again:
+
+```shell
+curl -i http://127.0.0.1:9080/elasticsearch.do?q=hello

Review Comment:
   I get it, thank you.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] spacewander commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
spacewander commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r950837813


##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,166 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+local str_byte        = string.byte
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {
+            type = "string",
+            default = "default",
+            enum = {"default", "origin"},
+        },
+        endpoint_addr = core.schema.uri_def,
+        field = {
+            type = "object",
+            properties = {
+                index = { type = "string"},
+                type = { type = "string"}
+            },
+            required = {"index"}
+        },
+        auth = {
+            type = "object",
+            properties = {
+                username = {
+                    type = "string",
+                },
+                password = {
+                    type = "string",
+                },
+            },
+            required = {"username", "password"},
+        },
+        timeout = {
+            type = "integer",
+            minimum = 1,
+            default = 10
+        },
+        ssl_verify = {
+            type = "boolean",
+            default = true
+        }
+    },
+    required = { "endpoint_addr", "field" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf, ctx)
+    local entry
+    local metadata = plugin.plugin_metadata(plugin_name)
+    core.log.info("metadata: ", core.json.delay_encode(metadata))
+    if metadata and metadata.value.log_format
+        and core.table.nkeys(metadata.value.log_format) > 0
+    then
+        entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
+        core.log.info("custom log format entry: ", core.json.delay_encode(entry))
+    else
+        entry = log_util.get_full_log(ngx, conf)
+        core.log.info("full log entry: ", core.json.delay_encode(entry))
+    end
+
+    return core.json.encode({
+            create = {
+                _index = conf.field.index,
+                _type = conf.field.type
+            }
+        }) .. "\n" ..
+        core.json.encode(entry) .. "\n"
+end
+
+
+local function send_to_elasticsearch(conf, entries)
+    local httpc, err = http.new()
+    if not httpc then
+        return false, str_format("create http error: %s", err)
+    end
+
+    local uri = conf.endpoint_addr ..
+        (str_byte(conf.endpoint_addr, -1) == str_byte("/") and "_bulk" or "/_bulk")
+    local body = core.table.concat(entries, "")
+    local headers = {["Content-Type"] = "application/json"}
+    if conf.auth and conf.auth.username and conf.auth.password then

Review Comment:
   Actually, I mean `if conf.auth then`. If `conf.auth` is given, the `username` and `password` must exist, so we don't need to check them separately.
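   For illustration only, a minimal sketch of how that simplified branch could look, assuming the OpenResty environment and that the plugin sends HTTP Basic auth (the helper name is made up for the example):

   ```lua
   -- sketch: build request headers from the plugin conf; only `conf.auth` is
   -- checked because the schema requires username/password whenever auth is set
   local function build_headers(conf)
       local headers = { ["Content-Type"] = "application/json" }
       if conf.auth then
           headers["Authorization"] = "Basic " .. ngx.encode_base64(
               conf.auth.username .. ":" .. conf.auth.password)
       end
       return headers
   end
   ```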



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] spacewander commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
spacewander commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r950839684


##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,166 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+local str_byte        = string.byte
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {
+            type = "string",
+            default = "default",
+            enum = {"default", "origin"},
+        },
+        endpoint_addr = core.schema.uri_def,
+        field = {
+            type = "object",
+            properties = {
+                index = { type = "string"},
+                type = { type = "string"}
+            },
+            required = {"index"}
+        },
+        auth = {
+            type = "object",
+            properties = {
+                username = {
+                    type = "string",
+                },
+                password = {
+                    type = "string",
+                },
+            },
+            required = {"username", "password"},
+        },
+        timeout = {
+            type = "integer",
+            minimum = 1,
+            default = 10
+        },
+        ssl_verify = {
+            type = "boolean",
+            default = true
+        }
+    },
+    required = { "endpoint_addr", "field" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf, ctx)
+    local entry
+    local metadata = plugin.plugin_metadata(plugin_name)
+    core.log.info("metadata: ", core.json.delay_encode(metadata))
+    if metadata and metadata.value.log_format
+        and core.table.nkeys(metadata.value.log_format) > 0
+    then
+        entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
+        core.log.info("custom log format entry: ", core.json.delay_encode(entry))
+    else
+        entry = log_util.get_full_log(ngx, conf)
+        core.log.info("full log entry: ", core.json.delay_encode(entry))
+    end
+
+    return core.json.encode({
+            create = {
+                _index = conf.field.index,
+                _type = conf.field.type
+            }
+        }) .. "\n" ..
+        core.json.encode(entry) .. "\n"
+end
+
+
+local function send_to_elasticsearch(conf, entries)
+    local httpc, err = http.new()
+    if not httpc then
+        return false, str_format("create http error: %s", err)
+    end
+
+    local uri = conf.endpoint_addr ..
+        (str_byte(conf.endpoint_addr, -1) == str_byte("/") and "_bulk" or "/_bulk")

Review Comment:
   @ccxhwmy 
   Sorry, I don't get your point. We can use a simple pattern that checks that the configuration doesn't end with `/`. Normally, an `addr` should not end with `/`.
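   For reference, a standalone sketch of that kind of pattern check (the helper name is made up for illustration); presumably the same expression could also be used as a `pattern` in the schema:

   ```lua
   -- sketch: require that endpoint_addr does not end with "/"
   local function endpoint_addr_is_valid(addr)
       return string.match(addr, "[^/]$") ~= nil
   end

   print(endpoint_addr_is_valid("http://127.0.0.1:9200"))   -- true
   print(endpoint_addr_is_valid("http://127.0.0.1:9200/"))  -- false
   ```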



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r951027236


##########
docs/zh/latest/plugins/elasticsearch-logger.md:
##########
@@ -0,0 +1,197 @@
+---
+title: elasticsearch-logger
+keywords:
+  - APISIX
+  - API Gateway
+  - Plugin
+  - Elasticsearch-logger
+  - Logging
+description: This document introduces the elasticsearch-logger Plugin for the API gateway Apache APISIX. You can use this Plugin to push APISIX log data to Elasticsearch.
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## Description
+
+The `elasticsearch-logger` Plugin is used to forward `Apache APISIX` request logs to `Elasticsearch` for analysis and storage. When the Plugin is enabled, `Apache APISIX` collects the request context in the `Log Phase`, serializes it into the [Bulk format](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk), and submits it to the batch queue. The queued data is sent to `Elasticsearch` when the batch reaches its maximum size or when the maximum buffer flush time is reached.
+
+For more information about `Apache APISIX`'s `Batch-Processor`, see: [Batch-Processor](https://file+.vscode-resource.vscode-cdn.net/y%3A/apisix/docs/zh/latest/batch-processor.md)
+
+## Attributes
+
+| Name          | Type    | Required | Default                     | Description                                                  |
+| ------------- | ------- | -------- | --------------------------- | ------------------------------------------------------------ |
+| endpoint_addr | string  | True     |                             | Elasticsearch API                                            |
+| field         | array   | True     |                             | Elasticsearch `field` configuration.                         |
+| field.index   | string  | True     |                             | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| field.type    | string  | False    | Elasticsearch default value | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| auth          | array   | False    |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) configuration. |
+| auth.username | string  | True     |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username. |
+| auth.password | string  | True     |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password. |
+| ssl_verify    | boolean | False    | true                        | When set to `true`, enables SSL verification. See [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake) |
+| timeout       | integer | False    | 10                          | Timeout in seconds for sending data to Elasticsearch.        |
+
+This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. By default, the batch processor submits data every `5` seconds or when the data in the queue reaches `1000` entries. See the [Batch-Processor](../batch-processor.md#配置) configuration section for more information or to customize these parameters.
+
+## Enabling the Plugin
+
+You can enable the `elasticsearch-logger` Plugin on a specific Route with the commands below:
+
+### Full configuration example
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logger":{
+            "endpoint_addr":"http://127.0.0.1:9200",
+            "field":{
+                "index":"services",
+                "type":"collector"
+            },
+            "auth":{
+                "username":"elastic",
+                "password":"123456"
+            },
+            "ssl_verify":false,
+            "timeout": 60,
+            "retry_delay":1,
+            "buffer_duration":60,
+            "max_retry_count":0,
+            "batch_max_size":1000,
+            "inactive_timeout":5,
+            "name":"elasticsearch-logger"
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+### Minimal configuration example
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logger":{
+            "endpoint_addr":"http://127.0.0.1:9200",
+            "field":{
+                "index":"services"
+            }
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+## Testing the Plugin
+
+Send a request to the Route configured with the `elasticsearch-logger` Plugin:
+
+```shell
+curl -i http://127.0.0.1:9080/elasticsearch.do?q=hello
+HTTP/1.1 200 OK
+...
+hello, world
+```
+
+Now you can log in to the Kibana console to search and view the related logs.

Review Comment:
   > I was wondering if we could view this log through elasticsearch's retrieval API instead of Kibana. Just an idea.
   
   I don't know which one is better; how about using both methods?
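   For what it's worth, with the sample configuration above (index `services`, user `elastic` / `123456`) the logs could also be fetched directly through Elasticsearch's search API instead of Kibana, for example:

   ```shell
   curl "http://127.0.0.1:9200/services/_search?q=route_id:1&pretty" \
     -u elastic:123456
   ```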



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logging

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r945462579


##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx
+local core            = require("apisix.core")
+local ngx_now         = ngx.now
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+
+local DEFAULT_ELASTICSEARCH_SOURCE = "apache-apisix-elasticsearch-logging"
+
+local plugin_name = "elasticsearch-logging"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+local str_format = core.string.format
+local str_sub = string.sub
+
+
+local schema = {
+    type = "object",
+    properties = {
+        endpoint = {
+            type = "object",
+            properties = {
+                uri = core.schema.uri_def,
+                index = { type = "string"},
+                type = { type = "string"},
+                username = { type = "string"},
+                password = { type = "string"},
+                timeout = {
+                    type = "integer",
+                    minimum = 1,
+                    default = 10
+                },
+                ssl_verify = {
+                    type = "boolean",
+                    default = true
+                }
+            },
+            required = { "uri", "index" }
+        },
+    },
+    required = { "endpoint" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf)
+    local entry = log_util.get_full_log(ngx, conf)
+    return core.json.encode({
+            create = {
+                _index = conf.endpoint.index,
+                _type = conf.endpoint.type
+            }
+        }) .. "\n" ..
+        core.json.encode({
+            time = ngx_now(),
+            host = entry.server.hostname,

Review Comment:
   We just need to use `log_util.get_req_original(ctx, conf)` to get `entry`. This is JSON, and ES supports this format.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r953256395


##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,445 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_DELETE)

Review Comment:
   ```suggestion
               local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
                                  ngx.HTTP_DELETE)
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r953662074


##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,447 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_DELETE,
+                 nil,
+                 [[{"action": "delete"}]]
+                 )

Review Comment:
   > Perhaps you can add this to the documentation later: [https://github.com/apache/apisix/blob/master/docs/en/latest/internal/testing-framework.md](https://github.com/apache/apisix/blob/master/docs/en/latest/internal/testing-framework.md?rgh-link-date=2022-08-23T01%3A45%3A21Z)
   
   Should we open an issue to discuss it?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r953416672


##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,433 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9200",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 3: test route (success write)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] successfully processed the entries
+
+
+
+=== TEST 4: set route (auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        auth = {
+                            username = "elastic",
+                            password = "123456"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 5: test route (auth success)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] successfully processed the entries
+
+
+
+=== TEST 6: set route (no auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 7: test route (no auth, failed)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] failed to process entries
+Batch Processor[elasticsearch-logger] exceeded the max_retry_count
+
+
+
+=== TEST 8: set route (error auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        auth = {
+                            username = "elastic",
+                            password = "111111"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 9: test route (error auth failed)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] failed to process entries
+Batch Processor[elasticsearch-logger] exceeded the max_retry_count
+
+
+
+=== TEST 10: add plugin metadata
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_PUT,
+                [[{
+                    "log_format": {
+                        "custom_host": "$host",
+                        "custom_timestamp": "$time_iso8601",
+                        "custom_client_ip": "$remote_addr"
+                    }
+                }]]
+                )
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 11: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 12: hit route and report custom elasticsearch logger
+--- extra_yaml_config
+nginx_config:
+    error_log_level:  info

Review Comment:
   > @ccxhwmy For this purpose, you can use `--- log_level: info`.
   
   Thank you.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logging

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r945416796


##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx
+local core            = require("apisix.core")
+local ngx_now         = ngx.now
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+
+local DEFAULT_ELASTICSEARCH_SOURCE = "apache-apisix-elasticsearch-logging"
+
+local plugin_name = "elasticsearch-logging"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+local str_format = core.string.format
+local str_sub = string.sub
+
+
+local schema = {
+    type = "object",
+    properties = {
+        endpoint = {
+            type = "object",
+            properties = {
+                uri = core.schema.uri_def,
+                index = { type = "string"},
+                type = { type = "string"},
+                username = { type = "string"},
+                password = { type = "string"},
+                timeout = {
+                    type = "integer",
+                    minimum = 1,
+                    default = 10
+                },
+                ssl_verify = {
+                    type = "boolean",
+                    default = true
+                }
+            },
+            required = { "uri", "index" }
+        },
+    },
+    required = { "endpoint" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf)
+    local entry = log_util.get_full_log(ngx, conf)
+    return core.json.encode({
+            create = {
+                _index = conf.endpoint.index,
+                _type = conf.endpoint.type
+            }
+        }) .. "\n" ..
+        core.json.encode({
+            time = ngx_now(),
+            host = entry.server.hostname,

Review Comment:
   > Why should we invent a format structure for a specific plugin?
   
   I was referring to `splunk-hec-logging`: https://github.com/apache/apisix/blob/689e4f11f4c42d3d1bf5cdf82d5aaa3976fc715d/apisix/plugins/splunk-hec-logging.lua#L77
   
   How about `core.json.encode(entry)` directly?
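   For context, a small standalone sketch (using `lua-cjson`; the field values are placeholders) of what encoding the entry directly produces for the Elasticsearch bulk API, i.e. newline-delimited JSON with one action line and one document line:

   ```lua
   local cjson = require("cjson")

   -- one action line followed by one document line, each newline-terminated
   local action = cjson.encode({ create = { _index = "services" } })
   local entry  = cjson.encode({ host = "localhost", route_id = "1" })

   local bulk_body = action .. "\n" .. entry .. "\n"
   print(bulk_body)
   ```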



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r945835824


##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx
+local core            = require("apisix.core")
+local ngx_now         = ngx.now
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+
+local DEFAULT_ELASTICSEARCH_SOURCE = "apache-apisix-elasticsearch-logging"
+
+local plugin_name = "elasticsearch-logging"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+local str_format = core.string.format
+local str_sub = string.sub
+
+
+local schema = {
+    type = "object",
+    properties = {
+        endpoint = {
+            type = "object",
+            properties = {
+                uri = core.schema.uri_def,
+                index = { type = "string"},
+                type = { type = "string"},
+                username = { type = "string"},
+                password = { type = "string"},
+                timeout = {
+                    type = "integer",
+                    minimum = 1,
+                    default = 10
+                },
+                ssl_verify = {
+                    type = "boolean",
+                    default = true
+                }
+            },
+            required = { "uri", "index" }
+        },
+    },
+    required = { "endpoint" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf)
+    local entry = log_util.get_full_log(ngx, conf)
+    return core.json.encode({
+            create = {
+                _index = conf.endpoint.index,
+                _type = conf.endpoint.type
+            }
+        }) .. "\n" ..
+        core.json.encode({
+            time = ngx_now(),
+            host = entry.server.hostname,

Review Comment:
   My mistake. A JSON string is OK. We can follow this.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r945708290


##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx
+local core            = require("apisix.core")
+local ngx_now         = ngx.now
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+
+local DEFAULT_ELASTICSEARCH_SOURCE = "apache-apisix-elasticsearch-logging"
+
+local plugin_name = "elasticsearch-logging"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+local str_format = core.string.format
+local str_sub = string.sub
+
+
+local schema = {
+    type = "object",
+    properties = {
+        endpoint = {
+            type = "object",
+            properties = {
+                uri = core.schema.uri_def,
+                index = { type = "string"},
+                type = { type = "string"},
+                username = { type = "string"},
+                password = { type = "string"},
+                timeout = {
+                    type = "integer",
+                    minimum = 1,
+                    default = 10
+                },
+                ssl_verify = {
+                    type = "boolean",
+                    default = true
+                }
+            },
+            required = { "uri", "index" }
+        },
+    },
+    required = { "endpoint" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf)
+    local entry = log_util.get_full_log(ngx, conf)

Review Comment:
   > We do not reference the code of another plugin in one plugin. Unless we pull some generic code into a common module.
   
   > Please also support the custom log format.
   
   Is the `custom log format` something like this:
   https://github.com/apache/apisix/blob/master/docs/en/latest/plugins/kafka-logger.md#metadata



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] spacewander commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
spacewander commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r950839174


##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,433 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9200",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 3: test route (success write)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] successfully processed the entries
+
+
+
+=== TEST 4: set route (auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        auth = {
+                            username = "elastic",
+                            password = "123456"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 5: test route (auth success)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] successfully processed the entries
+
+
+
+=== TEST 6: set route (no auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 7: test route (no auth, failed)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] failed to process entries
+Batch Processor[elasticsearch-logger] exceeded the max_retry_count
+
+
+
+=== TEST 8: set route (error auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        auth = {
+                            username = "elastic",
+                            password = "111111"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 9: test route (error auth failed)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] failed to process entries
+Batch Processor[elasticsearch-logger] exceeded the max_retry_count
+
+
+
+=== TEST 10: add plugin metadata
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_PUT,
+                [[{
+                    "log_format": {
+                        "custom_host": "$host",
+                        "custom_timestamp": "$time_iso8601",
+                        "custom_client_ip": "$remote_addr"
+                    }
+                }]]
+                )
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 11: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 12: hit route and report custom elasticsearch logger
+--- extra_yaml_config
+nginx_config:
+    error_log_level:  info
+--- request
+GET /hello
+--- response_body
+hello world
+--- wait: 2
+--- error_log
+custom log format entry:
+
+
+
+=== TEST 13: hit route and check custom elasticsearch logger
+--- extra_init_by_lua
+    local core = require("apisix.core")
+    local http = require("resty.http")
+    local ngx_re = require("ngx.re")
+    http.request_uri = function(self, uri, params)
+        if not params.body or type(params.body) ~= "string" then
+            return nil, "invalid params body"
+        end
+
+        local arr = ngx_re.split(params.body, "\n")
+        if not arr or #arr ~= 2 then
+            return nil, "invalid params body"
+        end
+
+        entry = core.json.decode(arr[2])
+        if not entry["custom_host"] then

Review Comment:
   Let's check the value of `entry["custom_host"]` too.
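   For illustration, the extra assertion could look roughly like this inside the mocked `http.request_uri` (a sketch only; the expected value `"localhost"` is an assumption and depends on the Host header the test request actually sends):

   ```lua
   -- sketch: verify the value of custom_host, not just its presence
   -- (context: inside the mocked http.request_uri, where `arr` is the split
   -- bulk body and `core` is apisix.core; "localhost" is an assumed value)
   local entry = core.json.decode(arr[2])
   if not entry["custom_host"] then
       return nil, "missing custom_host"
   end
   if entry["custom_host"] ~= "localhost" then
       return nil, "unexpected custom_host: " .. tostring(entry["custom_host"])
   end
   ```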



##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,433 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimal configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9200",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 3: test route (success write)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] successfully processed the entries
+
+
+
+=== TEST 4: set route (auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        auth = {
+                            username = "elastic",
+                            password = "123456"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 5: test route (auth success)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] successfully processed the entries
+
+
+
+=== TEST 6: set route (no auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 7: test route (no auth, failed)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] failed to process entries
+Batch Processor[elasticsearch-logger] exceeded the max_retry_count
+
+
+
+=== TEST 8: set route (error auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        auth = {
+                            username = "elastic",
+                            password = "111111"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 9: test route (error auth failed)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] failed to process entries
+Batch Processor[elasticsearch-logger] exceeded the max_retry_count
+
+
+
+=== TEST 10: add plugin metadata
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_PUT,
+                [[{
+                    "log_format": {
+                        "custom_host": "$host",
+                        "custom_timestamp": "$time_iso8601",
+                        "custom_client_ip": "$remote_addr"
+                    }
+                }]]
+                )
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 11: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 12: hit route and report custom elasticsearch logger
+--- extra_yaml_config
+nginx_config:
+    error_log_level:  info
+--- request
+GET /hello
+--- response_body
+hello world
+--- wait: 2
+--- error_log
+custom log format entry:
+
+
+
+=== TEST 13: hit route and check custom elasticsearch logger

Review Comment:
   We should check the value for the default log format too.
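   For example, a default-format check might look roughly like this (a sketch; it assumes the same kind of mocked `http.request_uri` is reused, and that the field names follow the full log built by `log_util.get_full_log`):

   ```lua
   -- sketch: check a couple of values from the default (full) log format
   -- (context: inside the mocked http.request_uri; `arr` is the split bulk body)
   local entry = core.json.decode(arr[2])
   if entry.route_id ~= "1" then
       return nil, "expected route_id 1, got: " .. tostring(entry.route_id)
   end
   if entry.client_ip ~= "127.0.0.1" then
       return nil, "unexpected client_ip: " .. tostring(entry.client_ip)
   end
   ```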



##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,433 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimal configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9200",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 3: test route (success write)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] successfully processed the entries
+
+
+
+=== TEST 4: set route (auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        auth = {
+                            username = "elastic",
+                            password = "123456"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 5: test route (auth success)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] successfully processed the entries
+
+
+
+=== TEST 6: set route (no auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 7: test route (no auth, failed)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] failed to process entries
+Batch Processor[elasticsearch-logger] exceeded the max_retry_count
+
+
+
+=== TEST 8: set route (error auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        auth = {
+                            username = "elastic",
+                            password = "111111"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 9: test route (error auth failed)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] failed to process entries
+Batch Processor[elasticsearch-logger] exceeded the max_retry_count
+
+
+
+=== TEST 10: add plugin metadata
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_PUT,
+                [[{
+                    "log_format": {
+                        "custom_host": "$host",
+                        "custom_timestamp": "$time_iso8601",
+                        "custom_client_ip": "$remote_addr"
+                    }
+                }]]
+                )
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 11: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 12: hit route and report custom elasticsearch logger
+--- extra_yaml_config
+nginx_config:
+    error_log_level:  info

Review Comment:
   The error log level of the test framework is controlled by `log_level('xxx');`, not the yaml config.



##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,433 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimal configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9200",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 3: test route (success write)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] successfully processed the entries
+
+
+
+=== TEST 4: set route (auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        auth = {
+                            username = "elastic",
+                            password = "123456"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t

Review Comment:
   We already set this section at the top of the file.





[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r951342853


##########
docs/zh/latest/plugins/elasticsearch-logger.md:
##########
@@ -0,0 +1,197 @@
+---
+title: elasticsearch-logger
+keywords:
+  - APISIX
+  - API Gateway
+  - Plugin
+  - Elasticsearch-logger
+  - Logger
+description: This document describes the elasticsearch-logger plugin of the Apache APISIX API gateway, which you can use to push APISIX log data to Elasticsearch.
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## Description
+
+The `elasticsearch-logger` plugin forwards `Apache APISIX` request logs to `Elasticsearch` for analysis and storage. When the plugin is enabled, `Apache APISIX` collects the request context in the `Log Phase`, serializes it into the [Bulk format](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk), and submits it to the batch queue. The queued data is flushed to `Elasticsearch` once the batch reaches its maximum size or the buffer refresh interval expires.
+
+For more information on the `Batch-Processor` in `Apache APISIX`, see [Batch-Processor](../batch-processor.md).
+
+## Attributes
+
+| Name          | Type    | Required | Default               | Description                                                  |
+| ------------- | ------- | -------- | --------------------- | ------------------------------------------------------------ |
+| endpoint_addr | string  | Yes      |                       | Elasticsearch API endpoint address                           |
+| field         | array   | Yes      |                       | Elasticsearch `field` configuration                          |
+| field.index   | string  | Yes      |                       | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| field.type    | string  | No       | Elasticsearch default | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| auth          | array   | No       |                       | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) configuration |
+| auth.username | string  | Yes      |                       | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| auth.password | string  | Yes      |                       | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| ssl_verify    | boolean | No       | true                  | When set to `true`, SSL verification is enabled, see [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake) |
+| timeout       | integer | No       | 10                    | Timeout for requests sent to Elasticsearch                   |
+
+This plugin supports using the batch processor to aggregate and process entries (logs and data) in batches, which avoids submitting data too frequently. By default, the batch processor submits data every `5` seconds or when the queue reaches `1000` entries. For details on the batch processor and how to tune its parameters, see the [Batch-Processor](../batch-processor.md#配置) configuration section.
+
+## Enable Plugin
+
+You can enable the `elasticsearch-logger` plugin on a specific route with the commands below:
+
+### Full configuration example
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logger":{
+            "endpoint_addr":"http://127.0.0.1:9200",
+            "field":{
+                "index":"services",
+                "type":"collector"
+            },
+            "auth":{
+                "username":"elastic",
+                "password":"123456"
+            },
+            "ssl_verify":false,
+            "timeout": 60,
+            "retry_delay":1,
+            "buffer_duration":60,
+            "max_retry_count":0,
+            "batch_max_size":1000,
+            "inactive_timeout":5,
+            "name":"elasticsearch-logger"
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+### Minimal configuration example
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logger":{
+            "endpoint_addr":"http://127.0.0.1:9200",
+            "field":{
+                "index":"services"
+            }
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+## Test Plugin
+
+Send a request to a route that has the `elasticsearch-logger` plugin configured:
+
+```shell
+curl -i http://127.0.0.1:9080/elasticsearch.do?q=hello
+HTTP/1.1 200 OK
+...
+hello, world
+```
+
+You can now log in to the Kibana console to search and view the related logs.

Review Comment:
   Yes, this would be more concise; I'll modify it later.





[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r951339957


##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,447 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimal configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_DELETE,
+                 nil,
+                 [[{"action": "delete"}]]
+                 )

Review Comment:
   > we have `no_shuffle` on the boot of this test file, this means that the test cases will be executed in order.
   > 
   > I think the existence of `apisix/admin/plugin_metadata/elasticsearch-logger` should be determined as far as the test file ` t/plugin/elasticsearch-logger.t` is concerned.
   > 
   > One CI test detail: before running a new test file each time, the test shell clears ETCD and initializes etcd with `apisix init_etcd` to prevent dirty data from other test files from affecting the current test file.
   
   Yes, but it failed when I ran `elasticsearch-logger.t` on its own:
   ```shell
   prove -I../test_nginx/lib -r t/plugin/elasticsearch-logger.t
   ```
   On second thought, maybe it is better to add the `DELETE` code; how about adding a note to explain it?
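   For example, the cleanup call plus such a note might look like this (a sketch; the comment wording is illustrative, while the call itself mirrors the code quoted above):

   ```lua
   -- clean up any elasticsearch-logger plugin_metadata left over from a
   -- previous standalone run of this file, so the test starts from a known
   -- state (CI clears etcd before each test file, a local `prove` run may not)
   local t = require("lib.test_admin").test
   local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
        ngx.HTTP_DELETE,
        nil,
        [[{"action": "delete"}]]
        )
   ```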





[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r949724071


##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,166 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+local str_byte        = string.byte
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {
+            type = "string",
+            default = "default",
+            enum = {"default", "origin"},
+        },
+        endpoint_addr = core.schema.uri_def,
+        field = {
+            type = "object",
+            properties = {
+                index = { type = "string"},
+                type = { type = "string"}
+            },
+            required = {"index"}
+        },
+        auth = {
+            type = "object",
+            properties = {
+                username = {
+                    type = "string",
+                },
+                password = {
+                    type = "string",
+                },
+            },
+            required = {"username", "password"},
+        },
+        timeout = {
+            type = "integer",
+            minimum = 1,
+            default = 10
+        },
+        ssl_verify = {
+            type = "boolean",
+            default = true
+        }
+    },
+    required = { "endpoint_addr", "field" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf, ctx)
+    local entry
+    local metadata = plugin.plugin_metadata(plugin_name)
+    core.log.info("metadata: ", core.json.delay_encode(metadata))
+    if metadata and metadata.value.log_format
+    and core.table.nkeys(metadata.value.log_format) > 0
+    then
+        entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
+        core.log.info("custom log format entry: ", core.json.delay_encode(entry))
+    else
+        entry = log_util.get_full_log(ngx, conf)
+        core.log.info("full log entry: ", core.json.delay_encode(entry))
+    end
+
+    return core.json.encode({
+            create = {
+                _index = conf.field.index,
+                _type = conf.field.type
+            }
+        }) .. "\n" ..
+        core.json.encode(entry) .. "\n"

Review Comment:
   from: https://github.com/apache/apisix/blob/308bdd967792bb7b34355b98dcf5d6cdeae078f6/apisix/plugins/kafka-logger.lua#L177-L199
   
   the `entry` is just `log_util.get_custom_format_log(ctx, metadata.value.log_format)` or `log_util.get_full_log(ngx, conf)`?
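   If the idea is to have `get_logger_entry` return just the plain log table (as kafka-logger does) and assemble the Bulk body only when the batch is sent, one possible shape is sketched below; this is an assumption about the suggested refactor, not code from the PR, and it reuses the module locals (`core`, `plugin`, `log_util`, `plugin_name`) shown above:

   ```lua
   -- sketch: the log phase returns the plain entry ...
   local function get_logger_entry(conf, ctx)
       local metadata = plugin.plugin_metadata(plugin_name)
       if metadata and metadata.value.log_format
          and core.table.nkeys(metadata.value.log_format) > 0 then
           return log_util.get_custom_format_log(ctx, metadata.value.log_format)
       end
       return log_util.get_full_log(ngx, conf)
   end

   -- ... and the Bulk (NDJSON) body is built only when the batch is flushed
   local function build_bulk_body(conf, entries)
       local lines = {}
       for _, entry in ipairs(entries) do
           core.table.insert(lines, core.json.encode({
               create = { _index = conf.field.index, _type = conf.field.type }
           }))
           core.table.insert(lines, core.json.encode(entry))
       end
       return core.table.concat(lines, "\n") .. "\n"
   end
   ```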





[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r953254030


##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,183 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+local str_byte        = string.byte
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {
+            type = "string",
+            default = "default",
+            enum = {"default", "origin"},
+        },
+        endpoint_addr = {
+            type = "string",
+            pattern = "[^/]$",
+        },
+        field = {
+            type = "object",
+            properties = {
+                index = { type = "string"},
+                type = { type = "string"}
+            },
+            required = {"index"}
+        },
+        auth = {
+            type = "object",
+            properties = {
+                username = {
+                    type = "string",
+                    minLength = 1
+                },
+                password = {
+                    type = "string",
+                    minLength = 1
+                },
+            },
+            required = {"username", "password"},
+        },
+        timeout = {
+            type = "integer",
+            minimum = 1,
+            default = 10
+        },
+        ssl_verify = {
+            type = "boolean",
+            default = true
+        }
+    },
+    required = { "endpoint_addr", "field" },
+}
+
+
+local metadata_schema = {
+    type = "object",
+    properties = {
+        log_format = log_util.metadata_schema_log_format,
+    },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+    metadata_schema = metadata_schema,
+}
+
+
+function _M.check_schema(conf, schema_type)
+    if schema_type == core.schema.TYPE_METADATA then
+        return core.schema.check(metadata_schema, conf)
+    end
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf, ctx)
+    local entry
+    local metadata = plugin.plugin_metadata(plugin_name)
+    core.log.info("metadata: ", core.json.delay_encode(metadata))
+    if metadata and metadata.value.log_format
+        and core.table.nkeys(metadata.value.log_format) > 0
+    then
+        entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
+        core.log.info("custom log format entry: ", core.json.delay_encode(entry))
+    else
+        entry = log_util.get_full_log(ngx, conf)
+        core.log.info("full log entry: ", core.json.delay_encode(entry))
+    end
+
+    return core.json.encode({
+            create = {
+                _index = conf.field.index,
+                _type = conf.field.type
+            }
+        }) .. "\n" ..
+        core.json.encode(entry) .. "\n"
+end
+
+
+local function send_to_elasticsearch(conf, entries)
+    local httpc, err = http.new()
+    if not httpc then
+        return false, str_format("create http error: %s", err)
+    end
+
+    local uri = conf.endpoint_addr ..
+        (str_byte(conf.endpoint_addr, -1) == str_byte("/") and "_bulk" or "/_bulk")

Review Comment:
   ```suggestion
       local uri = conf.endpoint_addr ..
                   (str_byte(conf.endpoint_addr, -1) == str_byte("/") and "_bulk" or "/_bulk")
   ```





[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r953416939


##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,166 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+local str_byte        = string.byte
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {
+            type = "string",
+            default = "default",
+            enum = {"default", "origin"},
+        },
+        endpoint_addr = core.schema.uri_def,
+        field = {
+            type = "object",
+            properties = {
+                index = { type = "string"},
+                type = { type = "string"}
+            },
+            required = {"index"}
+        },
+        auth = {
+            type = "object",
+            properties = {
+                username = {
+                    type = "string",
+                },
+                password = {
+                    type = "string",
+                },
+            },
+            required = {"username", "password"},
+        },
+        timeout = {
+            type = "integer",
+            minimum = 1,
+            default = 10
+        },
+        ssl_verify = {
+            type = "boolean",
+            default = true
+        }
+    },
+    required = { "endpoint_addr", "field" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf, ctx)
+    local entry
+    local metadata = plugin.plugin_metadata(plugin_name)
+    core.log.info("metadata: ", core.json.delay_encode(metadata))
+    if metadata and metadata.value.log_format
+        and core.table.nkeys(metadata.value.log_format) > 0
+    then
+        entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
+        core.log.info("custom log format entry: ", core.json.delay_encode(entry))
+    else
+        entry = log_util.get_full_log(ngx, conf)
+        core.log.info("full log entry: ", core.json.delay_encode(entry))
+    end
+
+    return core.json.encode({
+            create = {
+                _index = conf.field.index,
+                _type = conf.field.type
+            }
+        }) .. "\n" ..
+        core.json.encode(entry) .. "\n"
+end
+
+
+local function send_to_elasticsearch(conf, entries)
+    local httpc, err = http.new()
+    if not httpc then
+        return false, str_format("create http error: %s", err)
+    end
+
+    local uri = conf.endpoint_addr ..
+        (str_byte(conf.endpoint_addr, -1) == str_byte("/") and "_bulk" or "/_bulk")

Review Comment:
   > @ccxhwmy What about:
   > 
   > ```
   > addr = {
   > 	type = "string",
   > 	pattern = "[^/]$",
   > }
   > ```
   
   Thank you.
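   For illustration, here is a minimal, self-contained sketch of how the suggested pattern could be wired into the schema (hypothetical snippet, not the code that was merged):

   ```lua
   -- Illustrative only: a schema field using the suggested pattern, which
   -- rejects a trailing "/" in endpoint_addr so the plugin can always
   -- append "/_bulk" itself.
   local core = require("apisix.core")

   local schema = {
       type = "object",
       properties = {
           endpoint_addr = {
               type = "string",
               pattern = "[^/]$",  -- the last character must not be "/"
           },
       },
       required = {"endpoint_addr"},
   }

   local ok, err = core.schema.check(schema, {endpoint_addr = "http://127.0.0.1:9200/"})
   -- ok is false here because the address ends with "/"
   ```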





[GitHub] [apisix] ccxhwmy commented on pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on PR #7643:
URL: https://github.com/apache/apisix/pull/7643#issuecomment-1231165321

   > > Do we need to re-run the CI?
   > 
   > done
   
   @tzssangglass Please re-run the CI. Thanks.




[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r945705071


##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx
+local core            = require("apisix.core")
+local ngx_now         = ngx.now
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+
+local DEFAULT_ELASTICSEARCH_SOURCE = "apache-apisix-elasticsearch-logging"
+
+local plugin_name = "elasticsearch-logging"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+local str_format = core.string.format
+local str_sub = string.sub
+
+
+local schema = {
+    type = "object",
+    properties = {
+        endpoint = {
+            type = "object",
+            properties = {
+                uri = core.schema.uri_def,
+                index = { type = "string"},
+                type = { type = "string"},
+                username = { type = "string"},
+                password = { type = "string"},
+                timeout = {
+                    type = "integer",
+                    minimum = 1,
+                    default = 10
+                },
+                ssl_verify = {
+                    type = "boolean",
+                    default = true
+                }
+            },
+            required = { "uri", "index" }
+        },
+    },
+    required = { "endpoint" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf)
+    local entry = log_util.get_full_log(ngx, conf)
+    return core.json.encode({
+            create = {
+                _index = conf.endpoint.index,
+                _type = conf.endpoint.type
+            }
+        }) .. "\n" ..
+        core.json.encode({
+            time = ngx_now(),
+            host = entry.server.hostname,

Review Comment:
   > we just need to use `log_util.get_req_original(ctx, conf)` to get `entry`, This is a json, and ES supports this format.
   
   I found that `log_util.get_req_original(ctx, conf)` returns a string instead of JSON:
   https://github.com/apache/apisix/blob/c4d5f2fca5b9ef98b551c769e2b1565185ed9630/apisix/utils/log-util.lua#L196-L211
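   A minimal sketch of what that implies for the caller (hypothetical handling, not code from the PR): if the helper already returns a serialized string, it should not be passed through `core.json.encode` again.

   ```lua
   -- Hypothetical sketch: build one bulk line from an entry that may be
   -- either a table or an already-serialized string.
   local core = require("apisix.core")

   local function to_json_line(entry)
       if type(entry) == "string" then
           return entry                   -- already serialized, use as-is
       end
       return core.json.encode(entry)     -- a table: encode it into one JSON line
   end
   ```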





[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logging

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r945464487


##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx
+local core            = require("apisix.core")
+local ngx_now         = ngx.now
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+
+local DEFAULT_ELASTICSEARCH_SOURCE = "apache-apisix-elasticsearch-logging"
+
+local plugin_name = "elasticsearch-logging"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+local str_format = core.string.format
+local str_sub = string.sub
+
+
+local schema = {
+    type = "object",
+    properties = {
+        endpoint = {
+            type = "object",
+            properties = {
+                uri = core.schema.uri_def,
+                index = { type = "string"},
+                type = { type = "string"},
+                username = { type = "string"},
+                password = { type = "string"},
+                timeout = {
+                    type = "integer",
+                    minimum = 1,
+                    default = 10
+                },
+                ssl_verify = {
+                    type = "boolean",
+                    default = true
+                }
+            },
+            required = { "uri", "index" }
+        },
+    },
+    required = { "endpoint" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf)
+    local entry = log_util.get_full_log(ngx, conf)
+    return core.json.encode({
+            create = {
+                _index = conf.endpoint.index,
+                _type = conf.endpoint.type
+            }
+        }) .. "\n" ..
+        core.json.encode({
+            time = ngx_now(),
+            host = entry.server.hostname,
+            source = DEFAULT_ELASTICSEARCH_SOURCE,
+            request_url = entry.request.url,
+            request_method = entry.request.method,
+            request_headers = entry.request.headers,
+            request_query = entry.request.querystring,
+            request_size = entry.request.size,
+            response_headers = entry.response.headers,
+            response_status = entry.response.status,
+            response_size = entry.response.size,
+            latency = entry.latency,
+            upstream = entry.upstream,
+        }) .. "\n"
+end
+
+
+local function send_to_elasticsearch(conf, entries)
+    local httpc, err = http.new()
+    if not httpc then
+        return false, str_format("create http error: %s", err)
+    end
+
+    local uri = conf.endpoint.uri ..
+        (str_sub(conf.endpoint.uri, -1) == "/" and "_bulk" or "/_bulk")
+    local body = core.table.concat(entries, "")
+    local headers = {["Content-Type"] = "application/json"}
+    if conf.endpoint.username and conf.endpoint.password then
+        local authorization = "Basic " .. ngx.encode_base64(
+            conf.endpoint.username .. ":" .. conf.endpoint.password
+        )
+        headers["Authorization"] = authorization
+    end
+
+    core.log.info("uri: ", uri, ", body: ", body, ", headers: ", core.json.encode(headers))
+
+    httpc:set_timeout(conf.endpoint.timeout * 1000)
+    local resp, err = httpc:request_uri(uri, {
+        ssl_verify = conf.endpoint.ssl_verify,
+        method = "POST",
+        headers = headers,
+        body = body
+    })
+    if not resp then
+        return false,  str_format("RequestError: %s", err or "")

Review Comment:
   ```suggestion
           return nil,  err
   ```





[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r946858681


##########
t/plugin/elasticsearch-logging.t:
##########
@@ -0,0 +1,299 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint = {
+                        uri = "http://127.0.0.1:9200",
+                        index = "services",
+                        type = "collector",
+                        timeout = 60,
+                        username = "elastic",
+                        password = "123456",
+                        ssl_verify = false
+                    },
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint = {
+                        uri = "http://127.0.0.1:9200",
+                        index = "services"
+                    }
+                },
+                -- property "uri" is required
+                {
+                    endpoint = {
+                        index = "services",
+                    }
+                },
+                -- property "index" is required
+                {
+                    endpoint = {
+                        uri = "http://127.0.0.1:9200",
+                    }
+                },
+                -- property "uri" validation failed
+                {
+                    endpoint = {
+                        uri = "127.0.0.1:9200",
+                        index = "services"
+                    }
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logging")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint" validation failed: property "uri" is required
+property "endpoint" validation failed: property "index" is required
+property "endpoint" validation failed: property "uri" validation failed.*
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logging"] = {
+                        endpoint = {
+                            uri = "http://127.0.0.1:9200",
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 3: test route (success write)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+
+
+
+=== TEST 4: set route (auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logging"] = {
+                        endpoint = {
+                            uri = "http://127.0.0.1:9201",
+                            index = "services",
+                            username = "elastic",
+                            password = "123456"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 5: test route (auth success)
+--- request
+GET /hello
+--- wait: 2
+--- response_body

Review Comment:
   > > We should check the data sent to the elasticsearch, via injecting like: [#7593 (comment)](https://github.com/apache/apisix/pull/7593#issuecomment-1210208755)
   > 
   > Does you mean I should hook the function `httpc:request_uri` and mock it with my own mock_request_uri, and check the request body with the mock_request_uri? If so, I am not quite understand the nessary to check request body.
   
   @spacewander 
   Please take a look at this question when you have time. Thank you.
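   For reference, a rough sketch of the injection idea being discussed (hypothetical test helper, not code from this PR): override `request_uri` from `resty.http` so the test can inspect the bulk body that would have been sent to Elasticsearch.

   ```lua
   -- Rough sketch, for illustration only: patch resty.http so the request
   -- body can be asserted in a test instead of hitting a real Elasticsearch.
   local http = require("resty.http")

   local original_request_uri = http.request_uri
   http.request_uri = function(self, uri, params)
       -- print the bulk payload so the test can match it with --- error_log
       ngx.log(ngx.WARN, "mocked elasticsearch request body: ", params.body)
       return {status = 200, body = "{}"}  -- pretend the write succeeded
   end
   -- restore later with: http.request_uri = original_request_uri
   ```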





[GitHub] [apisix] spacewander commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
spacewander commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r949827068


##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,301 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9200",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 3: test route (success write)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world

Review Comment:
   We need to check the `Batch Processor[...] successfully processed the entries` log to make sure it is successfully written.





[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r950347362


##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,166 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+local str_byte        = string.byte
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {
+            type = "string",
+            default = "default",
+            enum = {"default", "origin"},
+        },
+        endpoint_addr = core.schema.uri_def,
+        field = {
+            type = "object",
+            properties = {
+                index = { type = "string"},
+                type = { type = "string"}
+            },
+            required = {"index"}
+        },
+        auth = {
+            type = "object",
+            properties = {
+                username = {
+                    type = "string",
+                },
+                password = {
+                    type = "string",
+                },
+            },
+            required = {"username", "password"},
+        },
+        timeout = {
+            type = "integer",
+            minimum = 1,
+            default = 10
+        },
+        ssl_verify = {
+            type = "boolean",
+            default = true
+        }
+    },
+    required = { "endpoint_addr", "field" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf, ctx)
+    local entry
+    local metadata = plugin.plugin_metadata(plugin_name)
+    core.log.info("metadata: ", core.json.delay_encode(metadata))
+    if metadata and metadata.value.log_format
+    and core.table.nkeys(metadata.value.log_format) > 0
+    then
+        entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
+        core.log.info("custom log format entry: ", core.json.delay_encode(entry))
+    else
+        entry = log_util.get_full_log(ngx, conf)
+        core.log.info("full log entry: ", core.json.delay_encode(entry))
+    end
+
+    return core.json.encode({
+            create = {
+                _index = conf.field.index,
+                _type = conf.field.type
+            }
+        }) .. "\n" ..
+        core.json.encode(entry) .. "\n"

Review Comment:
   > from:
   > 
   > https://github.com/apache/apisix/blob/308bdd967792bb7b34355b98dcf5d6cdeae078f6/apisix/plugins/kafka-logger.lua#L177-L199
   > 
   > the `entry` is just `log_util.get_custom_format_log(ctx, metadata.value.log_format)` or `log_util.get_full_log(ngx, conf)`?
   
   But the `kafka-logger` plugin sends a `string` in the end; it does the JSON encoding here:
   https://github.com/apache/apisix/blob/308bdd967792bb7b34355b98dcf5d6cdeae078f6/apisix/plugins/kafka-logger.lua#L230-L247
   
   I think I must have misunderstood what you meant somewhere.
   Do you mean that the `log` phase should return a `table`, and that the callback function of `batch_processor_manager` should reassemble it into a `json string` and send it?
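   As a rough sketch of that interpretation (illustrative wiring only, reusing the field names from this PR): keep plain tables in the `log` phase and only build the `_bulk` lines inside the batch processor callback.

   ```lua
   -- Illustrative sketch: entries arrive as Lua tables and are turned into
   -- Elasticsearch bulk lines only when the batch is flushed.
   local core = require("apisix.core")

   local function build_bulk_body(conf, entries)
       local lines = {}
       for _, entry in ipairs(entries) do
           core.table.insert(lines, core.json.encode({
               create = {
                   _index = conf.field.index,
                   _type = conf.field.type,
               }
           }))
           core.table.insert(lines, core.json.encode(entry))
       end
       return core.table.concat(lines, "\n") .. "\n"
   end
   ```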





[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r950633920


##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,166 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+local str_byte        = string.byte
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {
+            type = "string",
+            default = "default",
+            enum = {"default", "origin"},
+        },
+        endpoint_addr = core.schema.uri_def,
+        field = {
+            type = "object",
+            properties = {
+                index = { type = "string"},
+                type = { type = "string"}
+            },
+            required = {"index"}
+        },
+        auth = {
+            type = "object",
+            properties = {
+                username = {
+                    type = "string",
+                },
+                password = {
+                    type = "string",
+                },
+            },
+            required = {"username", "password"},
+        },
+        timeout = {
+            type = "integer",
+            minimum = 1,
+            default = 10
+        },
+        ssl_verify = {
+            type = "boolean",
+            default = true
+        }
+    },
+    required = { "endpoint_addr", "field" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf, ctx)
+    local entry
+    local metadata = plugin.plugin_metadata(plugin_name)
+    core.log.info("metadata: ", core.json.delay_encode(metadata))
+    if metadata and metadata.value.log_format
+        and core.table.nkeys(metadata.value.log_format) > 0
+    then
+        entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
+        core.log.info("custom log format entry: ", core.json.delay_encode(entry))
+    else
+        entry = log_util.get_full_log(ngx, conf)
+        core.log.info("full log entry: ", core.json.delay_encode(entry))
+    end
+
+    return core.json.encode({
+            create = {
+                _index = conf.field.index,
+                _type = conf.field.type
+            }
+        }) .. "\n" ..
+        core.json.encode(entry) .. "\n"
+end
+
+
+local function send_to_elasticsearch(conf, entries)
+    local httpc, err = http.new()
+    if not httpc then
+        return false, str_format("create http error: %s", err)
+    end
+
+    local uri = conf.endpoint_addr ..
+        (str_byte(conf.endpoint_addr, -1) == str_byte("/") and "_bulk" or "/_bulk")

Review Comment:
   I defined `endpoint_addr` as:
   ```
   endpoint_addr = {
               type = "string",
               pattern = [=[^[^\/]+:\/\/([\da-zA-Z.-]+|\[[\da-fA-F:]+\])(:\d+)?/]=]
   }
   ```
   But if `endpoint_addr` is set without the trailing "/", APISIX returns an error like this:
   ```shell
   {
     "error_msg": "failed to check the configuration of plugin elasticsearch-logger err: property \"endpoint_addr\" validation failed: failed to match pattern \"^[^\\\\/]+:\\\\/\\\\/([\\\\da-zA-Z.-]+|\\\\[[\\\\da-fA-F:]+\\\\])(:\\\\d+)?/\" with \"http://127.0.0.1:9200\""
   }
   ```
   The error message is unfriendly to read, so I decided to check for the trailing "/" in code instead. Or do you have a better solution?
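   For completeness, the in-code check amounts to something like this (standalone sketch of the normalization, separate from the PR code):

   ```lua
   -- Standalone sketch: append "_bulk" whether or not endpoint_addr already
   -- ends with a slash, instead of enforcing the slash in the schema pattern.
   local str_byte = string.byte

   local function bulk_uri(endpoint_addr)
       if str_byte(endpoint_addr, -1) == str_byte("/") then
           return endpoint_addr .. "_bulk"
       end
       return endpoint_addr .. "/_bulk"
   end

   -- bulk_uri("http://127.0.0.1:9200")  --> "http://127.0.0.1:9200/_bulk"
   -- bulk_uri("http://127.0.0.1:9200/") --> "http://127.0.0.1:9200/_bulk"
   ```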





[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r951012338


##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,447 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_DELETE,
+                 nil,
+                 [[{"action": "delete"}]]
+                 )

Review Comment:
   ```shell
   ➜  ~ curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/elasticsearch-logger -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X DELETE
   {"deleted":"1","key":"\/apisix\/plugin_metadata\/elasticsearch-logger","node":{},"action":"delete"}
   ➜  ~ curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/elasticsearch-logger -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X DELETE
   {"message":"Key not found"}
   ```
   @tzssangglass 
   If `apisix/admin/plugin_metadata/elasticsearch-logger` has already been deleted, running the DELETE again returns `404`. We do not care about the delete result, so we cannot check `if code >= 300`.
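   One way to express that tolerance in the test is a cleanup step that treats `404` as "already deleted" (hypothetical snippet, not the code that was merged):

   ```lua
   -- Hypothetical cleanup step: only fail on error codes other than 404,
   -- since a missing metadata key simply means there is nothing to delete.
   local t = require("lib.test_admin").test
   local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
                        ngx.HTTP_DELETE)
   if code >= 300 and code ~= 404 then
       ngx.status = code
       ngx.say(body)
       return
   end
   ngx.say("passed")
   ```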





[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r951451463


##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,447 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_DELETE,
+                 nil,
+                 [[{"action": "delete"}]]
+                 )

Review Comment:
   Maybe it's dirty data left in etcd from previously run tests in your environment?
   
   You can verify this:
   
   1. clear the test data in etcd, e.g. `etcdctl del / --prefix`
   2. initialize the etcd directories, e.g. `./bin/apisix init_etcd`
   3. run `prove -I../test-nginx/lib t/plugin/elasticsearch-logger.t` to test again
   
   BTW, you can add TEST12 to delete `/apisix/admin/plugin_metadata/elasticsearch-logger`
   





[GitHub] [apisix] spacewander commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
spacewander commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r952176360


##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,433 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9200",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 3: test route (success write)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] successfully processed the entries
+
+
+
+=== TEST 4: set route (auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        auth = {
+                            username = "elastic",
+                            password = "123456"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 5: test route (auth success)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] successfully processed the entries
+
+
+
+=== TEST 6: set route (no auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 7: test route (no auth, failed)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] failed to process entries
+Batch Processor[elasticsearch-logger] exceeded the max_retry_count
+
+
+
+=== TEST 8: set route (error auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        auth = {
+                            username = "elastic",
+                            password = "111111"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 9: test route (error auth failed)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] failed to process entries
+Batch Processor[elasticsearch-logger] exceeded the max_retry_count
+
+
+
+=== TEST 10: add plugin metadata
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_PUT,
+                [[{
+                    "log_format": {
+                        "custom_host": "$host",
+                        "custom_timestamp": "$time_iso8601",
+                        "custom_client_ip": "$remote_addr"
+                    }
+                }]]
+                )
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 11: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 12: hit route and report custom elasticsearch logger
+--- extra_yaml_config
+nginx_config:
+    error_log_level:  info

Review Comment:
   @ccxhwmy 
   For this purpose, you can use `--- log_level: info`.





[GitHub] [apisix] spacewander merged pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
spacewander merged PR #7643:
URL: https://github.com/apache/apisix/pull/7643




[GitHub] [apisix] ccxhwmy commented on pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on PR #7643:
URL: https://github.com/apache/apisix/pull/7643#issuecomment-1231047131

   Do we need to re-run the CI?




[GitHub] [apisix] ccxhwmy commented on pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on PR #7643:
URL: https://github.com/apache/apisix/pull/7643#issuecomment-1215023063

   > great work! 👍
   
   Thank you for your suggestions; they are helpful to me.
   Could you point me to the documentation style guide you are using, so that I can write docs that follow the conventions more closely next time?




[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r946235479


##########
t/plugin/elasticsearch-logging.t:
##########
@@ -0,0 +1,299 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint = {
+                        uri = "http://127.0.0.1:9200",
+                        index = "services",
+                        type = "collector",
+                        timeout = 60,
+                        username = "elastic",
+                        password = "123456",
+                        ssl_verify = false
+                    },
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint = {
+                        uri = "http://127.0.0.1:9200",
+                        index = "services"
+                    }
+                },
+                -- property "uri" is required
+                {
+                    endpoint = {
+                        index = "services",
+                    }
+                },
+                -- property "index" is required
+                {
+                    endpoint = {
+                        uri = "http://127.0.0.1:9200",
+                    }
+                },
+                -- property "uri" validation failed
+                {
+                    endpoint = {
+                        uri = "127.0.0.1:9200",
+                        index = "services"
+                    }
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logging")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint" validation failed: property "uri" is required
+property "endpoint" validation failed: property "index" is required
+property "endpoint" validation failed: property "uri" validation failed.*
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logging"] = {
+                        endpoint = {
+                            uri = "http://127.0.0.1:9200",
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 3: test route (success write)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+
+
+
+=== TEST 4: set route (auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logging"] = {
+                        endpoint = {
+                            uri = "http://127.0.0.1:9201",
+                            index = "services",
+                            username = "elastic",
+                            password = "123456"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 5: test route (auth success)
+--- request
+GET /hello
+--- wait: 2
+--- response_body

Review Comment:
   > We should check the data sent to the elasticsearch, via injecting like: [#7593 (comment)](https://github.com/apache/apisix/pull/7593#issuecomment-1210208755)
   
   Do you mean I should hook the function `httpc:request_uri`, replace it with my own mock_request_uri, and check the request body inside that mock?
   If so, I don't quite understand why it is necessary to check the request body.
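
   For reference, a minimal sketch of the kind of hook/injection being discussed (the wrapper and its log message below are illustrative, not the actual test code):

   ```lua
   local http = require("resty.http")

   -- keep a reference to the real method and wrap it, so the bulk request
   -- body sent towards Elasticsearch can be inspected by the test
   local original_request_uri = http.request_uri
   http.request_uri = function(self, uri, params)
       ngx.log(ngx.INFO, "elasticsearch bulk body: ", params.body)
       return original_request_uri(self, uri, params)
   end
   ```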





[GitHub] [apisix] spacewander commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
spacewander commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r949823849


##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,166 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+local str_byte        = string.byte
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {
+            type = "string",
+            default = "default",
+            enum = {"default", "origin"},
+        },
+        endpoint_addr = core.schema.uri_def,
+        field = {
+            type = "object",
+            properties = {
+                index = { type = "string"},
+                type = { type = "string"}
+            },
+            required = {"index"}
+        },
+        auth = {
+            type = "object",
+            properties = {
+                username = {
+                    type = "string",
+                },
+                password = {
+                    type = "string",
+                },
+            },
+            required = {"username", "password"},
+        },
+        timeout = {
+            type = "integer",
+            minimum = 1,
+            default = 10
+        },
+        ssl_verify = {
+            type = "boolean",
+            default = true
+        }
+    },
+    required = { "endpoint_addr", "field" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf, ctx)
+    local entry
+    local metadata = plugin.plugin_metadata(plugin_name)
+    core.log.info("metadata: ", core.json.delay_encode(metadata))
+    if metadata and metadata.value.log_format
+        and core.table.nkeys(metadata.value.log_format) > 0
+    then
+        entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
+        core.log.info("custom log format entry: ", core.json.delay_encode(entry))
+    else
+        entry = log_util.get_full_log(ngx, conf)
+        core.log.info("full log entry: ", core.json.delay_encode(entry))
+    end
+
+    return core.json.encode({
+            create = {
+                _index = conf.field.index,
+                _type = conf.field.type
+            }
+        }) .. "\n" ..
+        core.json.encode(entry) .. "\n"
+end
+
+
+local function send_to_elasticsearch(conf, entries)
+    local httpc, err = http.new()
+    if not httpc then
+        return false, str_format("create http error: %s", err)
+    end
+
+    local uri = conf.endpoint_addr ..
+        (str_byte(conf.endpoint_addr, -1) == str_byte("/") and "_bulk" or "/_bulk")
+    local body = core.table.concat(entries, "")
+    local headers = {["Content-Type"] = "application/json"}
+    if conf.auth and conf.auth.username and conf.auth.password then

Review Comment:
   We don't need to check username & password as they are already checked by the schema.
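
   Something like the following minimal sketch should be enough, assuming the branch goes on to build a Basic `Authorization` header with `ngx.encode_base64` (the variable names here are illustrative):

   ```lua
   -- the schema already marks both username and password as required,
   -- so checking for the auth table alone is sufficient
   if conf.auth then
       local credentials = conf.auth.username .. ":" .. conf.auth.password
       headers["Authorization"] = "Basic " .. ngx.encode_base64(credentials)
   end
   ```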



##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,301 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9200",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 3: test route (success write)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+
+
+
+=== TEST 4: set route (auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        auth = {
+                            username = "elastic",
+                            password = "123456"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 5: test route (auth success)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+
+
+
+=== TEST 6: set route (no auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 7: test route (no auth, failed)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] failed to process entries
+Batch Processor[elasticsearch-logger] exceeded the max_retry_count
+
+
+
+=== TEST 8: set route (error auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        auth = {
+                            username = "elastic",
+                            password = "111111"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 9: test route (error auth failed)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] failed to process entries
+Batch Processor[elasticsearch-logger] exceeded the max_retry_count

Review Comment:
   Let's add a test that checks the log data against the custom log format from the plugin metadata.



##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,301 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9200",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 3: test route (success write)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world

Review Comment:
   We need to check the `Batch Processor[...] successfully processed the entries` log to make sure the data is successfully written.
   We also need to verify the log data received by the stub server, or verify it via an injected check.
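
   For the stub-server approach, a minimal sketch of what the stub could run in its `content_by_lua_block` (the log prefix and response body are illustrative):

   ```lua
   -- capture the bulk payload APISIX sent, so the test can assert on it
   -- via an error_log check
   ngx.req.read_body()
   ngx.log(ngx.WARN, "elasticsearch stub received: ", ngx.req.get_body_data())
   ngx.say('{"took":0,"errors":false}')
   ```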



##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,166 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+local str_byte        = string.byte
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {
+            type = "string",
+            default = "default",
+            enum = {"default", "origin"},
+        },
+        endpoint_addr = core.schema.uri_def,
+        field = {
+            type = "object",
+            properties = {
+                index = { type = "string"},
+                type = { type = "string"}
+            },
+            required = {"index"}
+        },
+        auth = {
+            type = "object",
+            properties = {
+                username = {
+                    type = "string",
+                },
+                password = {
+                    type = "string",
+                },
+            },
+            required = {"username", "password"},
+        },
+        timeout = {
+            type = "integer",
+            minimum = 1,
+            default = 10
+        },
+        ssl_verify = {
+            type = "boolean",
+            default = true
+        }
+    },
+    required = { "endpoint_addr", "field" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf, ctx)
+    local entry
+    local metadata = plugin.plugin_metadata(plugin_name)
+    core.log.info("metadata: ", core.json.delay_encode(metadata))
+    if metadata and metadata.value.log_format
+        and core.table.nkeys(metadata.value.log_format) > 0
+    then
+        entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
+        core.log.info("custom log format entry: ", core.json.delay_encode(entry))
+    else
+        entry = log_util.get_full_log(ngx, conf)
+        core.log.info("full log entry: ", core.json.delay_encode(entry))
+    end
+
+    return core.json.encode({
+            create = {
+                _index = conf.field.index,
+                _type = conf.field.type
+            }
+        }) .. "\n" ..
+        core.json.encode(entry) .. "\n"
+end
+
+
+local function send_to_elasticsearch(conf, entries)
+    local httpc, err = http.new()
+    if not httpc then
+        return false, str_format("create http error: %s", err)
+    end
+
+    local uri = conf.endpoint_addr ..
+        (str_byte(conf.endpoint_addr, -1) == str_byte("/") and "_bulk" or "/_bulk")

Review Comment:
   Looks like we can require in the schema that `endpoint_addr` doesn't end with `/`, so that we don't need to check it on a per-request basis.
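
   For example, a constraint along these lines could express it in the schema (a sketch; the `pattern` value is one possible way to say "does not end with a slash"):

   ```lua
   endpoint_addr = {
       type = "string",
       -- reject addresses ending with "/", so the URI can always be built
       -- as conf.endpoint_addr .. "/_bulk" without a per-request check
       pattern = "[^/]$",
   },
   ```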



##########
docs/en/latest/plugins/elasticsearch-logger.md:
##########
@@ -0,0 +1,206 @@
+---
+title: elasticsearch-logger
+keywords:
+  - APISIX
+  - API Gateway
+  - Plugin
+  - Elasticsearch-logger
+description: This document contains information about the Apache APISIX elasticsearch-logger Plugin.
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## Description
+
+The `elasticsearch-logger` Plugin is used to forward logs to [Elasticsearch](https://www.elastic.co/guide/en/welcome-to-elastic/current/getting-started-general-purpose.html) for analysis and storage.
+
+When the Plugin is enabled, APISIX will serialize the request context information to [Elasticsearch Bulk format](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) and submit it to the batch queue. When the maximum batch size is exceeded, the data in the queue is pushed to Elasticsearch. See [batch processor](../batch-processor.md) for more details.
+
+## Attributes
+
+| Name          | Required | Default                     | Description                                                  |
+| ------------- | -------- | --------------------------- | ------------------------------------------------------------ |
+| endpoint_addr | True     |                             | Elasticsearch API                                            |
+| field         | True     |                             | Elasticsearch `field` configuration                          |
+| field.index   | True     |                             | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| field.type    | False    | Elasticsearch default value | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| auth          | False    |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) configuration |
+| auth.username | False    |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |

Review Comment:
   The username & password are required.





[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r950359278


##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,166 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+local str_byte        = string.byte
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {
+            type = "string",
+            default = "default",
+            enum = {"default", "origin"},
+        },
+        endpoint_addr = core.schema.uri_def,
+        field = {
+            type = "object",
+            properties = {
+                index = { type = "string"},
+                type = { type = "string"}
+            },
+            required = {"index"}
+        },
+        auth = {
+            type = "object",
+            properties = {
+                username = {
+                    type = "string",
+                },
+                password = {
+                    type = "string",
+                },
+            },
+            required = {"username", "password"},
+        },
+        timeout = {
+            type = "integer",
+            minimum = 1,
+            default = 10
+        },
+        ssl_verify = {
+            type = "boolean",
+            default = true
+        }
+    },
+    required = { "endpoint_addr", "field" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf, ctx)
+    local entry
+    local metadata = plugin.plugin_metadata(plugin_name)
+    core.log.info("metadata: ", core.json.delay_encode(metadata))
+    if metadata and metadata.value.log_format
+        and core.table.nkeys(metadata.value.log_format) > 0
+    then
+        entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
+        core.log.info("custom log format entry: ", core.json.delay_encode(entry))
+    else
+        entry = log_util.get_full_log(ngx, conf)
+        core.log.info("full log entry: ", core.json.delay_encode(entry))
+    end
+
+    return core.json.encode({
+            create = {
+                _index = conf.field.index,
+                _type = conf.field.type
+            }
+        }) .. "\n" ..
+        core.json.encode(entry) .. "\n"
+end
+
+
+local function send_to_elasticsearch(conf, entries)
+    local httpc, err = http.new()
+    if not httpc then
+        return false, str_format("create http error: %s", err)
+    end
+
+    local uri = conf.endpoint_addr ..
+        (str_byte(conf.endpoint_addr, -1) == str_byte("/") and "_bulk" or "/_bulk")

Review Comment:
   That is a good idea!





[GitHub] [apisix] spacewander commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
spacewander commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r950840161


##########
docs/en/latest/plugins/elasticsearch-logger.md:
##########
@@ -0,0 +1,206 @@
+---
+title: elasticsearch-logger
+keywords:
+  - APISIX
+  - API Gateway
+  - Plugin
+  - Elasticsearch-logger
+description: This document contains information about the Apache APISIX elasticsearch-logger Plugin.
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## Description
+
+The `elasticsearch-logger` Plugin is used to forward logs to [Elasticsearch](https://www.elastic.co/guide/en/welcome-to-elastic/current/getting-started-general-purpose.html) for analysis and storage.
+
+When the Plugin is enabled, APISIX will serialize the request context information to [Elasticsearch Bulk format](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) and submit it to the batch queue. When the maximum batch size is exceeded, the data in the queue is pushed to Elasticsearch. See [batch processor](../batch-processor.md) for more details.
+
+## Attributes
+
+| Name          | Type    | Required | Default                     | Description                                                  |
+| ------------- | ------- | -------- | --------------------------- | ------------------------------------------------------------ |
+| endpoint_addr | string  | True     |                             | Elasticsearch API                                            |
+| field         | array   | True     |                             | Elasticsearch `field` configuration                          |
+| field.index   | string  | True     |                             | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| field.type    | string  | False    | Elasticsearch default value | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| auth          | array   | False    |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) configuration |
+| auth.username | string  | True     |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| auth.password | string  | True     |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| ssl_verify    | boolean | False    | true                        | When set to `true` enables SSL verification as per [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake). |
+| timeout       | integer | False    | 10                          | Elasticsearch send data timeout in seconds.                  |
+
+This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration.
+
+## Enabling the Plugin
+
+### Full configuration
+
+The example below shows a complete configuration of the Plugin on a specific Route:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logger":{
+            "endpoint_addr":"http://127.0.0.1:9200",
+            "field":{
+                "index":"services",
+                "type":"collector"
+            },
+            "auth":{
+                "username":"elastic",
+                "password":"123456"
+            },
+            "ssl_verify":false,
+            "timeout": 60,
+            "retry_delay":1,
+            "buffer_duration":60,
+            "max_retry_count":0,
+            "batch_max_size":1000,
+            "inactive_timeout":5,
+            "name":"elasticsearch-logger"
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+### Minimal configuration example
+
+The example below shows a bare minimum configuration of the Plugin on a Route:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logger":{
+            "endpoint_addr":"http://127.0.0.1:9200",
+            "field":{
+                "index":"services"
+            }
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+## Example usage
+
+Once you have configured the Route to use the Plugin, when you make a request to APISIX, it will be logged in your Elasticsearch server:
+
+```shell
+curl -i http://127.0.0.1:9080/elasticsearch.do?q=hello
+HTTP/1.1 200 OK
+...
+hello, world
+```
+
+You should be able to login and search these logs from your Kibana discover:
+
+![kibana search view](../../../assets/images/plugin/elasticsearch-admin-en.png)
+
+## Metadata
+
+You can also set the format of the logs by configuring the Plugin metadata. The following configurations are available:
+
+| Name       | Type   | Required | Default                                                      | Description                                                  |
+| ---------- | ------ | -------- | ------------------------------------------------------------ | ------------------------------------------------------------ |
+| log_format | object | False    | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](https://github.com/apache/apisix/blob/master/docs/en/latest/apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. |
+
+:::info IMPORTANT
+
+Configuring the Plugin metadata is global in scope. This means that it will take effect on all Routes and Services which use the `elasticsearch-logger` Plugin.
+
+:::
+
+The example below shows how you can configure through the Admin API:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/elasticsearch-logger \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "log_format": {
+        "host": "$host",
+        "@timestamp": "$time_iso8601",
+        "client_ip": "$remote_addr"
+    }
+}'
+```
+
+With this configuration, your logs would be formatted as shown below:
+
+```shell
+{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"}
+{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"}
+```
+
+ make a request to APISIX again:
+
+```shell
+curl -i http://127.0.0.1:9080/elasticsearch.do?q=hello

Review Comment:
   Let's quote `?` and `=`



##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,168 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+local str_byte        = string.byte
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {
+            type = "string",
+            default = "default",
+            enum = {"default", "origin"},
+        },
+        endpoint_addr = core.schema.uri_def,
+        field = {
+            type = "object",
+            properties = {
+                index = { type = "string"},
+                type = { type = "string"}
+            },
+            required = {"index"}
+        },
+        auth = {
+            type = "object",
+            properties = {
+                username = {
+                    type = "string",
+                    minItems = 1

Review Comment:
   For strings, we use `minLength`: https://github.com/apache/apisix/blob/29f21a0eceb8eedc5d40ba68af154698f0eb70f4/apisix/schema_def.lua#L86
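
   i.e. something like this sketch:

   ```lua
   username = {
       type = "string",
       minLength = 1  -- minItems applies to arrays; strings use minLength
   },
   ```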



##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,433 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9200",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 3: test route (success write)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] successfully processed the entries
+
+
+
+=== TEST 4: set route (auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        auth = {
+                            username = "elastic",
+                            password = "123456"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 5: test route (auth success)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] successfully processed the entries
+
+
+
+=== TEST 6: set route (no auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 7: test route (no auth, failed)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] failed to process entries
+Batch Processor[elasticsearch-logger] exceeded the max_retry_count
+
+
+
+=== TEST 8: set route (error auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        auth = {
+                            username = "elastic",
+                            password = "111111"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 9: test route (error auth failed)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] failed to process entries
+Batch Processor[elasticsearch-logger] exceeded the max_retry_count
+
+
+
+=== TEST 10: add plugin metadata
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_PUT,
+                [[{
+                    "log_format": {
+                        "custom_host": "$host",
+                        "custom_timestamp": "$time_iso8601",
+                        "custom_client_ip": "$remote_addr"
+                    }
+                }]]
+                )
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 11: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 12: hit route and report custom elasticsearch logger

Review Comment:
   What's the difference between TEST 12 and TEST 13?



##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,433 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9200",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 3: test route (success write)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] successfully processed the entries
+
+
+
+=== TEST 4: set route (auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        auth = {
+                            username = "elastic",
+                            password = "123456"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 5: test route (auth success)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] successfully processed the entries
+
+
+
+=== TEST 6: set route (no auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 7: test route (no auth, failed)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] failed to process entries
+Batch Processor[elasticsearch-logger] exceeded the max_retry_count
+
+
+
+=== TEST 8: set route (error auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        auth = {
+                            username = "elastic",
+                            password = "111111"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 9: test route (error auth failed)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] failed to process entries
+Batch Processor[elasticsearch-logger] exceeded the max_retry_count
+
+
+
+=== TEST 10: add plugin metadata
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_PUT,
+                [[{
+                    "log_format": {
+                        "custom_host": "$host",
+                        "custom_timestamp": "$time_iso8601",
+                        "custom_client_ip": "$remote_addr"
+                    }
+                }]]
+                )
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 11: set route

Review Comment:
   We can merge TEST 11 into TEST 10



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logging

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r945420782


##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx
+local core            = require("apisix.core")
+local ngx_now         = ngx.now
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+
+local DEFAULT_ELASTICSEARCH_SOURCE = "apache-apisix-elasticsearch-logging"
+
+local plugin_name = "elasticsearch-logging"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+local str_format = core.string.format
+local str_sub = string.sub
+
+
+local schema = {
+    type = "object",
+    properties = {
+        endpoint = {
+            type = "object",
+            properties = {
+                uri = core.schema.uri_def,
+                index = { type = "string"},
+                type = { type = "string"},
+                username = { type = "string"},
+                password = { type = "string"},
+                timeout = {
+                    type = "integer",
+                    minimum = 1,
+                    default = 10
+                },
+                ssl_verify = {
+                    type = "boolean",
+                    default = true
+                }
+            },
+            required = { "uri", "index" }
+        },
+    },
+    required = { "endpoint" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf)
+    local entry = log_util.get_full_log(ngx, conf)

Review Comment:
   > Please also support the custom log format.
   
   How about referencing the `kafka-logger` plugin?
   https://github.com/apache/apisix/blob/689e4f11f4c42d3d1bf5cdf82d5aaa3976fc715d/apisix/plugins/kafka-logger.lua#L178
   ```lua
   local entry
       if conf.meta_format == "origin" then
           entry = log_util.get_req_original(ctx, conf)
           -- core.log.info("origin entry: ", entry)
   
       else
           local metadata = plugin.plugin_metadata(plugin_name)
           core.log.info("metadata: ", core.json.delay_encode(metadata))
           if metadata and metadata.value.log_format
             and core.table.nkeys(metadata.value.log_format) > 0
           then
               entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
               core.log.info("custom log format entry: ", core.json.delay_encode(entry))
           else
               entry = log_util.get_full_log(ngx, conf)
               core.log.info("full log entry: ", core.json.delay_encode(entry))
           end
       end
   ```
   



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r951012338


##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,447 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_DELETE,
+                 nil,
+                 [[{"action": "delete"}]]
+                 )

Review Comment:
   ```shell
   ➜  ~ curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/elasticsearch-logger -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X DELETE
   {"deleted":"1","key":"\/apisix\/plugin_metadata\/elasticsearch-logger","node":{},"action":"delete"}
   ➜  ~ curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/elasticsearch-logger -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X DELETE
   {"message":"Key not found"}
   ```
   If `apisix/admin/plugin_metadata/elasticsearch-logger` has already been deleted and we execute the request again, it returns `404`. We do not care about the delete result here, so we cannot check `if code >= 300`.
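   For illustration, a minimal sketch of that cleanup step (mirroring the call in TEST 2 above, with the returned code/body simply ignored):
   ```lua
   local t = require("lib.test_admin").test

   -- best-effort cleanup: a 404 here only means the metadata is already gone,
   -- so the returned status code is deliberately not checked
   t('/apisix/admin/plugin_metadata/elasticsearch-logger',
       ngx.HTTP_DELETE,
       nil,
       [[{"action": "delete"}]]
   )
   ```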



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r950959580


##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,447 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_DELETE,
+                 nil,
+                 [[{"action": "delete"}]]
+                 )
+
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9200",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 3: test route (success write)
+--- extra_init_by_lua
+    local core = require("apisix.core")
+    local http = require("resty.http")
+    local ngx_re = require("ngx.re")
+    local log_util = require("apisix.utils.log-util")
+    log_util.get_full_log = function(ngx, conf)
+        return {
+            test = "test"
+        }
+    end
+
+    http.request_uri = function(self, uri, params)
+        if not params.body or type(params.body) ~= "string" then
+            return nil, "invalid params body"
+        end
+
+        local arr = ngx_re.split(params.body, "\n")
+        if not arr or #arr ~= 2 then
+            return nil, "invalid params body"
+        end
+
+        local entry = core.json.decode(arr[2])
+        local origin_entry = log_util.get_full_log(ngx, {})
+        for k, v in pairs(origin_entry) do
+            local vv = entry[k]
+            if not vv or vv ~= v then
+                return nil, "invalid params body"
+            end
+        end
+
+        core.log.error("check elasticsearch full log body success")
+        return {
+            status = 200,
+            body = "success"
+        }, nil
+    end
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+check elasticsearch full log body success
+
+
+
+=== TEST 4: set route (auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        auth = {
+                            username = "elastic",
+                            password = "123456"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 5: test route (auth success)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] successfully processed the entries
+
+
+
+=== TEST 6: set route (no auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 7: test route (no auth, failed)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] failed to process entries

Review Comment:
   Can we see the log that Elasticsearch produces when auth fails? If so, it would be clearer to verify that log here.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r951084694


##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,447 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_DELETE,
+                 nil,
+                 [[{"action": "delete"}]]
+                 )

Review Comment:
   We have `no_shuffle` at the top of this test file, which means the test cases are executed in order.
   
   I think whether `apisix/admin/plugin_metadata/elasticsearch-logger` exists should already be deterministic within the test file `t/plugin/elasticsearch-logger.t` itself.
   
   One CI detail: before running each new test file, the test shell clears etcd and re-initializes it with `apisix init_etcd`, so dirty data from other test files cannot affect the current file.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] spacewander commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
spacewander commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r952176181


##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,166 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+local str_byte        = string.byte
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {
+            type = "string",
+            default = "default",
+            enum = {"default", "origin"},
+        },
+        endpoint_addr = core.schema.uri_def,
+        field = {
+            type = "object",
+            properties = {
+                index = { type = "string"},
+                type = { type = "string"}
+            },
+            required = {"index"}
+        },
+        auth = {
+            type = "object",
+            properties = {
+                username = {
+                    type = "string",
+                },
+                password = {
+                    type = "string",
+                },
+            },
+            required = {"username", "password"},
+        },
+        timeout = {
+            type = "integer",
+            minimum = 1,
+            default = 10
+        },
+        ssl_verify = {
+            type = "boolean",
+            default = true
+        }
+    },
+    required = { "endpoint_addr", "field" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf, ctx)
+    local entry
+    local metadata = plugin.plugin_metadata(plugin_name)
+    core.log.info("metadata: ", core.json.delay_encode(metadata))
+    if metadata and metadata.value.log_format
+        and core.table.nkeys(metadata.value.log_format) > 0
+    then
+        entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
+        core.log.info("custom log format entry: ", core.json.delay_encode(entry))
+    else
+        entry = log_util.get_full_log(ngx, conf)
+        core.log.info("full log entry: ", core.json.delay_encode(entry))
+    end
+
+    return core.json.encode({
+            create = {
+                _index = conf.field.index,
+                _type = conf.field.type
+            }
+        }) .. "\n" ..
+        core.json.encode(entry) .. "\n"
+end
+
+
+local function send_to_elasticsearch(conf, entries)
+    local httpc, err = http.new()
+    if not httpc then
+        return false, str_format("create http error: %s", err)
+    end
+
+    local uri = conf.endpoint_addr ..
+        (str_byte(conf.endpoint_addr, -1) == str_byte("/") and "_bulk" or "/_bulk")

Review Comment:
   @ccxhwmy 
   What about:
   ```
   addr = {
   	type = "string",
   	pattern = "[^/]$",
   }
   ```
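   For what it's worth, a small sketch of how that pattern behaves, assuming the same `core.schema.check` helper the plugin already uses:
   ```lua
   local core = require("apisix.core")

   -- illustrative schema fragment: the regex "[^/]$" requires the last character
   -- of endpoint_addr not to be "/", so the plugin can always append "/_bulk"
   local schema = {
       type = "object",
       properties = {
           endpoint_addr = {
               type = "string",
               pattern = "[^/]$",
           },
       },
       required = {"endpoint_addr"},
   }

   local ok = core.schema.check(schema, {endpoint_addr = "http://127.0.0.1:9200"})        -- passes
   local ok2, err = core.schema.check(schema, {endpoint_addr = "http://127.0.0.1:9200/"}) -- fails the pattern
   ```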



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] spacewander commented on pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
spacewander commented on PR #7643:
URL: https://github.com/apache/apisix/pull/7643#issuecomment-1219070691

   Please make the CI pass, thanks!


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r957322432


##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,181 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {
+            type = "string",
+            default = "default",
+            enum = {"default", "origin"},
+        },
+        endpoint_addr = {
+            type = "string",
+            pattern = "[^/]$",
+        },
+        field = {
+            type = "object",
+            properties = {
+                index = { type = "string"},
+                type = { type = "string"}
+            },
+            required = {"index"}
+        },
+        auth = {
+            type = "object",
+            properties = {
+                username = {
+                    type = "string",
+                    minLength = 1
+                },
+                password = {
+                    type = "string",
+                    minLength = 1
+                },
+            },
+            required = {"username", "password"},
+        },
+        timeout = {
+            type = "integer",
+            minimum = 1,
+            default = 10
+        },
+        ssl_verify = {
+            type = "boolean",
+            default = true
+        }
+    },
+    required = { "endpoint_addr", "field" },
+}
+
+
+local metadata_schema = {
+    type = "object",
+    properties = {
+        log_format = log_util.metadata_schema_log_format,
+    },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+    metadata_schema = metadata_schema,
+}
+
+
+function _M.check_schema(conf, schema_type)
+    if schema_type == core.schema.TYPE_METADATA then
+        return core.schema.check(metadata_schema, conf)
+    end
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf, ctx)
+    local entry
+    local metadata = plugin.plugin_metadata(plugin_name)
+    core.log.info("metadata: ", core.json.delay_encode(metadata))
+    if metadata and metadata.value.log_format
+        and core.table.nkeys(metadata.value.log_format) > 0
+    then
+        entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
+        core.log.info("custom log format entry: ", core.json.delay_encode(entry))
+    else
+        entry = log_util.get_full_log(ngx, conf)
+        core.log.info("full log entry: ", core.json.delay_encode(entry))
+    end
+
+    return core.json.encode({
+            create = {
+                _index = conf.field.index,
+                _type = conf.field.type
+            }
+        }) .. "\n" ..
+        core.json.encode(entry) .. "\n"
+end
+
+
+local function send_to_elasticsearch(conf, entries)
+    local httpc, err = http.new()
+    if not httpc then
+        return false, str_format("create http error: %s", err)
+    end
+
+    local uri = conf.endpoint_addr .. "/_bulk"
+    local body = core.table.concat(entries, "")
+    local headers = {["Content-Type"] = "application/json"}

Review Comment:
   > Would be better to use `application/x-ndjson`?
   
   I looked into `ndjson`, and I think it is more suitable than `json` here. Thank you.
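   For reference, a minimal sketch of that change inside `send_to_elasticsearch` (only the header line differs from the code quoted above):
   ```lua
   -- the _bulk payload is newline-delimited JSON, so declare it as NDJSON
   local body = core.table.concat(entries, "")
   local headers = {["Content-Type"] = "application/x-ndjson"}
   ```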



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] spacewander commented on a diff in pull request #7643: feat: add elasticsearch-logging

Posted by GitBox <gi...@apache.org>.
spacewander commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r945284179


##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx
+local core            = require("apisix.core")
+local ngx_now         = ngx.now
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+
+local DEFAULT_ELASTICSEARCH_SOURCE = "apache-apisix-elasticsearch-logging"
+
+local plugin_name = "elasticsearch-logging"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+local str_format = core.string.format
+local str_sub = string.sub
+
+
+local schema = {
+    type = "object",
+    properties = {
+        endpoint = {
+            type = "object",
+            properties = {
+                uri = core.schema.uri_def,
+                index = { type = "string"},
+                type = { type = "string"},
+                username = { type = "string"},
+                password = { type = "string"},
+                timeout = {
+                    type = "integer",
+                    minimum = 1,
+                    default = 10
+                },
+                ssl_verify = {
+                    type = "boolean",
+                    default = true
+                }
+            },
+            required = { "uri", "index" }
+        },
+    },
+    required = { "endpoint" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf)
+    local entry = log_util.get_full_log(ngx, conf)
+    return core.json.encode({
+            create = {
+                _index = conf.endpoint.index,
+                _type = conf.endpoint.type
+            }
+        }) .. "\n" ..
+        core.json.encode({
+            time = ngx_now(),
+            host = entry.server.hostname,
+            source = DEFAULT_ELASTICSEARCH_SOURCE,
+            request_url = entry.request.url,
+            request_method = entry.request.method,
+            request_headers = entry.request.headers,
+            request_query = entry.request.querystring,
+            request_size = entry.request.size,
+            response_headers = entry.response.headers,
+            response_status = entry.response.status,
+            response_size = entry.response.size,
+            latency = entry.latency,
+            upstream = entry.upstream,
+        }) .. "\n"
+end
+
+
+local function send_to_elasticsearch(conf, entries)
+    local httpc, err = http.new()
+    if not httpc then
+        return false, str_format("create http error: %s", err)
+    end
+
+    local uri = conf.endpoint.uri ..
+        (str_sub(conf.endpoint.uri, -1) == "/" and "_bulk" or "/_bulk")

Review Comment:
   Using `string.byte` would be better.
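   For example, a minimal sketch of the byte-based check (essentially the approach the updated revision quoted elsewhere in this thread takes):
   ```lua
   local str_byte = string.byte

   -- comparing the last byte avoids allocating a one-character substring
   local uri = conf.endpoint.uri ..
       (str_byte(conf.endpoint.uri, -1) == str_byte("/") and "_bulk" or "/_bulk")
   ```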



##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx
+local core            = require("apisix.core")
+local ngx_now         = ngx.now
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+
+local DEFAULT_ELASTICSEARCH_SOURCE = "apache-apisix-elasticsearch-logging"
+
+local plugin_name = "elasticsearch-logging"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+local str_format = core.string.format
+local str_sub = string.sub
+
+
+local schema = {
+    type = "object",
+    properties = {
+        endpoint = {
+            type = "object",
+            properties = {
+                uri = core.schema.uri_def,
+                index = { type = "string"},
+                type = { type = "string"},
+                username = { type = "string"},

Review Comment:
   We can store the username & password in an additional field, like https://github.com/apache/apisix/blob/a8d03acdc1f7253960ede56a580487fc5219e9d6/apisix/plugins/kafka-proxy.lua#L25,
   so that we can mark them as required easily.
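   For instance, a rough sketch of that shape (mirroring the `auth` block the updated schema quoted elsewhere in this thread ends up with):
   ```lua
   -- illustrative schema fragment: grouping the credentials under "auth"
   -- lets both fields be required together, similar to kafka-proxy
   local auth_schema = {
       type = "object",
       properties = {
           username = { type = "string" },
           password = { type = "string" },
       },
       required = {"username", "password"},
   }
   ```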



##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx
+local core            = require("apisix.core")
+local ngx_now         = ngx.now
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+
+local DEFAULT_ELASTICSEARCH_SOURCE = "apache-apisix-elasticsearch-logging"
+
+local plugin_name = "elasticsearch-logging"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+local str_format = core.string.format
+local str_sub = string.sub
+
+
+local schema = {
+    type = "object",
+    properties = {
+        endpoint = {

Review Comment:
   Why do we wrap all the fields in an extra `endpoint` field?



##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx
+local core            = require("apisix.core")
+local ngx_now         = ngx.now
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+
+local DEFAULT_ELASTICSEARCH_SOURCE = "apache-apisix-elasticsearch-logging"
+
+local plugin_name = "elasticsearch-logging"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+local str_format = core.string.format
+local str_sub = string.sub
+
+
+local schema = {
+    type = "object",
+    properties = {
+        endpoint = {
+            type = "object",
+            properties = {
+                uri = core.schema.uri_def,
+                index = { type = "string"},
+                type = { type = "string"},
+                username = { type = "string"},
+                password = { type = "string"},
+                timeout = {
+                    type = "integer",
+                    minimum = 1,
+                    default = 10
+                },
+                ssl_verify = {
+                    type = "boolean",
+                    default = true
+                }
+            },
+            required = { "uri", "index" }
+        },
+    },
+    required = { "endpoint" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf)
+    local entry = log_util.get_full_log(ngx, conf)
+    return core.json.encode({
+            create = {
+                _index = conf.endpoint.index,
+                _type = conf.endpoint.type
+            }
+        }) .. "\n" ..
+        core.json.encode({
+            time = ngx_now(),
+            host = entry.server.hostname,

Review Comment:
   Why should we invent a log entry structure specific to this plugin?
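   If `log_util.get_full_log` already gives us the common entry, the bulk payload can simply encode that entry as-is, e.g. (rough sketch based on the current field names):

   ```lua
   local function get_logger_entry(conf)
       -- reuse the shared full-log entry instead of a plugin-specific structure
       local entry = log_util.get_full_log(ngx, conf)

       -- Elasticsearch bulk format: one action line plus one document line,
       -- each terminated by a newline
       return core.json.encode({
               create = {
                   _index = conf.endpoint.index,
                   _type = conf.endpoint.type,
               }
           }) .. "\n" ..
           core.json.encode(entry) .. "\n"
   end
   ```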



##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx
+local core            = require("apisix.core")
+local ngx_now         = ngx.now
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+
+local DEFAULT_ELASTICSEARCH_SOURCE = "apache-apisix-elasticsearch-logging"
+
+local plugin_name = "elasticsearch-logging"

Review Comment:
   Should we call it elasticsearch-logger, like the kafka-logger plugin?



##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx

Review Comment:
   We should move the localized variables to after the `require ...` lines.
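   i.e. keep the `require(...)` lines together at the top and group the plain localizations after them, roughly:

   ```lua
   local core            = require("apisix.core")
   local http            = require("resty.http")
   local log_util        = require("apisix.utils.log-util")
   local bp_manager_mod  = require("apisix.utils.batch-processor-manager")

   -- localized variables go after the require statements
   local ngx             = ngx
   local ngx_now         = ngx.now
   local str_format      = core.string.format
   local str_sub         = string.sub
   ```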



##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx
+local core            = require("apisix.core")
+local ngx_now         = ngx.now
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+
+local DEFAULT_ELASTICSEARCH_SOURCE = "apache-apisix-elasticsearch-logging"
+
+local plugin_name = "elasticsearch-logging"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+local str_format = core.string.format
+local str_sub = string.sub
+
+
+local schema = {
+    type = "object",
+    properties = {
+        endpoint = {
+            type = "object",
+            properties = {
+                uri = core.schema.uri_def,
+                index = { type = "string"},
+                type = { type = "string"},
+                username = { type = "string"},
+                password = { type = "string"},
+                timeout = {
+                    type = "integer",
+                    minimum = 1,
+                    default = 10
+                },
+                ssl_verify = {
+                    type = "boolean",
+                    default = true
+                }
+            },
+            required = { "uri", "index" }
+        },
+    },
+    required = { "endpoint" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf)
+    local entry = log_util.get_full_log(ngx, conf)

Review Comment:
   Please also support the custom log format.
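   The usual approach is to read the plugin metadata and fall back to the full log entry when no `log_format` is configured; a rough sketch (naming is up to you):

   ```lua
   local plugin = require("apisix.plugin")

   local function get_logger_entry(conf, ctx)
       local entry
       local metadata = plugin.plugin_metadata(plugin_name)
       if metadata and metadata.value.log_format
           and core.table.nkeys(metadata.value.log_format) > 0
       then
           -- user-defined format from /apisix/admin/plugin_metadata/<plugin name>
           entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
       else
           entry = log_util.get_full_log(ngx, conf)
       end
       return entry
   end
   ```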



##########
t/plugin/elasticsearch-logging.t:
##########
@@ -0,0 +1,299 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint = {
+                        uri = "http://127.0.0.1:9200",
+                        index = "services",
+                        type = "collector",
+                        timeout = 60,
+                        username = "elastic",
+                        password = "123456",
+                        ssl_verify = false
+                    },
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint = {
+                        uri = "http://127.0.0.1:9200",
+                        index = "services"
+                    }
+                },
+                -- property "uri" is required
+                {
+                    endpoint = {
+                        index = "services",
+                    }
+                },
+                -- property "index" is required
+                {
+                    endpoint = {
+                        uri = "http://127.0.0.1:9200",
+                    }
+                },
+                -- property "uri" validation failed
+                {
+                    endpoint = {
+                        uri = "127.0.0.1:9200",
+                        index = "services"
+                    }
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logging")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint" validation failed: property "uri" is required
+property "endpoint" validation failed: property "index" is required
+property "endpoint" validation failed: property "uri" validation failed.*
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logging"] = {
+                        endpoint = {
+                            uri = "http://127.0.0.1:9200",
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 3: test route (success write)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+
+
+
+=== TEST 4: set route (auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logging"] = {
+                        endpoint = {
+                            uri = "http://127.0.0.1:9201",
+                            index = "services",
+                            username = "elastic",
+                            password = "123456"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 5: test route (auth success)
+--- request
+GET /hello
+--- wait: 2
+--- response_body

Review Comment:
   We should check the data actually sent to Elasticsearch, e.g. by injecting a hook as in: https://github.com/apache/apisix/pull/7593#issuecomment-1210208755
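   Roughly the same idea as in that comment: wrap `resty.http`'s `request_uri` in the test setup (for example from an `extra_init_by_lua` section) so the bulk body the plugin sends gets logged, and then assert on it with `--- error_log`. A sketch of the wrapper, assuming we reuse that injection approach:

   ```lua
   -- wrap request_uri so the test can observe what is sent to Elasticsearch
   local http = require("resty.http")
   local orig_request_uri = http.request_uri
   http.request_uri = function(self, uri, params)
       ngx.log(ngx.INFO, "elasticsearch body: ", params.body)
       return orig_request_uri(self, uri, params)
   end
   ```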



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] hf400159 commented on pull request #7643: feat: add elasticsearch-logging

Posted by GitBox <gi...@apache.org>.
hf400159 commented on PR #7643:
URL: https://github.com/apache/apisix/pull/7643#issuecomment-1214722658

   great work! 👍 


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on PR #7643:
URL: https://github.com/apache/apisix/pull/7643#issuecomment-1216193050

   > Please make the doc lint pass, thanks!
   
   I noticed the failure. I am working through the other suggestions and will fix the CI afterwards.


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] tzssangglass commented on pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on PR #7643:
URL: https://github.com/apache/apisix/pull/7643#issuecomment-1231048405

   > Do we need to re-run the CI?
   
   done


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r950958953


##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,447 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_DELETE,
+                 nil,
+                 [[{"action": "delete"}]]
+                 )

Review Comment:
   ```suggestion
               local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
                   ngx.HTTP_DELETE
              )
   ```
   
   is OK, and we also need to check whether `code >= 300`
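   i.e. something along these lines:

   ```lua
   local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
       ngx.HTTP_DELETE
   )
   if code >= 300 then
       ngx.status = code
   end
   ```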



##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,447 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_DELETE,
+                 nil,
+                 [[{"action": "delete"}]]
+                 )

Review Comment:
   ```suggestion
               local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
                   ngx.HTTP_DELETE
               )
   ```
   
   is OK, and we also need to check whether `code >= 300`



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r950963729


##########
docs/zh/latest/plugins/elasticsearch-logger.md:
##########
@@ -0,0 +1,197 @@
+---
+title: elasticsearch-logger
+keywords:
+  - API Gateway
+  - Plugin
+  - Elasticsearch-logger
+  - Logging
+description: This document introduces the elasticsearch-logger plugin of the API gateway Apache APISIX. You can use this plugin to push APISIX log data to Elasticsearch.
+description: 本文介绍了 API 网关 Apache APISIX 的 elasticsearch-logger 插件。使用该插件可以将 APISIX 的日志数据推送到 Elasticserach。
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## Description
+
+The `elasticsearch-logger` plugin is used to forward the request logs of `Apache APISIX` to `Elasticsearch` for analysis and storage. After the plugin is enabled, `Apache APISIX` obtains the request context in the `Log Phase`, serializes it into the [Bulk format](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk), and submits it to the batch queue. The queued data is pushed to `Elasticsearch` once the batch reaches its maximum size or the maximum buffer flush time is reached.
+
+For more information about the `Batch-Processor` of `Apache APISIX`, see: [Batch-Processor](https://file+.vscode-resource.vscode-cdn.net/y%3A/apisix/docs/zh/latest/batch-processor.md)
+
+## Attributes
+
+| Name          | Type    | Required | Default                     | Description                                                  |
+| ------------- | ------- | -------- | --------------------------- | ------------------------------------------------------------ |
+| endpoint_addr | string  | Yes      |                             | Elasticsearch API address                                    |
+| field         | array   | Yes      |                             | Elasticsearch `field` configuration                          |
+| field.index   | string  | Yes      |                             | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| field.type    | string  | No       | Elasticsearch default value | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| auth          | array   | No       |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) configuration |
+| auth.username | string  | Yes      |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| auth.password | string  | Yes      |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| ssl_verify    | boolean | No       | true                        | When set to `true`, performs SSL verification; see the [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake) |
+| timeout       | integer | No       | 10                          | Timeout for requests sent to Elasticsearch                   |
+
+This plugin supports using the batch processor to aggregate entries (logs/data) and process them in batches, which avoids submitting data too frequently. By default, the batch processor submits data every `5` seconds or once the queue holds `1000` entries. For details on the batch processor parameters, or to customize them, see the [Batch-Processor](../batch-processor.md#配置) configuration section.
+
+## Enable Plugin
+
+You can enable the `elasticsearch-logger` plugin on a specific route with the following commands:
+
+### Full configuration example
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logger":{
+            "endpoint_addr":"http://127.0.0.1:9200",
+            "field":{
+                "index":"services",
+                "type":"collector"
+            },
+            "auth":{
+                "username":"elastic",
+                "password":"123456"
+            },
+            "ssl_verify":false,
+            "timeout": 60,
+            "retry_delay":1,
+            "buffer_duration":60,
+            "max_retry_count":0,
+            "batch_max_size":1000,
+            "inactive_timeout":5,
+            "name":"elasticsearch-logger"
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+### Minimal configuration example
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logger":{
+            "endpoint_addr":"http://127.0.0.1:9200",
+            "field":{
+                "index":"services"
+            }
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+## Test Plugin
+
+Send a request to the route configured with the `elasticsearch-logger` plugin:
+
+```shell
+curl -i http://127.0.0.1:9080/elasticsearch.do?q=hello
+HTTP/1.1 200 OK
+...
+hello, world
+```
+
+Now you can log in to the Kibana console to search for and view the relevant logs.

Review Comment:
   I was wondering if we could view these logs through Elasticsearch's search API instead of Kibana. Just an idea.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logging

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r945467918


##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx
+local core            = require("apisix.core")
+local ngx_now         = ngx.now
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+
+local DEFAULT_ELASTICSEARCH_SOURCE = "apache-apisix-elasticsearch-logging"
+
+local plugin_name = "elasticsearch-logging"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+local str_format = core.string.format
+local str_sub = string.sub
+
+
+local schema = {
+    type = "object",
+    properties = {
+        endpoint = {
+            type = "object",
+            properties = {
+                uri = core.schema.uri_def,
+                index = { type = "string"},
+                type = { type = "string"},
+                username = { type = "string"},
+                password = { type = "string"},
+                timeout = {
+                    type = "integer",
+                    minimum = 1,
+                    default = 10
+                },
+                ssl_verify = {
+                    type = "boolean",
+                    default = true
+                }
+            },
+            required = { "uri", "index" }
+        },
+    },
+    required = { "endpoint" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf)
+    local entry = log_util.get_full_log(ngx, conf)
+    return core.json.encode({
+            create = {
+                _index = conf.endpoint.index,
+                _type = conf.endpoint.type
+            }
+        }) .. "\n" ..
+        core.json.encode({
+            time = ngx_now(),
+            host = entry.server.hostname,
+            source = DEFAULT_ELASTICSEARCH_SOURCE,
+            request_url = entry.request.url,
+            request_method = entry.request.method,
+            request_headers = entry.request.headers,
+            request_query = entry.request.querystring,
+            request_size = entry.request.size,
+            response_headers = entry.response.headers,
+            response_status = entry.response.status,
+            response_size = entry.response.size,
+            latency = entry.latency,
+            upstream = entry.upstream,
+        }) .. "\n"
+end
+
+
+local function send_to_elasticsearch(conf, entries)
+    local httpc, err = http.new()
+    if not httpc then
+        return false, str_format("create http error: %s", err)
+    end
+
+    local uri = conf.endpoint.uri ..
+        (str_sub(conf.endpoint.uri, -1) == "/" and "_bulk" or "/_bulk")
+    local body = core.table.concat(entries, "")
+    local headers = {["Content-Type"] = "application/json"}
+    if conf.endpoint.username and conf.endpoint.password then
+        local authorization = "Basic " .. ngx.encode_base64(
+            conf.endpoint.username .. ":" .. conf.endpoint.password
+        )
+        headers["Authorization"] = authorization
+    end
+
+    core.log.info("uri: ", uri, ", body: ", body, ", headers: ", core.json.encode(headers))
+
+    httpc:set_timeout(conf.endpoint.timeout * 1000)
+    local resp, err = httpc:request_uri(uri, {
+        ssl_verify = conf.endpoint.ssl_verify,
+        method = "POST",
+        headers = headers,
+        body = body
+    })
+    if not resp then
+        return false,  str_format("RequestError: %s", err or "")
+    end
+
+    if resp.status ~= 200 then
+        return false, str_format("response status: %d, response body: %s",

Review Comment:
   ```suggestion
           return false, str_format("elasticsearch server returned status: %d, body: %s",
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logging

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r945468550


##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx
+local core            = require("apisix.core")
+local ngx_now         = ngx.now
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+
+local DEFAULT_ELASTICSEARCH_SOURCE = "apache-apisix-elasticsearch-logging"
+
+local plugin_name = "elasticsearch-logging"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+local str_format = core.string.format
+local str_sub = string.sub
+
+
+local schema = {
+    type = "object",
+    properties = {
+        endpoint = {
+            type = "object",
+            properties = {
+                uri = core.schema.uri_def,
+                index = { type = "string"},
+                type = { type = "string"},
+                username = { type = "string"},
+                password = { type = "string"},
+                timeout = {
+                    type = "integer",
+                    minimum = 1,
+                    default = 10
+                },
+                ssl_verify = {
+                    type = "boolean",
+                    default = true
+                }
+            },
+            required = { "uri", "index" }
+        },
+    },
+    required = { "endpoint" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf)
+    local entry = log_util.get_full_log(ngx, conf)
+    return core.json.encode({
+            create = {
+                _index = conf.endpoint.index,
+                _type = conf.endpoint.type
+            }
+        }) .. "\n" ..
+        core.json.encode({
+            time = ngx_now(),
+            host = entry.server.hostname,

Review Comment:
   Remember that when we update the `entry`, we also need to update the png image.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] tzssangglass commented on pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on PR #7643:
URL: https://github.com/apache/apisix/pull/7643#issuecomment-1225089428

   LGTM (left two comments about indent)


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] tzssangglass commented on pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on PR #7643:
URL: https://github.com/apache/apisix/pull/7643#issuecomment-1224209042

   @ccxhwmy we need to resolve the merge conflicts.


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on PR #7643:
URL: https://github.com/apache/apisix/pull/7643#issuecomment-1226617285

   Do we need to re-run the CI?


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r951084694


##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,447 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_DELETE,
+                 nil,
+                 [[{"action": "delete"}]]
+                 )

Review Comment:
   We have `no_shuffle()` at the top of this test file, which means the test cases run in order. So why is there any uncertainty about whether `apisix/admin/plugin_metadata/elasticsearch-logger` exists or not?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r950920935


##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,166 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+local str_byte        = string.byte
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {
+            type = "string",
+            default = "default",
+            enum = {"default", "origin"},
+        },
+        endpoint_addr = core.schema.uri_def,
+        field = {
+            type = "object",
+            properties = {
+                index = { type = "string"},
+                type = { type = "string"}
+            },
+            required = {"index"}
+        },
+        auth = {
+            type = "object",
+            properties = {
+                username = {
+                    type = "string",
+                },
+                password = {
+                    type = "string",
+                },
+            },
+            required = {"username", "password"},
+        },
+        timeout = {
+            type = "integer",
+            minimum = 1,
+            default = 10
+        },
+        ssl_verify = {
+            type = "boolean",
+            default = true
+        }
+    },
+    required = { "endpoint_addr", "field" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf, ctx)
+    local entry
+    local metadata = plugin.plugin_metadata(plugin_name)
+    core.log.info("metadata: ", core.json.delay_encode(metadata))
+    if metadata and metadata.value.log_format
+        and core.table.nkeys(metadata.value.log_format) > 0
+    then
+        entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
+        core.log.info("custom log format entry: ", core.json.delay_encode(entry))
+    else
+        entry = log_util.get_full_log(ngx, conf)
+        core.log.info("full log entry: ", core.json.delay_encode(entry))
+    end
+
+    return core.json.encode({
+            create = {
+                _index = conf.field.index,
+                _type = conf.field.type
+            }
+        }) .. "\n" ..
+        core.json.encode(entry) .. "\n"
+end
+
+
+local function send_to_elasticsearch(conf, entries)
+    local httpc, err = http.new()
+    if not httpc then
+        return false, str_format("create http error: %s", err)
+    end
+
+    local uri = conf.endpoint_addr ..
+        (str_byte(conf.endpoint_addr, -1) == str_byte("/") and "_bulk" or "/_bulk")

Review Comment:
   > @ccxhwmy Sorry, I can't get your point. We can use a simple pattern that checks that the configuration doesn't end with `/`. Normally, an `addr` should not end with `/`.
   
   I am having difficulty implementing that; could you give me some suggestions?
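   Is a schema-level `pattern` what you have in mind? Just my guess, something like:

   ```lua
   endpoint_addr = {
       type = "string",
       -- must not end with "/": the plugin itself appends "/_bulk"
       pattern = "[^/]$",
   },
   ```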



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logging

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r945461255


##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx
+local core            = require("apisix.core")
+local ngx_now         = ngx.now
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+
+local DEFAULT_ELASTICSEARCH_SOURCE = "apache-apisix-elasticsearch-logging"
+
+local plugin_name = "elasticsearch-logging"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+local str_format = core.string.format
+local str_sub = string.sub
+
+
+local schema = {
+    type = "object",
+    properties = {
+        endpoint = {
+            type = "object",
+            properties = {
+                uri = core.schema.uri_def,
+                index = { type = "string"},
+                type = { type = "string"},
+                username = { type = "string"},
+                password = { type = "string"},
+                timeout = {
+                    type = "integer",
+                    minimum = 1,
+                    default = 10
+                },
+                ssl_verify = {
+                    type = "boolean",
+                    default = true
+                }
+            },
+            required = { "uri", "index" }
+        },
+    },
+    required = { "endpoint" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf)
+    local entry = log_util.get_full_log(ngx, conf)

Review Comment:
   We should not reference another plugin's code from inside a plugin, unless we pull the generic part out into a common module.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logging

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r945300647


##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx
+local core            = require("apisix.core")
+local ngx_now         = ngx.now
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+
+local DEFAULT_ELASTICSEARCH_SOURCE = "apache-apisix-elasticsearch-logging"
+
+local plugin_name = "elasticsearch-logging"

Review Comment:
   > Should we call it elasticsearch-logger, like the kafka-logger plugin?
   
   OK, do I need to change every `elasticsearch-logging` to `elasticsearch-logger`, including the file name?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] spacewander commented on pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
spacewander commented on PR #7643:
URL: https://github.com/apache/apisix/pull/7643#issuecomment-1216179952

   Please make the doc lint pass, thanks!


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] spacewander commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
spacewander commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r949829184


##########
t/plugin/elasticsearch-logging.t:
##########
@@ -0,0 +1,299 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint = {
+                        uri = "http://127.0.0.1:9200",
+                        index = "services",
+                        type = "collector",
+                        timeout = 60,
+                        username = "elastic",
+                        password = "123456",
+                        ssl_verify = false
+                    },
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint = {
+                        uri = "http://127.0.0.1:9200",
+                        index = "services"
+                    }
+                },
+                -- property "uri" is required
+                {
+                    endpoint = {
+                        index = "services",
+                    }
+                },
+                -- property "index" is required
+                {
+                    endpoint = {
+                        uri = "http://127.0.0.1:9200",
+                    }
+                },
+                -- property "uri" validation failed
+                {
+                    endpoint = {
+                        uri = "127.0.0.1:9200",
+                        index = "services"
+                    }
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logging")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint" validation failed: property "uri" is required
+property "endpoint" validation failed: property "index" is required
+property "endpoint" validation failed: property "uri" validation failed.*
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logging"] = {
+                        endpoint = {
+                            uri = "http://127.0.0.1:9200",
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 3: test route (success write)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+
+
+
+=== TEST 4: set route (auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logging"] = {
+                        endpoint = {
+                            uri = "http://127.0.0.1:9201",
+                            index = "services",
+                            username = "elastic",
+                            password = "123456"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 5: test route (auth success)
+--- request
+GET /hello
+--- wait: 2
+--- response_body

Review Comment:
   Sorry for missing this. Yes, you are right.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r950358341


##########
docs/en/latest/plugins/elasticsearch-logger.md:
##########
@@ -0,0 +1,206 @@
+---
+title: elasticsearch-logger
+keywords:
+  - APISIX
+  - API Gateway
+  - Plugin
+  - Elasticsearch-logger
+description: This document contains information about the Apache APISIX elasticsearch-logger Plugin.
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## Description
+
+The `elasticsearch-logger` Plugin is used to forward logs to [Elasticsearch](https://www.elastic.co/guide/en/welcome-to-elastic/current/getting-started-general-purpose.html) for analysis and storage.
+
+When the Plugin is enabled, APISIX will serialize the request context information to [Elasticsearch Bulk format](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) and submit it to the batch queue. When the maximum batch size is exceeded, the data in the queue is pushed to Elasticsearch. See [batch processor](../batch-processor.md) for more details.
+
+## Attributes
+
+| Name          | Required | Default                     | Description                                                  |
+| ------------- | -------- | --------------------------- | ------------------------------------------------------------ |
+| endpoint_addr | True     |                             | Elasticsearch API                                            |
+| field         | True     |                             | Elasticsearch `field` configuration                          |
+| field.index   | True     |                             | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| field.type    | False    | Elasticsearch default value | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| auth          | False    |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) configuration |
+| auth.username | False    |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |

Review Comment:
   If Elasticsearch is configured with `xpack.security.enabled: false`, then the username and password are not needed.
   If `elasticsearch-logger` makes `username` and `password` required, would that confuse users?
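   
   For example, against a cluster with security disabled, a route could omit the `auth` block entirely. A minimal sketch, assuming `auth` stays optional in the schema and the default admin API key is used:
   
   ```shell
   curl http://127.0.0.1:9080/apisix/admin/routes/1 \
   -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
   {
       "plugins": {
           "elasticsearch-logger": {
               "endpoint_addr": "http://127.0.0.1:9200",
               "field": {
                   "index": "services"
               }
           }
       },
       "upstream": {
           "type": "roundrobin",
           "nodes": {
               "127.0.0.1:1980": 1
           }
       },
       "uri": "/elasticsearch.do"
   }'
   ```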



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on PR #7643:
URL: https://github.com/apache/apisix/pull/7643#issuecomment-1219677763

   @spacewander 
   Do we need to re-run the CI?


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r950841788


##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,433 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9200",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 3: test route (success write)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] successfully processed the entries
+
+
+
+=== TEST 4: set route (auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        auth = {
+                            username = "elastic",
+                            password = "123456"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 5: test route (auth success)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] successfully processed the entries
+
+
+
+=== TEST 6: set route (no auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 7: test route (no auth, failed)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] failed to process entries
+Batch Processor[elasticsearch-logger] exceeded the max_retry_count
+
+
+
+=== TEST 8: set route (error auth)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        auth = {
+                            username = "elastic",
+                            password = "111111"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 9: test route (error auth failed)
+--- request
+GET /hello
+--- wait: 2
+--- response_body
+hello world
+--- error_log
+Batch Processor[elasticsearch-logger] failed to process entries
+Batch Processor[elasticsearch-logger] exceeded the max_retry_count
+
+
+
+=== TEST 10: add plugin metadata
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_PUT,
+                [[{
+                    "log_format": {
+                        "custom_host": "$host",
+                        "custom_timestamp": "$time_iso8601",
+                        "custom_client_ip": "$remote_addr"
+                    }
+                }]]
+                )
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+--- no_error_log
+[error]
+
+
+
+=== TEST 11: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, {
+                uri = "/hello",
+                upstream = {
+                    type = "roundrobin",
+                    nodes = {
+                        ["127.0.0.1:1980"] = 1
+                    }
+                },
+                plugins = {
+                    ["elasticsearch-logger"] = {
+                        endpoint_addr = "http://127.0.0.1:9201",
+                        field = {
+                            index = "services"
+                        },
+                        batch_max_size = 1,
+                        inactive_timeout = 1
+                    }
+                }
+            })
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 12: hit route and report custom elasticsearch logger
+--- extra_yaml_config
+nginx_config:
+    error_log_level:  info

Review Comment:
   > The error log level of the test framework is controlled by `log_level('xxx');`, not the yaml config.
   
   `log_level('xxx')` can only be set for the whole file; I just want the `info` log level to apply to this test case.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] tzssangglass commented on pull request #7643: feat: add elasticsearch-logging

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on PR #7643:
URL: https://github.com/apache/apisix/pull/7643#issuecomment-1214400825

   > I tried to do it, but failed; it seems that I don't have permission.
   
   like:
   
   Fixes #7636


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on pull request #7643: feat: add elasticsearch-logging

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on PR #7643:
URL: https://github.com/apache/apisix/pull/7643#issuecomment-1214478128

   > > I tried to do it, but failed; it seems that I don't have permission.
   > 
   > like:
   > 
   > Fixes #7636
   
   I got it, thank you.


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r957249149


##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,181 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {

Review Comment:
   > Where do we use this field?
   
   My mistake; I forgot to remove it.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logging

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r945464487


##########
apisix/plugins/elasticsearch-logging.lua:
##########
@@ -0,0 +1,154 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx             = ngx
+local core            = require("apisix.core")
+local ngx_now         = ngx.now
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+
+local DEFAULT_ELASTICSEARCH_SOURCE = "apache-apisix-elasticsearch-logging"
+
+local plugin_name = "elasticsearch-logging"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+local str_format = core.string.format
+local str_sub = string.sub
+
+
+local schema = {
+    type = "object",
+    properties = {
+        endpoint = {
+            type = "object",
+            properties = {
+                uri = core.schema.uri_def,
+                index = { type = "string"},
+                type = { type = "string"},
+                username = { type = "string"},
+                password = { type = "string"},
+                timeout = {
+                    type = "integer",
+                    minimum = 1,
+                    default = 10
+                },
+                ssl_verify = {
+                    type = "boolean",
+                    default = true
+                }
+            },
+            required = { "uri", "index" }
+        },
+    },
+    required = { "endpoint" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf)
+    local entry = log_util.get_full_log(ngx, conf)
+    return core.json.encode({
+            create = {
+                _index = conf.endpoint.index,
+                _type = conf.endpoint.type
+            }
+        }) .. "\n" ..
+        core.json.encode({
+            time = ngx_now(),
+            host = entry.server.hostname,
+            source = DEFAULT_ELASTICSEARCH_SOURCE,
+            request_url = entry.request.url,
+            request_method = entry.request.method,
+            request_headers = entry.request.headers,
+            request_query = entry.request.querystring,
+            request_size = entry.request.size,
+            response_headers = entry.response.headers,
+            response_status = entry.response.status,
+            response_size = entry.response.size,
+            latency = entry.latency,
+            upstream = entry.upstream,
+        }) .. "\n"
+end
+
+
+local function send_to_elasticsearch(conf, entries)
+    local httpc, err = http.new()
+    if not httpc then
+        return false, str_format("create http error: %s", err)
+    end
+
+    local uri = conf.endpoint.uri ..
+        (str_sub(conf.endpoint.uri, -1) == "/" and "_bulk" or "/_bulk")
+    local body = core.table.concat(entries, "")
+    local headers = {["Content-Type"] = "application/json"}
+    if conf.endpoint.username and conf.endpoint.password then
+        local authorization = "Basic " .. ngx.encode_base64(
+            conf.endpoint.username .. ":" .. conf.endpoint.password
+        )
+        headers["Authorization"] = authorization
+    end
+
+    core.log.info("uri: ", uri, ", body: ", body, ", headers: ", core.json.encode(headers))
+
+    httpc:set_timeout(conf.endpoint.timeout * 1000)
+    local resp, err = httpc:request_uri(uri, {
+        ssl_verify = conf.endpoint.ssl_verify,
+        method = "POST",
+        headers = headers,
+        body = body
+    })
+    if not resp then
+        return false,  str_format("RequestError: %s", err or "")

Review Comment:
   ```suggestion
           return false,  err
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r952063701


##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,447 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_DELETE,
+                 nil,
+                 [[{"action": "delete"}]]
+                 )

Review Comment:
   Perhaps you can add this to the documentation later: https://github.com/apache/apisix/blob/master/docs/en/latest/internal/testing-framework.md



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on PR #7643:
URL: https://github.com/apache/apisix/pull/7643#issuecomment-1221267350

   Do we need to re-run the CI?


-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r951022752


##########
docs/en/latest/plugins/elasticsearch-logger.md:
##########
@@ -0,0 +1,206 @@
+---
+title: elasticsearch-logger
+keywords:
+  - APISIX
+  - API Gateway
+  - Plugin
+  - Elasticsearch-logger
+description: This document contains information about the Apache APISIX elasticsearch-logger Plugin.
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## Description
+
+The `elasticsearch-logger` Plugin is used to forward logs to [Elasticsearch](https://www.elastic.co/guide/en/welcome-to-elastic/current/getting-started-general-purpose.html) for analysis and storage.
+
+When the Plugin is enabled, APISIX will serialize the request context information to [Elasticsearch Bulk format](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) and submit it to the batch queue. When the maximum batch size is exceeded, the data in the queue is pushed to Elasticsearch. See [batch processor](../batch-processor.md) for more details.
+
+## Attributes
+
+| Name          | Type    | Required | Default                     | Description                                                  |
+| ------------- | ------- | -------- | --------------------------- | ------------------------------------------------------------ |
+| endpoint_addr | string  | True     |                             | Elasticsearch API                                            |
+| field         | array   | True     |                             | Elasticsearch `field` configuration                          |
+| field.index   | string  | True     |                             | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| field.type    | string  | False    | Elasticsearch default value | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| auth          | array   | False    |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) configuration |
+| auth.username | string  | True     |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| auth.password | string  | True     |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| ssl_verify    | boolean | False    | true                        | When set to `true` enables SSL verification as per [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake). |
+| timeout       | integer | False    | 10                          | Elasticsearch send data timeout in seconds.                  |
+
+This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration.
+
+## Enabling the Plugin
+
+### Full configuration
+
+The example below shows a complete configuration of the Plugin on a specific Route:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logger":{
+            "endpoint_addr":"http://127.0.0.1:9200",
+            "field":{
+                "index":"services",
+                "type":"collector"
+            },
+            "auth":{
+                "username":"elastic",
+                "password":"123456"
+            },
+            "ssl_verify":false,
+            "timeout": 60,
+            "retry_delay":1,
+            "buffer_duration":60,
+            "max_retry_count":0,
+            "batch_max_size":1000,
+            "inactive_timeout":5,
+            "name":"elasticsearch-logger"
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+### Minimal configuration example
+
+The example below shows a bare minimum configuration of the Plugin on a Route:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logger":{
+            "endpoint_addr":"http://127.0.0.1:9200",
+            "field":{
+                "index":"services"
+            }
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+## Example usage
+
+Once you have configured the Route to use the Plugin, when you make a request to APISIX, it will be logged in your Elasticsearch server:
+
+```shell
+curl -i http://127.0.0.1:9080/elasticsearch.do?q=hello
+HTTP/1.1 200 OK
+...
+hello, world
+```
+
+You should be able to login and search these logs from your Kibana discover:
+
+![kibana search view](../../../assets/images/plugin/elasticsearch-admin-en.png)
+
+## Metadata
+
+You can also set the format of the logs by configuring the Plugin metadata. The following configurations are available:
+
+| Name       | Type   | Required | Default                                                      | Description                                                  |
+| ---------- | ------ | -------- | ------------------------------------------------------------ | ------------------------------------------------------------ |
+| log_format | object | False    | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](https://github.com/apache/apisix/blob/master/docs/en/latest/apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. |
+
+:::info IMPORTANT
+
+Configuring the Plugin metadata is global in scope. This means that it will take effect on all Routes and Services which use the `elasticsearch-logger` Plugin.
+
+:::
+
+The example below shows how you can configure through the Admin API:
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/elasticsearch-logger \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "log_format": {
+        "host": "$host",
+        "@timestamp": "$time_iso8601",
+        "client_ip": "$remote_addr"
+    }
+}'
+```
+
+With this configuration, your logs would be formatted as shown below:
+
+```shell
+{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"}
+{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"}
+```
+
+ make a request to APISIX again:
+
+```shell
+curl -i http://127.0.0.1:9080/elasticsearch.do?q=hello

Review Comment:
   > Let's quote `?` and `=`
   
   Sorry, I don't get your point.
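   
   If the suggestion is about shell quoting, it presumably means wrapping the URL in quotes so the shell does not treat `?` as a glob character. A sketch under that assumption:
   
   ```shell
   curl -i 'http://127.0.0.1:9080/elasticsearch.do?q=hello'
   ```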



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r951081750


##########
docs/zh/latest/plugins/elasticsearch-logger.md:
##########
@@ -0,0 +1,197 @@
+---
+title: elasticsearch-logger
+keywords:
+  - APISIX
+  - API 网关
+  - 插件
+  - Elasticsearch-logger
+  - 日志
+description: 本文介绍了 API 网关 Apache APISIX 的 elasticsearch-logger 插件。使用该插件可以将 APISIX 的日志数据推送到 Elasticserach。
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## 描述
+
+`elasticsearch-logger` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储,启用该插件后 `Apache APISIX` 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 `Elaticsearch` 中。
+
+有关 `Apache APISIX` 的 `Batch-Processor` 的更多信息,请参考: [Batch-Processor](https://file+.vscode-resource.vscode-cdn.net/y%3A/apisix/docs/zh/latest/batch-processor.md)
+
+## 属性
+
+| 名称          | 类型    | 是否必需 | 默认值               | 描述                                                         |
+| ------------- | ------- | -------- | -------------------- | ------------------------------------------------------------ |
+| endpoint_addr | string  | 是       |                      | Elasticsearch API                                            |
+| field         | array   | 是       |                      | Elasticsearch `field`配置信息                                |
+| field.index   | string  | 是       |                      | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| field.type    | string  | 否       | Elasticsearch 默认值 | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| auth          | array   | 否       |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) 配置信息 |
+| auth.username | string  | 是       |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| auth.password | string  | 是       |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| ssl_verify    | boolean | 否       | true                 | 当设置为 `true` 则允许 SSL 验证,参考 [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake) |
+| timeout       | integer | 否       | 10                   | 发送给 Elasticsearch 请求超时时间                            |
+
+本插件支持使用批处理器来聚合并批量处理条目(日志和数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置) 配置部分。
+
+## 启用插件
+
+你可以通过如下命令在指定路由上启用 `elasticsearch-logger` 插件:
+
+### 完整配置示例
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logger":{
+            "endpoint_addr":"http://127.0.0.1:9200",
+            "field":{
+                "index":"services",
+                "type":"collector"
+            },
+            "auth":{
+                "username":"elastic",
+                "password":"123456"
+            },
+            "ssl_verify":false,
+            "timeout": 60,
+            "retry_delay":1,
+            "buffer_duration":60,
+            "max_retry_count":0,
+            "batch_max_size":1000,
+            "inactive_timeout":5,
+            "name":"elasticsearch-logger"
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+### 最小化配置示例
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logger":{
+            "endpoint_addr":"http://127.0.0.1:9200",
+            "field":{
+                "index":"services"
+            }
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+## 测试插件
+
+向配置 `elasticsearch-logger` 插件的路由发送请求
+
+```shell
+curl -i http://127.0.0.1:9080/elasticsearch.do?q=hello
+HTTP/1.1 200 OK
+...
+hello, world
+```
+
+现在,你可以登录 Kibana 控制台检索查看相关日志。

Review Comment:
   IMO, I prefer to reduce references to additional components in the documentation when they are not necessary. 
   `Do not multiply entities beyond necessity`.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r950965527


##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,166 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+local str_byte        = string.byte
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {
+            type = "string",
+            default = "default",
+            enum = {"default", "origin"},
+        },
+        endpoint_addr = core.schema.uri_def,
+        field = {
+            type = "object",
+            properties = {
+                index = { type = "string"},
+                type = { type = "string"}
+            },
+            required = {"index"}
+        },
+        auth = {
+            type = "object",
+            properties = {
+                username = {
+                    type = "string",
+                },
+                password = {
+                    type = "string",
+                },
+            },
+            required = {"username", "password"},
+        },
+        timeout = {
+            type = "integer",
+            minimum = 1,
+            default = 10
+        },
+        ssl_verify = {
+            type = "boolean",
+            default = true
+        }
+    },
+    required = { "endpoint_addr", "field" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf, ctx)
+    local entry
+    local metadata = plugin.plugin_metadata(plugin_name)
+    core.log.info("metadata: ", core.json.delay_encode(metadata))
+    if metadata and metadata.value.log_format
+    and core.table.nkeys(metadata.value.log_format) > 0
+    then
+        entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
+        core.log.info("custom log format entry: ", core.json.delay_encode(entry))
+    else
+        entry = log_util.get_full_log(ngx, conf)
+        core.log.info("full log entry: ", core.json.delay_encode(entry))
+    end
+
+    return core.json.encode({
+            create = {
+                _index = conf.field.index,
+                _type = conf.field.type
+            }
+        }) .. "\n" ..
+        core.json.encode(entry) .. "\n"

Review Comment:
   got it, my mistake.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: notifications-unsubscribe@apisix.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r951493596


##########
t/plugin/elasticsearch-logger.t:
##########
@@ -0,0 +1,447 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use t::APISIX 'no_plan';
+
+log_level('debug');
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if ((!defined $block->error_log) && (!defined $block->no_error_log)) {
+        $block->set_value("no_error_log", "[error]");
+    }
+
+    if (!defined $block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: sanity
+--- config
+    location /t {
+        content_by_lua_block {
+            local ok, err
+            local configs = {
+                -- full configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services",
+                        type = "collector"
+                    },
+                    auth = {
+                        username = "elastic",
+                        password = "123456"
+                    },
+                    ssl_verify = false,
+                    timeout = 60,
+                    max_retry_count = 0,
+                    retry_delay = 1,
+                    buffer_duration = 60,
+                    inactive_timeout = 2,
+                    batch_max_size = 10,
+                },
+                -- minimize configuration
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "endpoint_addr" is required
+                {
+                    field = {
+                        index = "services"
+                    }
+                },
+                -- property "field" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                },
+                -- property "index" is required
+                {
+                    endpoint_addr = "http://127.0.0.1:9200",
+                    field = {}
+                }
+            }
+
+            local plugin = require("apisix.plugins.elasticsearch-logger")
+            for i = 1, #configs do
+                ok, err = plugin.check_schema(configs[i])
+                if err then
+                    ngx.say(err)
+                else
+                    ngx.say("passed")
+                end
+            end
+        }
+    }
+--- response_body_like
+passed
+passed
+property "endpoint_addr" is required
+property "field" is required
+property "field" validation failed: property "index" is required
+
+
+
+=== TEST 2: set route
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
+                ngx.HTTP_DELETE,
+                 nil,
+                 [[{"action": "delete"}]]
+                 )

Review Comment:
   Yes, I know the reason.
   Maybe this is a question about the principles of writing test cases: when we write a test case, do we assume the environment is the system's default initial environment, or do we need to set up the test environment inside the test case itself to guarantee the expected result?
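
   For the "set up the environment inside the test case" approach, a sketch of a self-contained setup step could look like the block below. It reuses the admin call already shown in TEST 2 above; the status handling and the messages are only illustrative, not part of the PR.

   ```lua
   -- illustrative sketch: clear any elasticsearch-logger plugin metadata
   -- left over from earlier cases, so this case does not depend on the
   -- suite starting from the system's default initial environment
   local t = require("lib.test_admin").test
   local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger',
                        ngx.HTTP_DELETE,
                        nil,
                        [[{"action": "delete"}]])
   -- a 404 here simply means no metadata existed, which is also a clean state
   if code >= 300 and code ~= 404 then
       ngx.status = code
       ngx.say("failed to reset plugin metadata")
       return
   end
   ngx.say("plugin metadata reset")
   ```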





[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r949368202


##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,166 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+local str_byte        = string.byte
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {
+            type = "string",
+            default = "default",
+            enum = {"default", "origin"},
+        },
+        endpoint_addr = core.schema.uri_def,
+        field = {
+            type = "object",
+            properties = {
+                index = { type = "string"},
+                type = { type = "string"}
+            },
+            required = {"index"}
+        },
+        auth = {
+            type = "object",
+            properties = {
+                username = {
+                    type = "string",
+                },
+                password = {
+                    type = "string",
+                },
+            },
+            required = {"username", "password"},
+        },
+        timeout = {
+            type = "integer",
+            minimum = 1,
+            default = 10
+        },
+        ssl_verify = {
+            type = "boolean",
+            default = true
+        }
+    },
+    required = { "endpoint_addr", "field" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf, ctx)
+    local entry
+    local metadata = plugin.plugin_metadata(plugin_name)
+    core.log.info("metadata: ", core.json.delay_encode(metadata))
+    if metadata and metadata.value.log_format
+    and core.table.nkeys(metadata.value.log_format) > 0

Review Comment:
   ```suggestion
       if metadata and metadata.value.log_format
           and core.table.nkeys(metadata.value.log_format) > 0
   ```





[GitHub] [apisix] tzssangglass commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
tzssangglass commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r949373594


##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,166 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+local str_byte        = string.byte
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {
+            type = "string",
+            default = "default",
+            enum = {"default", "origin"},
+        },
+        endpoint_addr = core.schema.uri_def,
+        field = {
+            type = "object",
+            properties = {
+                index = { type = "string"},
+                type = { type = "string"}
+            },
+            required = {"index"}
+        },
+        auth = {
+            type = "object",
+            properties = {
+                username = {
+                    type = "string",
+                },
+                password = {
+                    type = "string",
+                },
+            },
+            required = {"username", "password"},
+        },
+        timeout = {
+            type = "integer",
+            minimum = 1,
+            default = 10
+        },
+        ssl_verify = {
+            type = "boolean",
+            default = true
+        }
+    },
+    required = { "endpoint_addr", "field" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf, ctx)
+    local entry
+    local metadata = plugin.plugin_metadata(plugin_name)
+    core.log.info("metadata: ", core.json.delay_encode(metadata))
+    if metadata and metadata.value.log_format
+    and core.table.nkeys(metadata.value.log_format) > 0
+    then
+        entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
+        core.log.info("custom log format entry: ", core.json.delay_encode(entry))
+    else
+        entry = log_util.get_full_log(ngx, conf)
+        core.log.info("full log entry: ", core.json.delay_encode(entry))
+    end
+
+    return core.json.encode({
+            create = {
+                _index = conf.field.index,
+                _type = conf.field.type
+            }
+        }) .. "\n" ..
+        core.json.encode(entry) .. "\n"

Review Comment:
   I think you could give it a try here: return the JSON (a Lua table) instead of a JSON string, and let's see how Elasticsearch handles it.
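
   A rough sketch of what that alternative might look like (the function name and the return shape are assumptions for illustration, not code from the PR):

   ```lua
   -- sketch of the suggested alternative: keep the log entry as Lua tables
   -- and leave the Bulk-format string encoding to whatever sends the batch
   local function get_logger_entry_as_table(conf, entry)
       return {
           action = {
               create = {
                   _index = conf.field.index,
                   _type  = conf.field.type,
               },
           },
           source = entry,
       }
   end
   ```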





[GitHub] [apisix] ccxhwmy commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
ccxhwmy commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r949686873


##########
apisix/plugins/elasticsearch-logger.lua:
##########
@@ -0,0 +1,166 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core            = require("apisix.core")
+local http            = require("resty.http")
+local log_util        = require("apisix.utils.log-util")
+local bp_manager_mod  = require("apisix.utils.batch-processor-manager")
+local plugin          = require("apisix.plugin")
+
+local ngx             = ngx
+local str_format      = core.string.format
+local str_byte        = string.byte
+
+local plugin_name = "elasticsearch-logger"
+local batch_processor_manager = bp_manager_mod.new(plugin_name)
+
+
+local schema = {
+    type = "object",
+    properties = {
+        meta_format = {
+            type = "string",
+            default = "default",
+            enum = {"default", "origin"},
+        },
+        endpoint_addr = core.schema.uri_def,
+        field = {
+            type = "object",
+            properties = {
+                index = { type = "string"},
+                type = { type = "string"}
+            },
+            required = {"index"}
+        },
+        auth = {
+            type = "object",
+            properties = {
+                username = {
+                    type = "string",
+                },
+                password = {
+                    type = "string",
+                },
+            },
+            required = {"username", "password"},
+        },
+        timeout = {
+            type = "integer",
+            minimum = 1,
+            default = 10
+        },
+        ssl_verify = {
+            type = "boolean",
+            default = true
+        }
+    },
+    required = { "endpoint_addr", "field" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 413,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_logger_entry(conf, ctx)
+    local entry
+    local metadata = plugin.plugin_metadata(plugin_name)
+    core.log.info("metadata: ", core.json.delay_encode(metadata))
+    if metadata and metadata.value.log_format
+    and core.table.nkeys(metadata.value.log_format) > 0
+    then
+        entry = log_util.get_custom_format_log(ctx, metadata.value.log_format)
+        core.log.info("custom log format entry: ", core.json.delay_encode(entry))
+    else
+        entry = log_util.get_full_log(ngx, conf)
+        core.log.info("full log entry: ", core.json.delay_encode(entry))
+    end
+
+    return core.json.encode({
+            create = {
+                _index = conf.field.index,
+                _type = conf.field.type
+            }
+        }) .. "\n" ..
+        core.json.encode(entry) .. "\n"

Review Comment:
   > I think you could give it a try here: return the JSON (a Lua table) instead of a JSON string, and let's see how Elasticsearch handles it.
   
   But `httpc:request_uri` needs a string body, so even if I return a `table` here, I would still have to call `json.encode` on it later, before passing it to `httpc:request_uri`.
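
   To illustrate the constraint, a rough sketch of the sending side (the `/_bulk` path, the `application/x-ndjson` header and the function name are assumptions for illustration, not necessarily what the plugin does):

   ```lua
   -- rough sketch: every batch entry is already an NDJSON pair of lines
   -- ("action metadata\nlog entry\n"), so the whole batch can be joined
   -- into the single string body that httpc:request_uri expects
   local http = require("resty.http")

   local function send_to_elasticsearch(conf, entries)
       local httpc = http.new()
       httpc:set_timeout(conf.timeout * 1000)

       local res, err = httpc:request_uri(conf.endpoint_addr .. "/_bulk", {
           method = "POST",
           headers = { ["Content-Type"] = "application/x-ndjson" },
           body = table.concat(entries),
           ssl_verify = conf.ssl_verify,
       })
       if not res then
           return false, "failed to send to elasticsearch: " .. err
       end
       if res.status ~= 200 then
           return false, "elasticsearch returned status: " .. res.status
       end

       return true
   end
   ```

   With pre-encoded string entries the flush step is just a `table.concat`, which seems to be the motivation for encoding inside `get_logger_entry` rather than at send time.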





[GitHub] [apisix] hf400159 commented on a diff in pull request #7643: feat: add elasticsearch-logger

Posted by GitBox <gi...@apache.org>.
hf400159 commented on code in PR #7643:
URL: https://github.com/apache/apisix/pull/7643#discussion_r953895770


##########
docs/zh/latest/plugins/elasticsearch-logger.md:
##########
@@ -0,0 +1,278 @@
+---
+title: elasticsearch-logger
+keywords:
+  - APISIX
+  - API 网关
+  - 插件
+  - Elasticsearch-logger
+  - 日志
+description: 本文介绍了 API 网关 Apache APISIX 的 elasticsearch-logger 插件。使用该插件可以将 APISIX 的日志数据推送到 Elasticserach。
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## 描述
+
+`elasticsearch-logger` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储,启用该插件后 `Apache APISIX` 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 `Elaticsearch` 中。
+
+有关 `Apache APISIX` 的 `Batch-Processor` 的更多信息,请参考: [Batch-Processor](https://file+.vscode-resource.vscode-cdn.net/y%3A/apisix/docs/zh/latest/batch-processor.md)

Review Comment:
   ```suggestion
   `elasticsearch-logger` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储。
   
   启用该插件后 APISIX 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 Elaticsearch 中。更多信息,请参考 [Batch-Processor](./batch-processor.md)。
   ```



##########
docs/en/latest/plugins/elasticsearch-logger.md:
##########
@@ -0,0 +1,287 @@
+---
+title: elasticsearch-logger
+keywords:
+  - APISIX
+  - API Gateway
+  - Plugin
+  - Elasticsearch-logger
+description: This document contains information about the Apache APISIX elasticsearch-logger Plugin.
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## Description
+
+The `elasticsearch-logger` Plugin is used to forward logs to [Elasticsearch](https://www.elastic.co/guide/en/welcome-to-elastic/current/getting-started-general-purpose.html) for analysis and storage.
+
+When the Plugin is enabled, APISIX will serialize the request context information to [Elasticsearch Bulk format](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) and submit it to the batch queue. When the maximum batch size is exceeded, the data in the queue is pushed to Elasticsearch. See [batch processor](../batch-processor.md) for more details.
+
+## Attributes
+
+| Name          | Type    | Required | Default                     | Description                                                  |
+| ------------- | ------- | -------- | --------------------------- | ------------------------------------------------------------ |
+| endpoint_addr | string  | True     |                             | Elasticsearch API                                            |
+| field         | array   | True     |                             | Elasticsearch `field` configuration                          |
+| field.index   | string  | True     |                             | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| field.type    | string  | False    | Elasticsearch default value | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| auth          | array   | False    |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) configuration |
+| auth.username | string  | True     |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| auth.password | string  | True     |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| ssl_verify    | boolean | False    | true                        | When set to `true` enables SSL verification as per [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake). |
+| timeout       | integer | False    | 10                          | Elasticsearch send data timeout in seconds.                  |

Review Comment:
   ```suggestion
   | endpoint_addr | string  | True     |                             | Elasticsearch API.                                            |
   | field         | array   | True     |                             | Elasticsearch `field` configuration.                          |
   | field.index   | string  | True     |                             | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field). |
   | field.type    | string  | False    | Elasticsearch default value | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field). |
   | auth          | array   | False    |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) configuration. |
   | auth.username | string  | True     |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username. |
   | auth.password | string  | True     |                             | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password. |
   | ssl_verify    | boolean | False    | true                        | When set to `true` enables SSL verification as per [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake). |
   | timeout       | integer | False    | 10                          | Elasticsearch send data timeout in seconds.                  |
   ```



##########
docs/zh/latest/plugins/elasticsearch-logger.md:
##########
@@ -0,0 +1,278 @@
+---
+title: elasticsearch-logger
+keywords:
+  - APISIX
+  - API 网关
+  - 插件
+  - Elasticsearch-logger
+  - 日志
+description: 本文介绍了 API 网关 Apache APISIX 的 elasticsearch-logger 插件。使用该插件可以将 APISIX 的日志数据推送到 Elasticserach。
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## 描述
+
+`elasticsearch-logger` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储,启用该插件后 `Apache APISIX` 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 `Elaticsearch` 中。
+
+有关 `Apache APISIX` 的 `Batch-Processor` 的更多信息,请参考: [Batch-Processor](https://file+.vscode-resource.vscode-cdn.net/y%3A/apisix/docs/zh/latest/batch-processor.md)
+
+## 属性
+
+| 名称          | 类型    | 是否必需 | 默认值               | 描述                                                         |
+| ------------- | ------- | -------- | -------------------- | ------------------------------------------------------------ |
+| endpoint_addr | string  | 是       |                      | Elasticsearch API                                            |
+| field         | array   | 是       |                      | Elasticsearch `field`配置信息                                |
+| field.index   | string  | 是       |                      | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| field.type    | string  | 否       | Elasticsearch 默认值 | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| auth          | array   | 否       |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) 配置信息 |
+| auth.username | string  | 是       |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| auth.password | string  | 是       |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| ssl_verify    | boolean | 否       | true                 | 当设置为 `true` 则允许 SSL 验证,参考 [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake) |
+| timeout       | integer | 否       | 10                   | 发送给 Elasticsearch 请求超时时间                            |
+
+本插件支持使用批处理器来聚合并批量处理条目(日志和数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置) 配置部分。
+
+## 启用插件
+
+你可以通过如下命令在指定路由上启用 `elasticsearch-logger` 插件:
+
+### 完整配置示例
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logger":{
+            "endpoint_addr":"http://127.0.0.1:9200",
+            "field":{
+                "index":"services",
+                "type":"collector"
+            },
+            "auth":{
+                "username":"elastic",
+                "password":"123456"
+            },
+            "ssl_verify":false,
+            "timeout": 60,
+            "retry_delay":1,
+            "buffer_duration":60,
+            "max_retry_count":0,
+            "batch_max_size":1000,
+            "inactive_timeout":5,
+            "name":"elasticsearch-logger"
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+### 最小化配置示例
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/routes/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "plugins":{
+        "elasticsearch-logger":{
+            "endpoint_addr":"http://127.0.0.1:9200",
+            "field":{
+                "index":"services"
+            }
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:1980":1
+        }
+    },
+    "uri":"/elasticsearch.do"
+}'
+```
+
+## 测试插件
+
+向配置 `elasticsearch-logger` 插件的路由发送请求
+
+```shell
+curl -i http://127.0.0.1:9080/elasticsearch.do\?q\=hello
+HTTP/1.1 200 OK
+...
+hello, world
+```
+
+现在,你可以从 Elasticsearch 获取相关日志。
+
+```shell
+curl -X GET "http://127.0.0.1:9200/services/_search" | jq .
+{
+  "took": 0,
+   ...
+    "hits": [
+      {
+        "_index": "services",
+        "_type": "_doc",
+        "_id": "M1qAxYIBRmRqWkmH4Wya",
+        "_score": 1,
+        "_source": {
+          "apisix_latency": 0,
+          "route_id": "1",
+          "server": {
+            "version": "2.15.0",
+            "hostname": "apisix"
+          },
+          "request": {
+            "size": 102,
+            "uri": "/elasticsearch.do?q=hello",
+            "querystring": {
+              "q": "hello"
+            },
+            "headers": {
+              "user-agent": "curl/7.29.0",
+              "host": "127.0.0.1:9080",
+              "accept": "*/*"
+            },
+            "url": "http://127.0.0.1:9080/elasticsearch.do?q=hello",
+            "method": "GET"
+          },
+          "service_id": "",
+          "latency": 0,
+          "upstream": "127.0.0.1:1980",
+          "upstream_latency": 1,
+          "client_ip": "127.0.0.1",
+          "start_time": 1661170929107,
+          "response": {
+            "size": 192,
+            "headers": {
+              "date": "Mon, 22 Aug 2022 12:22:09 GMT",
+              "server": "APISIX/2.15.0",
+              "content-type": "text/plain; charset=utf-8",
+              "connection": "close",
+              "transfer-encoding": "chunked"
+            },
+            "status": 200
+          }
+        }
+      }
+    ]
+  }
+}
+```
+
+## 插件元数据设置
+
+| 名称       | 类型   | 必选项 | 默认值                                                       | 有效值 | 描述                                                         |
+| ---------- | ------ | ------ | ------------------------------------------------------------ | ------ | ------------------------------------------------------------ |
+| log_format | object | 可选   | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} |        | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](https://github.com/apache/apisix/blob/master/docs/en/latest/apisix-variable.md) 或 [Nginx 内置变量](http://nginx.org/en/docs/varindex.html)。请注意,**该设置是全局生效的**,因此在指定 log_format 后,将对所有绑定 elasticsearch-logger 的 Route 或 Service 生效。 |
+
+### 设置日志格式示例
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/elasticsearch-logger \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "log_format": {
+        "host": "$host",
+        "@timestamp": "$time_iso8601",
+        "client_ip": "$remote_addr"
+    }
+}'
+```
+
+在日志收集处,将得到类似下面的日志:
+
+```json
+{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"}
+{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"}
+```
+
+向配置 `elasticsearch-logger` 插件的路由发送请求
+
+```shell
+curl -i http://127.0.0.1:9080/elasticsearch.do\?q\=hello
+HTTP/1.1 200 OK
+...
+hello, world
+```
+
+现在,你可以从 Elasticsearch 获取相关日志。
+
+```shell
+curl -X GET "http://127.0.0.1:9200/services/_search" | jq .
+{
+  "took": 0,
+  ...
+  "hits": {
+    "total": {
+      "value": 1,
+      "relation": "eq"
+    },
+    "max_score": 1,
+    "hits": [
+      {
+        "_index": "services",
+        "_type": "_doc",
+        "_id": "NVqExYIBRmRqWkmH4WwG",
+        "_score": 1,
+        "_source": {
+          "@timestamp": "2022-08-22T20:26:31+08:00",
+          "client_ip": "127.0.0.1",
+          "host": "127.0.0.1",
+          "route_id": "1"
+        }
+      }
+    ]
+  }
+}
+```
+
+### 禁用插件元数据
+
+```shell
+curl http://127.0.0.1:9080/apisix/admin/plugin_metadata/elasticsearch-logger \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X DELETE
+```
+
+## 禁用插件
+
+当你需要禁用该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务:```

Review Comment:
   ```suggestion
   当你需要禁用该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务:
   ```



##########
docs/zh/latest/plugins/elasticsearch-logger.md:
##########
@@ -0,0 +1,278 @@
+---
+title: elasticsearch-logger
+keywords:
+  - APISIX
+  - API 网关
+  - 插件
+  - Elasticsearch-logger
+  - 日志
+description: 本文介绍了 API 网关 Apache APISIX 的 elasticsearch-logger 插件。使用该插件可以将 APISIX 的日志数据推送到 Elasticserach。
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## 描述
+
+`elasticsearch-logger` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储,启用该插件后 `Apache APISIX` 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 `Elaticsearch` 中。
+
+有关 `Apache APISIX` 的 `Batch-Processor` 的更多信息,请参考: [Batch-Processor](https://file+.vscode-resource.vscode-cdn.net/y%3A/apisix/docs/zh/latest/batch-processor.md)
+
+## 属性
+
+| 名称          | 类型    | 是否必需 | 默认值               | 描述                                                         |

Review Comment:
   ```suggestion
   | 名称          | 类型    | 必选项 | 默认值               | 描述                                                         |
   ```



##########
docs/zh/latest/plugins/elasticsearch-logger.md:
##########
@@ -0,0 +1,278 @@
+---
+title: elasticsearch-logger
+keywords:
+  - APISIX
+  - API 网关
+  - 插件
+  - Elasticsearch-logger
+  - 日志
+description: 本文介绍了 API 网关 Apache APISIX 的 elasticsearch-logger 插件。使用该插件可以将 APISIX 的日志数据推送到 Elasticserach。
+---
+
+<!--
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+-->
+
+## 描述
+
+`elasticsearch-logger` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储,启用该插件后 `Apache APISIX` 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 `Elaticsearch` 中。
+
+有关 `Apache APISIX` 的 `Batch-Processor` 的更多信息,请参考: [Batch-Processor](https://file+.vscode-resource.vscode-cdn.net/y%3A/apisix/docs/zh/latest/batch-processor.md)
+
+## 属性
+
+| 名称          | 类型    | 是否必需 | 默认值               | 描述                                                         |
+| ------------- | ------- | -------- | -------------------- | ------------------------------------------------------------ |
+| endpoint_addr | string  | 是       |                      | Elasticsearch API                                            |
+| field         | array   | 是       |                      | Elasticsearch `field`配置信息                                |
+| field.index   | string  | 是       |                      | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field) |
+| field.type    | string  | 否       | Elasticsearch 默认值 | Elasticsearch [_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field) |
+| auth          | array   | 否       |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) 配置信息 |
+| auth.username | string  | 是       |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username |
+| auth.password | string  | 是       |                      | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password |
+| ssl_verify    | boolean | 否       | true                 | 当设置为 `true` 则允许 SSL 验证,参考 [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake) |
+| timeout       | integer | 否       | 10                   | 发送给 Elasticsearch 请求超时时间                            |

Review Comment:
   ```suggestion
   | endpoint_addr | string  | 是       |                      | Elasticsearch API。                                           |
   | field         | array   | 是       |                      | Elasticsearch `field`配置信息。                                |
   | field.index   | string  | 是       |                      | Elasticsearch `[_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field)`。 |
   | field.type    | string  | 否       | Elasticsearch 默认值 | Elasticsearch `[_type field](https://www.elastic.co/guide/en/elasticsearch/reference/7.17/mapping-type-field.html#mapping-type-field)` |
   | auth          | array   | 否       |                      | Elasticsearch `[authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html)` 配置信息 |
   | auth.username | string  | 是       |                      | Elasticsearch `[authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html)` 用户名。 |
   | auth.password | string  | 是       |                      | Elasticsearch `[authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html)` 密码。 |
   | ssl_verify    | boolean | 否       | true                 | 当设置为 `true` 时则启用 SSL 验证。更多信息请参考 [lua-nginx-module](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake)。 |
   | timeout       | integer | 否       | 10                   | 发送给 Elasticsearch 请求超时时间。                            |
   ```


